diff --git a/.gitignore b/.gitignore index bf5ee6e01cd4..929054df5212 100644 --- a/.gitignore +++ b/.gitignore @@ -114,6 +114,7 @@ modules.order !.gitignore !.kunitconfig !.mailmap +!.pylintrc !.rustfmt.toml # diff --git a/Documentation/ABI/README b/Documentation/ABI/README index ef0e6d11e919..315fffe1f831 100644 --- a/Documentation/ABI/README +++ b/Documentation/ABI/README @@ -46,7 +46,9 @@ Every file in these directories will contain the following information: What: Short description of the interface Date: Date created -KernelVersion: Kernel version this feature first showed up in. +KernelVersion: (Optional) Kernel version this feature first showed up in. + Note: git history often provides more accurate version + info, so this field may be omitted. Contact: Primary contact for this interface (may be a mailing list) Description: Long description of the interface and how to use it. Users: All users of this interface who wish to be notified when diff --git a/Documentation/Makefile b/Documentation/Makefile index d30d66ddf1ad..b98477df5ddf 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -5,6 +5,7 @@ # for cleaning subdir- := devicetree/bindings +ifneq ($(MAKECMDGOALS),cleandocs) # Check for broken documentation file references ifeq ($(CONFIG_WARN_MISSING_DOCUMENTS),y) $(shell $(srctree)/scripts/documentation-file-ref-check --warn) @@ -14,6 +15,7 @@ endif ifeq ($(CONFIG_WARN_ABI_ERRORS),y) $(shell $(srctree)/scripts/get_abi.py --dir $(srctree)/Documentation/ABI validate) endif +endif # You can set these variables from the command line. SPHINXBUILD = sphinx-build diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst index 91339efdcb54..7a86042c9b6d 100644 --- a/Documentation/admin-guide/bootconfig.rst +++ b/Documentation/admin-guide/bootconfig.rst @@ -265,7 +265,7 @@ The final kernel cmdline will be the following:: Config File Limitation ====================== -Currently the maximum config size size is 32KB and the total key-words (not +Currently the maximum config size is 32KB and the total key-words (not key-value entries) must be under 1024 nodes. Note: this is not the number of entries but nodes, an entry must consume more than 2 nodes (a key-word and a value). So theoretically, it will be diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index c04e6b8eb2b1..3c8faad03d01 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -177,6 +177,7 @@ core_pattern %E executable path %c maximum size of core file by resource limit RLIMIT_CORE %C CPU the task ran on + %F pidfd number % both are dropped ======== ========================================== @@ -1106,7 +1107,8 @@ printk_ratelimit_burst While long term we enforce one message per `printk_ratelimit`_ seconds, we do allow a burst of messages to pass through. ``printk_ratelimit_burst`` specifies the number of messages we can -send before ratelimiting kicks in. +send before ratelimiting kicks in. After `printk_ratelimit`_ seconds +have elapsed, another burst of messages may be sent. The default value is 10 messages. 
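+ +For example, with the default settings (a `printk_ratelimit`_ of 5 seconds +and a burst of 10), a sustained flood of messages is logged as at most 10 +messages every 5 seconds; everything beyond the burst is suppressed until +the interval elapses and the next burst of up to 10 messages may pass.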
diff --git a/Documentation/arch/powerpc/index.rst b/Documentation/arch/powerpc/index.rst index 0560cbae5fa1..53fc9f89f3e4 100644 --- a/Documentation/arch/powerpc/index.rst +++ b/Documentation/arch/powerpc/index.rst @@ -19,6 +19,7 @@ powerpc elf_hwcaps elfnote firmware-assisted-dump + htm hvcs imc isa-versions diff --git a/Documentation/conf.py b/Documentation/conf.py index 12de52a2b17e..700516238d3f 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -1,25 +1,87 @@ -# -*- coding: utf-8 -*- -# -# The Linux Kernel documentation build configuration file, created by -# sphinx-quickstart on Fri Feb 12 13:51:46 2016. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. +# SPDX-License-Identifier: GPL-2.0-only +# pylint: disable=C0103,C0209 + +""" +The Linux Kernel documentation build configuration file. +""" -import sys import os -import sphinx import shutil +import sys + +import sphinx + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("sphinx")) + +from load_config import loadConfig # pylint: disable=C0413,E0401 + +# Minimal supported Sphinx version +needs_sphinx = "3.4.3" + +# Get Sphinx version +major, minor, patch = sphinx.version_info[:3] # pylint: disable=I1101 + +# include_patterns was added in Sphinx 5.1 +if (major < 5) or (major == 5 and minor < 1): + has_include_patterns = False +else: + has_include_patterns = True + # Include patterns that don't contain directory names, in glob format + include_patterns = ["**.rst"] + +# Location of Documentation/ directory +doctree = os.path.abspath(".") + +# Exclude patterns that don't contain directory names, in glob format. +exclude_patterns = [] + +# List of patterns that contain directory names, in glob format. +dyn_include_patterns = [] +dyn_exclude_patterns = ["output"] + +# Properly handle include/exclude patterns +# ---------------------------------------- + +def update_patterns(app, config): + """ + In Sphinx, all directories are relative to what is passed as the + SOURCEDIR parameter to sphinx-build. Due to that, all patterns + that contain directory names need to be set dynamically, after + converting them to a relative path. + + As Sphinx doesn't include any patterns outside SOURCEDIR, we should + exclude relative patterns that start with "../". + """ + + # setup include_patterns dynamically + if has_include_patterns: + for p in dyn_include_patterns: + full = os.path.join(doctree, p) + + rel_path = os.path.relpath(full, start=app.srcdir) + if rel_path.startswith("../"): + continue + + config.include_patterns.append(rel_path) + + # setup exclude_patterns dynamically + for p in dyn_exclude_patterns: + full = os.path.join(doctree, p) + + rel_path = os.path.relpath(full, start=app.srcdir) + if rel_path.startswith("../"): + continue + + config.exclude_patterns.append(rel_path) + # helper # ------ + def have_command(cmd): """Search ``cmd`` in the ``PATH`` environment.
@@ -28,24 +90,23 @@ def have_command(cmd): """ return shutil.which(cmd) is not None -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('sphinx')) -from load_config import loadConfig # -- General configuration ------------------------------------------------ -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '3.4.3' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', - 'kfigure', 'sphinx.ext.ifconfig', 'automarkup', - 'maintainers_include', 'sphinx.ext.autosectionlabel', - 'kernel_abi', 'kernel_feat', 'translations'] +# Add any Sphinx extensions in alphabetic order +extensions = [ + "automarkup", + "kernel_abi", + "kerneldoc", + "kernel_feat", + "kernel_include", + "kfigure", + "maintainers_include", + "rstFlatTable", + "sphinx.ext.autosectionlabel", + "sphinx.ext.ifconfig", + "translations", +] # Since Sphinx version 3, the C function parser is more pedantic with regards # to type checking. Due to that, having macros at c:function cause problems. @@ -120,28 +181,28 @@ autosectionlabel_maxdepth = 2 # Load math renderer: # For html builder, load imgmath only when its dependencies are met. # mathjax is the default math renderer since Sphinx 1.8. -have_latex = have_command('latex') -have_dvipng = have_command('dvipng') +have_latex = have_command("latex") +have_dvipng = have_command("dvipng") load_imgmath = have_latex and have_dvipng # Respect SPHINX_IMGMATH (for html docs only) -if 'SPHINX_IMGMATH' in os.environ: - env_sphinx_imgmath = os.environ['SPHINX_IMGMATH'] - if 'yes' in env_sphinx_imgmath: +if "SPHINX_IMGMATH" in os.environ: + env_sphinx_imgmath = os.environ["SPHINX_IMGMATH"] + if "yes" in env_sphinx_imgmath: load_imgmath = True - elif 'no' in env_sphinx_imgmath: + elif "no" in env_sphinx_imgmath: load_imgmath = False else: sys.stderr.write("Unknown env SPHINX_IMGMATH=%s ignored.\n" % env_sphinx_imgmath) if load_imgmath: extensions.append("sphinx.ext.imgmath") - math_renderer = 'imgmath' + math_renderer = "imgmath" else: - math_renderer = 'mathjax' + math_renderer = "mathjax" # Add any paths that contain templates here, relative to this directory. -templates_path = ['sphinx/templates'] +templates_path = ["sphinx/templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: @@ -149,15 +210,15 @@ templates_path = ['sphinx/templates'] source_suffix = '.rst' # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. 
-project = 'The Linux Kernel' -copyright = 'The kernel development community' -author = 'The kernel development community' +project = "The Linux Kernel" +copyright = "The kernel development community" # pylint: disable=W0622 +author = "The kernel development community" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -172,86 +233,86 @@ author = 'The kernel development community' try: makefile_version = None makefile_patchlevel = None - for line in open('../Makefile'): - key, val = [x.strip() for x in line.split('=', 2)] - if key == 'VERSION': - makefile_version = val - elif key == 'PATCHLEVEL': - makefile_patchlevel = val - if makefile_version and makefile_patchlevel: - break -except: + with open("../Makefile", encoding="utf-8") as fp: + for line in fp: + key, val = [x.strip() for x in line.split("=", 2)] + if key == "VERSION": + makefile_version = val + elif key == "PATCHLEVEL": + makefile_patchlevel = val + if makefile_version and makefile_patchlevel: + break +except Exception: pass finally: if makefile_version and makefile_patchlevel: - version = release = makefile_version + '.' + makefile_patchlevel + version = release = makefile_version + "." + makefile_patchlevel else: version = release = "unknown version" -# -# HACK: there seems to be no easy way for us to get at the version and -# release information passed in from the makefile...so go pawing through the -# command-line options and find it for ourselves. -# + def get_cline_version(): - c_version = c_release = '' + """ + HACK: There seems to be no easy way for us to get at the version and + release information passed in from the makefile...so go pawing through the + command-line options and find it for ourselves. + """ + + c_version = c_release = "" for arg in sys.argv: - if arg.startswith('version='): + if arg.startswith("version="): c_version = arg[8:] - elif arg.startswith('release='): + elif arg.startswith("release="): c_release = arg[8:] if c_version: if c_release: - return c_version + '-' + c_release + return c_version + "-" + c_release return c_version - return version # Whatever we came up with before + return version # Whatever we came up with before + # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['output'] +# today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default.
-#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False -primary_domain = 'c' -highlight_language = 'none' +primary_domain = "c" +highlight_language = "none" # -- Options for HTML output ---------------------------------------------- @@ -259,43 +320,45 @@ highlight_language = 'none' # a list of builtin themes. # Default theme -html_theme = 'alabaster' +html_theme = "alabaster" html_css_files = [] if "DOCS_THEME" in os.environ: html_theme = os.environ["DOCS_THEME"] -if html_theme == 'sphinx_rtd_theme' or html_theme == 'sphinx_rtd_dark_mode': +if html_theme in ["sphinx_rtd_theme", "sphinx_rtd_dark_mode"]: # Read the Docs theme try: import sphinx_rtd_theme + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_css_files = [ - 'theme_overrides.css', + "theme_overrides.css", ] # Read the Docs dark mode override theme - if html_theme == 'sphinx_rtd_dark_mode': + if html_theme == "sphinx_rtd_dark_mode": try: - import sphinx_rtd_dark_mode - extensions.append('sphinx_rtd_dark_mode') - except ImportError: - html_theme == 'sphinx_rtd_theme' + import sphinx_rtd_dark_mode # pylint: disable=W0611 - if html_theme == 'sphinx_rtd_theme': - # Add color-specific RTD normal mode - html_css_files.append('theme_rtd_colors.css') + extensions.append("sphinx_rtd_dark_mode") + except ImportError: + html_theme = "sphinx_rtd_theme" + + if html_theme == "sphinx_rtd_theme": + # Add color-specific RTD normal mode + html_css_files.append("theme_rtd_colors.css") html_theme_options = { - 'navigation_depth': -1, + "navigation_depth": -1, } except ImportError: - html_theme = 'alabaster' + html_theme = "alabaster" if "DOCS_CSS" in os.environ: css = os.environ["DOCS_CSS"].split(" ") @@ -303,14 +366,14 @@ if "DOCS_CSS" in os.environ: for l in css: html_css_files.append(l) -if html_theme == 'alabaster': +if html_theme == "alabaster": html_theme_options = { - 'description': get_cline_version(), - 'page_width': '65em', - 'sidebar_width': '15em', - 'fixed_sidebar': 'true', - 'font_size': 'inherit', - 'font_family': 'serif', + "description": get_cline_version(), + "page_width": "65em", + "sidebar_width": "15em", + "fixed_sidebar": "true", + "font_size": "inherit", + "font_family": "serif", } sys.stderr.write("Using %s theme\n" % html_theme) @@ -318,104 +381,79 @@ sys.stderr.write("Using %s theme\n" % html_theme) # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['sphinx-static'] +html_static_path = ["sphinx-static"] # If true, Docutils "smart quotes" will be used to convert quotes and dashes # to typographically correct entities. However, conversion of "--" to "—" # is not always what we want, so enable only quotes. 
-smartquotes_action = 'q' +smartquotes_action = "q" # Custom sidebar templates, maps document names to template names. # Note that the RTD theme ignores this -html_sidebars = { '**': ['searchbox.html', 'kernel-toc.html', 'sourcelink.html']} +html_sidebars = {"**": ["searchbox.html", + "kernel-toc.html", + "sourcelink.html"]} # about.html is available for alabaster theme. Add it at the front. -if html_theme == 'alabaster': - html_sidebars['**'].insert(0, 'about.html') +if html_theme == "alabaster": + html_sidebars["**"].insert(0, "about.html") # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = 'images/logo.svg' +html_logo = "images/logo.svg" # Output file base name for HTML help builder. -htmlhelp_basename = 'TheLinuxKerneldoc' +htmlhelp_basename = "TheLinuxKerneldoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). - 'papersize': 'a4paper', - + "papersize": "a4paper", # The font size ('10pt', '11pt' or '12pt'). - 'pointsize': '11pt', - + "pointsize": "11pt", # Latex figure (float) alignment - #'figure_align': 'htbp', - + # 'figure_align': 'htbp', # Don't mangle with UTF-8 chars - 'inputenc': '', - 'utf8extra': '', - + "inputenc": "", + "utf8extra": "", # Set document margins - 'sphinxsetup': ''' + "sphinxsetup": """ hmargin=0.5in, vmargin=1in, parsedliteralwraps=true, verbatimhintsturnover=false, - ''', - + """, # # Some of our authors are fond of deep nesting; tell latex to # cope. # - 'maxlistdepth': '10', - + "maxlistdepth": "10", # For CJK One-half spacing, need to be in front of hyperref - 'extrapackages': r'\usepackage{setspace}', - + "extrapackages": r"\usepackage{setspace}", # Additional stuff for the LaTeX preamble. - 'preamble': ''' + "preamble": """ % Use some font with UTF-8 support with XeLaTeX \\usepackage{fontspec} \\setsansfont{DejaVu Sans} \\setromanfont{DejaVu Serif} \\setmonofont{DejaVu Sans Mono} - ''', + """, } # Load kerneldoc specific LaTeX settings -latex_elements['preamble'] += ''' +latex_elements["preamble"] += """ % Load kerneldoc specific LaTeX settings - \\input{kerneldoc-preamble.sty} -''' - -# With Sphinx 1.6, it is possible to change the Bg color directly -# by using: -# \definecolor{sphinxnoteBgColor}{RGB}{204,255,255} -# \definecolor{sphinxwarningBgColor}{RGB}{255,204,204} -# \definecolor{sphinxattentionBgColor}{RGB}{255,255,204} -# \definecolor{sphinximportantBgColor}{RGB}{192,255,204} -# -# However, it require to use sphinx heavy box with: -# -# \renewenvironment{sphinxlightbox} {% -# \\begin{sphinxheavybox} -# } -# \\end{sphinxheavybox} -# } -# -# Unfortunately, the implementation is buggy: if a note is inside a -# table, it isn't displayed well. So, for now, let's use boring -# black and white notes. + \\input{kerneldoc-preamble.sty} +""" # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
# Sorted in alphabetical order -latex_documents = [ -] +latex_documents = [] # Add all other index files from Documentation/ subdirectories -for fn in os.listdir('.'): +for fn in os.listdir("."): doc = os.path.join(fn, "index") if os.path.exists(doc + ".rst"): has = False @@ -424,34 +462,39 @@ for fn in os.listdir('.'): has = True break if not has: - latex_documents.append((doc, fn + '.tex', - 'Linux %s Documentation' % fn.capitalize(), - 'The kernel development community', - 'manual')) + latex_documents.append( + ( + doc, + fn + ".tex", + "Linux %s Documentation" % fn.capitalize(), + "The kernel development community", + "manual", + ) + ) # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # Additional LaTeX stuff to be copied to build directory latex_additional_files = [ - 'sphinx/kerneldoc-preamble.sty', + "sphinx/kerneldoc-preamble.sty", ] @@ -460,12 +503,11 @@ latex_additional_files = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - (master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation', - [author], 1) + (master_doc, "thelinuxkernel", "The Linux Kernel Documentation", [author], 1) ] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -473,11 +515,15 @@ man_pages = [ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation', - author, 'TheLinuxKernel', 'One line description of project.', - 'Miscellaneous'), -] +texinfo_documents = [( + master_doc, + "TheLinuxKernel", + "The Linux Kernel Documentation", + author, + "TheLinuxKernel", + "One line description of project.", + "Miscellaneous", + ),] # -- Options for Epub output ---------------------------------------------- @@ -488,9 +534,9 @@ epub_publisher = author epub_copyright = copyright # A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] +epub_exclude_files = ["search.html"] -#======= +# ======= # rst2pdf # # Grouping the document tree into PDF files. List of tuples @@ -502,17 +548,23 @@ epub_exclude_files = ['search.html'] # multiple PDF files here actually tries to get the cross-referencing right # *between* PDF files. pdf_documents = [ - ('kernel-documentation', u'Kernel', u'Kernel', u'J. Random Bozo'), + ("kernel-documentation", "Kernel", "Kernel", "J. Random Bozo"), ] # kernel-doc extension configuration for running Sphinx directly (e.g. by Read # the Docs). In a normal build, these are supplied from the Makefile via command # line arguments. -kerneldoc_bin = '../scripts/kernel-doc.py' -kerneldoc_srctree = '..' 
+kerneldoc_bin = "../scripts/kernel-doc.py" +kerneldoc_srctree = ".." # ------------------------------------------------------------------------------ # Since loadConfig overwrites settings from the global namespace, it has to be # the last statement in the conf.py file # ------------------------------------------------------------------------------ loadConfig(globals()) + + +def setup(app): + """Patterns need to be updated at init time on older Sphinx versions""" + + app.connect("config-inited", update_patterns) diff --git a/Documentation/core-api/dma-api-howto.rst b/Documentation/core-api/dma-api-howto.rst index 0bf31b6c4383..96fce2a9aa90 100644 --- a/Documentation/core-api/dma-api-howto.rst +++ b/Documentation/core-api/dma-api-howto.rst @@ -155,7 +155,7 @@ a device with limitations, it needs to be decreased. Special note about PCI: PCI-X specification requires PCI-X devices to support 64-bit addressing (DAC) for all transactions. And at least one platform (SGI -SN2) requires 64-bit coherent allocations to operate correctly when the IO +SN2) requires 64-bit coherent allocations to operate correctly when the IO bus is in PCI-X mode. For correct operation, you must set the DMA mask to inform the kernel about @@ -174,7 +174,7 @@ used instead: int dma_set_mask(struct device *dev, u64 mask); - The setup for consistent allocations is performed via a call + The setup for coherent allocations is performed via a call to dma_set_coherent_mask():: int dma_set_coherent_mask(struct device *dev, u64 mask); @@ -241,7 +241,7 @@ it would look like this:: The coherent mask will always be able to set the same or a smaller mask as the streaming mask. However for the rare case that a device driver only -uses consistent allocations, one would have to check the return value from +uses coherent allocations, one would have to check the return value from dma_set_coherent_mask(). Finally, if your device can only drive the low 24-bits of @@ -298,20 +298,20 @@ Types of DMA mappings There are two types of DMA mappings: -- Consistent DMA mappings which are usually mapped at driver +- Coherent DMA mappings which are usually mapped at driver initialization, unmapped at the end and for which the hardware should guarantee that the device and the CPU can access the data in parallel and will see updates made by each other without any explicit software flushing. - Think of "consistent" as "synchronous" or "coherent". + Think of "coherent" as "synchronous". - The current default is to return consistent memory in the low 32 + The current default is to return coherent memory in the low 32 bits of the DMA space. However, for future compatibility you should - set the consistent mask even if this default is fine for your + set the coherent mask even if this default is fine for your driver. - Good examples of what to use consistent mappings for are: + Good examples of what to use coherent mappings for are: - Network card DMA ring descriptors. - SCSI adapter mailbox command data structures. @@ -320,13 +320,13 @@ There are two types of DMA mappings: The invariant these examples all require is that any CPU store to memory is immediately visible to the device, and vice - versa. Consistent mappings guarantee this. + versa. Coherent mappings guarantee this. .. important:: - Consistent DMA memory does not preclude the usage of + Coherent DMA memory does not preclude the usage of proper memory barriers. The CPU may reorder stores to - consistent memory just as it may normal memory. Example: + coherent memory just as it may normal memory.
Example: if it is important for the device to see the first word of a descriptor updated before the second, you must do something like:: @@ -365,10 +365,10 @@ Also, systems with caches that aren't DMA-coherent will work better when the underlying buffers don't share cache lines with other data. -Using Consistent DMA mappings -============================= +Using Coherent DMA mappings +=========================== -To allocate and map large (PAGE_SIZE or so) consistent DMA regions, +To allocate and map large (PAGE_SIZE or so) coherent DMA regions, you should do:: dma_addr_t dma_handle; @@ -385,10 +385,10 @@ __get_free_pages() (but takes size instead of a page order). If your driver needs regions sized smaller than a page, you may prefer using the dma_pool interface, described below. -The consistent DMA mapping interfaces, will by default return a DMA address +The coherent DMA mapping interfaces will by default return a DMA address which is 32-bit addressable. Even if the device indicates (via the DMA mask) -that it may address the upper 32-bits, consistent allocation will only -return > 32-bit addresses for DMA if the consistent DMA mask has been +that it may address the upper 32-bits, coherent allocation will only +return > 32-bit addresses for DMA if the coherent DMA mask has been explicitly changed via dma_set_coherent_mask(). This is true of the dma_pool interface as well. @@ -497,7 +497,7 @@ program address space. Such platforms can and do report errors in the kernel logs when the DMA controller hardware detects violation of the permission setting. -Only streaming mappings specify a direction, consistent mappings +Only streaming mappings specify a direction, coherent mappings implicitly have a direction attribute setting of DMA_BIDIRECTIONAL. diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst index 2ad08517e626..3087bea715ed 100644 --- a/Documentation/core-api/dma-api.rst +++ b/Documentation/core-api/dma-api.rst @@ -8,15 +8,15 @@ This document describes the DMA API. For a more gentle introduction of the API (and actual examples), see Documentation/core-api/dma-api-howto.rst. This API is split into two pieces. Part I describes the basic API. -Part II describes extensions for supporting non-consistent memory +Part II describes extensions for supporting non-coherent memory machines. Unless you know that your driver absolutely has to support -non-consistent platforms (this is usually only legacy platforms) you +non-coherent platforms (this is usually only legacy platforms) you should only use the API described in part I. -Part I - dma_API ---------------- +Part I - DMA API ---------------- -To get the dma_API, you must #include <linux/dma-mapping.h>. This +To get the DMA API, you must #include <linux/dma-mapping.h>. This provides dma_addr_t and the interfaces described below. A dma_addr_t can hold any valid DMA address for the platform. It can be @@ -33,13 +33,13 @@ Part Ia - Using large DMA-coherent buffers dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) -Consistent memory is memory for which a write by either the device or +Coherent memory is memory for which a write by either the device or the processor can immediately be read by the processor or device without having to worry about caching effects. (You may however need to make sure to flush the processor's write buffers before telling devices to read that memory.) -This routine allocates a region of <size> bytes of consistent memory. +This routine allocates a region of <size> bytes of coherent memory.
It returns a pointer to the allocated region (in the processor's virtual address space) or NULL if the allocation failed. @@ -48,15 +48,14 @@ It also returns a <dma_handle> which may be cast to an unsigned integer the same width as the bus and given to the device as the DMA address base of the region. -Note: consistent memory can be expensive on some platforms, and the +Note: coherent memory can be expensive on some platforms, and the minimum allocation length may be as big as a page, so you should -consolidate your requests for consistent memory as much as possible. +consolidate your requests for coherent memory as much as possible. The simplest way to do that is to use the dma_pool calls (see below). -The flag parameter (dma_alloc_coherent() only) allows the caller to -specify the ``GFP_`` flags (see kmalloc()) for the allocation (the -implementation may choose to ignore flags that affect the location of -the returned memory, like GFP_DMA). +The flag parameter allows the caller to specify the ``GFP_`` flags (see +kmalloc()) for the allocation (the implementation may ignore flags that affect +the location of the returned memory, like GFP_DMA). :: void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) -Free a region of consistent memory you previously allocated. dev, -size and dma_handle must all be the same as those passed into -dma_alloc_coherent(). cpu_addr must be the virtual address returned by -the dma_alloc_coherent(). +Free a previously allocated region of coherent memory. dev, size and dma_handle +must all be the same as those passed into dma_alloc_coherent(). cpu_addr must +be the virtual address returned by dma_alloc_coherent(). -Note that unlike their sibling allocation calls, these routines -may only be called with IRQs enabled. +Note that unlike the sibling allocation call, this routine may only be called +with IRQs enabled. Part Ib - Using small DMA-coherent buffers ------------------------------------------ -To get this part of the dma_API, you must #include <linux/dmapool.h> +To get this part of the DMA API, you must #include <linux/dmapool.h> Many drivers need lots of small DMA-coherent memory regions for DMA descriptors or I/O buffers. Rather than allocating in units of a page @@ -85,78 +83,29 @@ much like a struct kmem_cache, except that they use the DMA-coherent allocator, not __get_free_pages(). Also, they understand common hardware constraints for alignment, like queue heads needing to be aligned on N-byte boundaries. +.. kernel-doc:: mm/dmapool.c + :export: -:: - - struct dma_pool * - dma_pool_create(const char *name, struct device *dev, - size_t size, size_t align, size_t alloc); - -dma_pool_create() initializes a pool of DMA-coherent buffers -for use with a given device. It must be called in a context which -can sleep. - -The "name" is for diagnostics (like a struct kmem_cache name); dev and size -are like what you'd pass to dma_alloc_coherent(). The device's hardware -alignment requirement for this type of data is "align" (which is expressed -in bytes, and must be a power of two). If your device has no boundary -crossing restrictions, pass 0 for alloc; passing 4096 says memory allocated -from this pool must not cross 4KByte boundaries. - -:: - - void * - dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, - dma_addr_t *handle) - -Wraps dma_pool_alloc() and also zeroes the returned memory if the -allocation attempt succeeded.
- - -:: - - void * - dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags, - dma_addr_t *dma_handle); - -This allocates memory from the pool; the returned memory will meet the -size and alignment requirements specified at creation time. Pass -GFP_ATOMIC to prevent blocking, or if it's permitted (not -in_interrupt, not holding SMP locks), pass GFP_KERNEL to allow -blocking. Like dma_alloc_coherent(), this returns two values: an -address usable by the CPU, and the DMA address usable by the pool's -device. - -:: - - void - dma_pool_free(struct dma_pool *pool, void *vaddr, - dma_addr_t addr); - -This puts memory back into the pool. The pool is what was passed to -dma_pool_alloc(); the CPU (vaddr) and DMA addresses are what -were returned when that routine allocated the memory being freed. - -:: - - void - dma_pool_destroy(struct dma_pool *pool); - -dma_pool_destroy() frees the resources of the pool. It must be -called in a context which can sleep. Make sure you've freed all allocated -memory back to the pool before you destroy it. +.. kernel-doc:: include/linux/dmapool.h Part Ic - DMA addressing limitations ------------------------------------ +DMA mask is a bit mask of the addressable region for the device. In other words, +if applying the DMA mask (a bitwise AND operation) to the DMA address of a +memory region does not clear any bits in the address, then the device can +perform DMA to that memory region. + +All the below functions which set a DMA mask may fail if the requested mask +cannot be used with the device, or if the device is not capable of doing DMA. + :: int dma_set_mask_and_coherent(struct device *dev, u64 mask) -Checks to see if the mask is possible and updates the device -streaming and coherent DMA mask parameters if it is. +Updates both streaming and coherent DMA masks. Returns: 0 if successful and a negative error if not. @@ -165,8 +114,7 @@ Returns: 0 if successful and a negative error if not. int dma_set_mask(struct device *dev, u64 mask) -Checks to see if the mask is possible and updates the device -parameters if it is. +Updates only the streaming DMA mask. Returns: 0 if successful and a negative error if not. @@ -175,8 +123,7 @@ Returns: 0 if successful and a negative error if not. int dma_set_coherent_mask(struct device *dev, u64 mask) -Checks to see if the mask is possible and updates the device -parameters if it is. +Updates only the coherent DMA mask. Returns: 0 if successful and a negative error if not. @@ -231,12 +178,32 @@ transfer memory ownership. Returns %false if those calls can be skipped. unsigned long dma_get_merge_boundary(struct device *dev); -Returns the DMA merge boundary. If the device cannot merge any the DMA address +Returns the DMA merge boundary. If the device cannot merge any DMA address segments, the function returns 0. Part Id - Streaming DMA mappings -------------------------------- +Streaming DMA allows to map an existing buffer for DMA transfers and then +unmap it when finished. Map functions are not guaranteed to succeed, so the +return value must be checked. + +.. note:: + + In particular, mapping may fail for memory not addressable by the + device, e.g. if it is not within the DMA mask of the device and/or a + connecting bus bridge. Streaming DMA functions try to overcome such + addressing constraints, either by using an IOMMU (a device which maps + I/O DMA addresses to physical memory addresses), or by copying the + data to/from a bounce buffer if the kernel is configured with a + :doc:`SWIOTLB `. 
However, these methods are not always + available, and even if they are, they may still fail for a number of + reasons. + + In short, a device driver may need to be wary of where buffers are + located in physical memory, especially if the DMA mask is less than 32 + bits. + :: dma_addr_t @@ -246,9 +213,7 @@ Part Id - Streaming DMA mappings Maps a piece of processor virtual memory so it can be accessed by the device and returns the DMA address of the memory. -The direction for both APIs may be converted freely by casting. -However the dma_API uses a strongly typed enumerator for its -direction: +The DMA API uses a strongly typed enumerator for its direction: ======================= ============================================= DMA_NONE no direction (used for debugging) @@ -259,31 +224,13 @@ DMA_BIDIRECTIONAL direction isn't known .. note:: - Not all memory regions in a machine can be mapped by this API. - Further, contiguous kernel virtual space may not be contiguous as + Contiguous kernel virtual space may not be contiguous as physical memory. Since this API does not provide any scatter/gather capability, it will fail if the user tries to map a non-physically contiguous piece of memory. For this reason, memory to be mapped by this API should be obtained from sources which guarantee it to be physically contiguous (like kmalloc). - Further, the DMA address of the memory must be within the - dma_mask of the device (the dma_mask is a bit mask of the - addressable region for the device, i.e., if the DMA address of - the memory ANDed with the dma_mask is still equal to the DMA - address, then the device can perform DMA to the memory). To - ensure that the memory allocated by kmalloc is within the dma_mask, - the driver may specify various platform-dependent flags to restrict - the DMA address range of the allocation (e.g., on x86, GFP_DMA - guarantees to be within the first 16MB of available DMA addresses, - as required by ISA devices). - - Note also that the above constraints on physical contiguity and - dma_mask may not apply if the platform has an IOMMU (a device which - maps an I/O DMA address to a physical memory address). However, to be - portable, device driver writers may *not* assume that such an IOMMU - exists. - .. warning:: Memory coherency operates at a granularity called the cache @@ -325,8 +272,7 @@ DMA_BIDIRECTIONAL direction isn't known enum dma_data_direction direction) Unmaps the region previously mapped. All the parameters passed in -must be identical to those passed in (and returned) by the mapping -API. +must be identical to those passed to (and returned by) dma_map_single(). :: @@ -376,10 +322,10 @@ action (e.g. reduce current DMA mapping usage or delay and try again later). dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) -Returns: the number of DMA address segments mapped (this may be shorter -than passed in if some elements of the scatter/gather list are -physically or virtually adjacent and an IOMMU maps them with a single -entry). +Maps a scatter/gather list for DMA. Returns the number of DMA address segments +mapped, which may be smaller than passed in if several consecutive +sglist entries are merged (e.g. with an IOMMU, or if some adjacent segments +just happen to be physically contiguous). Please note that the sg cannot be mapped again if it has been mapped once. The mapping process is allowed to destroy information in the sg. 
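+ +As an illustration, a driver might map a scatterlist and program the resulting +segments into its hardware roughly like this (a minimal sketch; the +hw_program_segment() helper is hypothetical):: + + int i, count; + struct scatterlist *sg; + + count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE); + if (count == 0) + return -ENOMEM; /* the mapping failed */ + + for_each_sg(sglist, sg, count, i) + hw_program_segment(hw, sg_dma_address(sg), sg_dma_len(sg)); + + /* Once the transfer has completed, unmap with the original nents, + * not the returned count. + */ + dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);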
@@ -403,9 +349,8 @@ With scatterlists, you use the resulting mapping like this:: where nents is the number of entries in the sglist. The implementation is free to merge several consecutive sglist entries -into one (e.g. with an IOMMU, or if several pages just happen to be -physically contiguous) and returns the actual number of sg entries it -mapped them to. On failure 0, is returned. +into one. The returned number is the actual number of sg entries it +mapped them to. On failure, 0 is returned. Then you should loop count times (note: this can be less than nents times) and use sg_dma_address() and sg_dma_len() macros where you previously @@ -775,19 +720,19 @@ memory or doing partial flushes. of two for easy alignment. -Part III - Debug drivers use of the DMA-API +Part III - Debug drivers use of the DMA API ------------------------------------------- -The DMA-API as described above has some constraints. DMA addresses must be +The DMA API as described above has some constraints. DMA addresses must be released with the corresponding function with the same size for example. With the advent of hardware IOMMUs it becomes more and more important that drivers do not violate those constraints. In the worst case such a violation can result in data corruption up to destroyed filesystems. -To debug drivers and find bugs in the usage of the DMA-API checking code can +To debug drivers and find bugs in the usage of the DMA API checking code can be compiled into the kernel which will tell the developer about those violations. If your architecture supports it you can select the "Enable -debugging of DMA-API usage" option in your kernel configuration. Enabling this +debugging of DMA API usage" option in your kernel configuration. Enabling this option has a performance impact. Do not enable it in production kernels. If you boot the resulting kernel will contain code which does some bookkeeping @@ -826,7 +771,7 @@ example warning message may look like this:: <4>---[ end trace f6435a98e2a38c0e ]--- The driver developer can find the driver and the device including a stacktrace -of the DMA-API call which caused this warning. +of the DMA API call which caused this warning. Per default only the first error will result in a warning message. All other errors will only silently counted. This limitation exist to prevent the code @@ -834,7 +779,7 @@ from flooding your kernel log. To support debugging a device driver this can be disabled via debugfs. See the debugfs interface documentation below for details. -The debugfs directory for the DMA-API debugging code is called dma-api/. In +The debugfs directory for the DMA API debugging code is called dma-api/. In this directory the following files can currently be found: =============================== =============================================== @@ -882,7 +827,7 @@ dma-api/driver_filter You can write a name of a driver into this file If you have this code compiled into your kernel it will be enabled by default. If you want to boot without the bookkeeping anyway you can provide -'dma_debug=off' as a boot parameter. This will disable DMA-API debugging. +'dma_debug=off' as a boot parameter. This will disable DMA API debugging. Notice that you can not enable it again at runtime. You have to reboot to do so. @@ -915,3 +860,9 @@ the driver. When driver does unmap, debug_dma_unmap() checks the flag and if this flag is still set, prints warning message that includes call trace that leads up to the unmap. 
This interface can be called from dma_mapping_error() routines to enable DMA mapping error check debugging. + +Functions and structures +======================== + +.. kernel-doc:: include/linux/scatterlist.h +.. kernel-doc:: lib/scatterlist.c diff --git a/Documentation/core-api/entry.rst b/Documentation/core-api/entry.rst index a15f9b1767a2..71d8eedc0549 100644 --- a/Documentation/core-api/entry.rst +++ b/Documentation/core-api/entry.rst @@ -105,7 +105,7 @@ has to do extra work between the various steps. In such cases it has to ensure that enter_from_user_mode() is called first on entry and exit_to_user_mode() is called last on exit. -Do not nest syscalls. Nested systcalls will cause RCU and/or context tracking +Do not nest syscalls. Nested syscalls will cause RCU and/or context tracking to print a warning. KVM === Entering or exiting guest mode is very similar to syscalls. From the host kernel point of view the CPU goes off into user space when entering the guest and returns to the kernel on exit. -kvm_guest_enter_irqoff() is a KVM-specific variant of exit_to_user_mode() -and kvm_guest_exit_irqoff() is the KVM variant of enter_from_user_mode(). +guest_state_enter_irqoff() is a KVM-specific variant of exit_to_user_mode() +and guest_state_exit_irqoff() is the KVM variant of enter_from_user_mode(). The state operations have the same ordering. Task work handling is done separately for guest at the boundary of the diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst index 7a4ca18ca6e2..a03a99c2cac5 100644 --- a/Documentation/core-api/index.rst +++ b/Documentation/core-api/index.rst @@ -54,6 +54,7 @@ Library functionality that is used throughout the kernel. union_find min_heap parser + list Low level entry and exit ======================== diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst index 111f6a595e48..e8211c4ca662 100644 --- a/Documentation/core-api/kernel-api.rst +++ b/Documentation/core-api/kernel-api.rst @@ -3,12 +3,6 @@ The Linux Kernel API ==================== -List Management Functions -========================= - -.. kernel-doc:: include/linux/list.h - :internal: - Basic C Library Functions ========================= diff --git a/Documentation/core-api/list.rst b/Documentation/core-api/list.rst new file mode 100644 index 000000000000..86873ce9adbf --- /dev/null +++ b/Documentation/core-api/list.rst @@ -0,0 +1,776 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +===================== +Linked Lists in Linux +===================== + +:Author: Nicolas Frattaroli + +.. contents:: + +Introduction +============ + +Linked lists are one of the most basic data structures used in many programs. +The Linux kernel implements several different flavours of linked lists. The +purpose of this document is not to explain linked lists in general, but to show +new kernel developers how to use the Linux kernel implementations of linked +lists. + +Please note that while linked lists certainly are ubiquitous, they are rarely +the best data structure to use in cases where a simple array doesn't already +suffice. In particular, due to their poor data locality, linked lists are a bad +choice in situations where performance is a consideration. Familiarizing +oneself with other in-kernel generic data structures, especially for concurrent +accesses, is highly encouraged.
+ +Linux implementation of doubly linked lists +=========================================== + +Linux's linked list implementations can be used by including the header file +``<linux/list.h>``. + +The doubly-linked list will likely be the most familiar to many readers. It's a +list that can efficiently be traversed forwards and backwards. + +The Linux kernel's doubly-linked list is circular in nature. This means that to +get from the head node to the tail, we can just travel one edge backwards. +Similarly, to get from the tail node to the head, we can simply travel forwards +"beyond" the tail and arrive back at the head. + +Declaring a node +---------------- + +A node in a doubly-linked list is declared by adding a struct list_head +member to the data structure you wish to be contained in the list: + +.. code-block:: c + + struct clown { + unsigned long long shoe_size; + const char *name; + struct list_head node; /* the aforementioned member */ + }; + +This may be an unfamiliar approach to some, as the classical explanation of a +linked list is a list node data structure with pointers to the previous and next +list node, as well as the payload data. Linux chooses this approach because it +allows for generic list modification code regardless of what data structure is +contained within the list. Since the struct list_head member is not a pointer +but part of the data structure proper, the container_of() pattern can be used by +the list implementation to access the payload data regardless of its type, while +staying oblivious to what said type actually is. + +Declaring and initializing a list +--------------------------------- + +A doubly-linked list can then be declared as just another struct list_head, +and initialized with the LIST_HEAD_INIT() macro during initial assignment, or +with the INIT_LIST_HEAD() function later: + +.. code-block:: c + + struct clown_car { + int tyre_pressure[4]; + struct list_head clowns; /* Looks like a node! */ + }; + + /* ... Somewhere later in our driver ... */ + + static int circus_init(struct circus_priv *circus) + { + struct clown_car other_car = { + .tyre_pressure = {10, 12, 11, 9}, + .clowns = LIST_HEAD_INIT(other_car.clowns) + }; + + INIT_LIST_HEAD(&circus->car.clowns); + + return 0; + } + +A further point of confusion to some may be that the list itself doesn't really +have its own type. The concept of the entire linked list and a +struct list_head member that points to other entries in the list are one and +the same. + +Adding nodes to the list +------------------------ + +Adding a node to the linked list is done through the list_add() function. + +We'll return to our clown car example to illustrate how nodes get added to the +list: + +.. code-block:: c + + static int circus_fill_car(struct circus_priv *circus) + { + struct clown_car *car = &circus->car; + struct clown *grock; + struct clown *dimitri; + + /* State 1 */ + + grock = kzalloc(sizeof(*grock), GFP_KERNEL); + if (!grock) + return -ENOMEM; + grock->name = "Grock"; + grock->shoe_size = 1000; + + /* Note that we're adding the "node" member */ + list_add(&grock->node, &car->clowns); + + /* State 2 */ + + dimitri = kzalloc(sizeof(*dimitri), GFP_KERNEL); + if (!dimitri) + return -ENOMEM; + dimitri->name = "Dimitri"; + dimitri->shoe_size = 50; + + list_add(&dimitri->node, &car->clowns); + + /* State 3 */ + + return 0; + } + +In State 1, our list of clowns is still empty:: + + .------. + v | + .--------. | + | clowns |--' + '--------' + +This diagram shows the singular "clowns" node pointing at itself.
In this +diagram, and all following diagrams, only the forward edges are shown, to aid in +clarity. + +In State 2, we've added Grock after the list head:: + + .--------------------. + v | + .--------. .-------. | + | clowns |---->| Grock |--' + '--------' '-------' + +This diagram shows the "clowns" node pointing at a new node labeled "Grock". +The Grock node is pointing back at the "clowns" node. + +In State 3, we've added Dimitri after the list head, resulting in the following:: + + .------------------------------------. + v | + .--------. .---------. .-------. | + | clowns |---->| Dimitri |---->| Grock |--' + '--------' '---------' '-------' + +This diagram shows the "clowns" node pointing at a new node labeled "Dimitri", +which then points at the node labeled "Grock". The "Grock" node still points +back at the "clowns" node. + +If we wanted to have Dimitri inserted at the end of the list instead, we'd use +list_add_tail(). Our code would then look like this: + +.. code-block:: c + + static int circus_fill_car(struct circus_priv *circus) + { + /* ... */ + + list_add_tail(&dimitri->node, &car->clowns); + + /* State 3b */ + + return 0; + } + +This results in the following list:: + + .------------------------------------. + v | + .--------. .-------. .---------. | + | clowns |---->| Grock |---->| Dimitri |--' + '--------' '-------' '---------' + +This diagram shows the "clowns" node pointing at the node labeled "Grock", +which points at the new node labeled "Dimitri". The node labeled "Dimitri" +points back at the "clowns" node. + +Traversing the list +------------------- + +To iterate the list, we can loop through all nodes within the list with +list_for_each(). + +In our clown example, this results in the following somewhat awkward code: + +.. code-block:: c + + static unsigned long long circus_get_max_shoe_size(struct circus_priv *circus) + { + unsigned long long res = 0; + struct clown *e; + struct list_head *cur; + + list_for_each(cur, &circus->car.clowns) { + e = list_entry(cur, struct clown, node); + if (e->shoe_size > res) + res = e->shoe_size; + } + + return res; + } + +The list_entry() macro internally uses the aforementioned container_of() to +retrieve the data structure instance that ``node`` is a member of. + +Note how the additional list_entry() call is a little awkward here. It's only +there because we're iterating through the ``node`` members, but we really want +to iterate through the payload, i.e. the ``struct clown`` that contains each +node's struct list_head. For this reason, there is a second macro: +list_for_each_entry() + +Using it would change our code to something like this: + +.. code-block:: c + + static unsigned long long circus_get_max_shoe_size(struct circus_priv *circus) + { + unsigned long long res = 0; + struct clown *e; + + list_for_each_entry(e, &circus->car.clowns, node) { + if (e->shoe_size > res) + res = e->shoe_size; + } + + return res; + } + +This eliminates the need for the list_entry() step, and our loop cursor is now +of the type of our payload. The macro is given the member name that corresponds +to the list's struct list_head within the clown data structure so that it can +still walk the list. + +Removing nodes from the list +---------------------------- + +The list_del() function can be used to remove entries from the list. It not only +removes the given entry from the list, but poisons the entry's ``prev`` and +``next`` pointers, so that unintended use of the entry after removal does not +go unnoticed. 
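+ +At the time of writing, the poisoning in include/linux/list.h looks roughly +like the following sketch, with the poison constants coming from +include/linux/poison.h: + +.. code-block:: c + + static inline void list_del(struct list_head *entry) + { + __list_del_entry(entry); + entry->next = LIST_POISON1; /* poisoned, not NULL */ + entry->prev = LIST_POISON2; + } + +The poison values are chosen to point at addresses that are normally not +mapped, so a use-after-removal typically faults instead of silently +corrupting the list.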
+ +We can extend our previous example to remove one of the entries: + +.. code-block:: c + + static int circus_fill_car(struct circus_priv *circus) + { + /* ... */ + + list_add(&dimitri->node, &car->clowns); + + /* State 3 */ + + list_del(&dimitri->node); + + /* State 4 */ + + return 0; + } + +The result of this would be this:: + + .--------------------. + v | + .--------. .-------. | .---------. + | clowns |---->| Grock |--' | Dimitri | + '--------' '-------' '---------' + +This diagram shows the "clowns" node pointing at the node labeled "Grock", +which points back at the "clowns" node. Off to the side is a lone node labeled +"Dimitri", which has no arrows pointing anywhere. + +Note how the Dimitri node does not point to itself; its pointers are +intentionally set to a "poison" value that the list code refuses to traverse. + +If we instead wanted to reinitialize the removed node to make it point at itself +again like an empty list head, we can use list_del_init(): + +.. code-block:: c + + static int circus_fill_car(struct circus_priv *circus) + { + /* ... */ + + list_add(&dimitri->node, &car->clowns); + + /* State 3 */ + + list_del_init(&dimitri->node); + + /* State 4b */ + + return 0; + } + +This results in the deleted node pointing to itself again:: + + .--------------------. .-------. + v | v | + .--------. .-------. | .---------. | + | clowns |---->| Grock |--' | Dimitri |--' + '--------' '-------' '---------' + +This diagram shows the "clowns" node pointing at the node labeled "Grock", +which points back at the "clowns" node. Off to the side is a lone node labeled +"Dimitri", which points to itself. + +Traversing whilst removing nodes +-------------------------------- + +Deleting entries while we're traversing the list will cause problems if we use +list_for_each() and list_for_each_entry(), as deleting the current entry would +modify its ``next`` pointer, which means the traversal can't properly +advance to the next list entry. + +There is a solution to this, however: list_for_each_safe() and +list_for_each_entry_safe(). These take an additional cursor parameter (a second +struct list_head pointer for the former, a second pointer to the containing +type for the latter) to use as temporary storage for the next entry during +iteration, solving the issue. + +An example of how to use it: + +.. code-block:: c + + static void circus_eject_insufficient_clowns(struct circus_priv *circus) + { + struct clown *e; + struct clown *n; /* temporary storage for safe iteration */ + + list_for_each_entry_safe(e, n, &circus->car.clowns, node) { + if (e->shoe_size < 500) + list_del(&e->node); + } + } + +Proper memory management (i.e. freeing the deleted node while making sure +nothing still references it) in this case is left as an exercise to the reader. + +Cutting a list +-------------- + +There are two helper functions to cut lists with. Both take elements from the +list ``head``, and replace the contents of the list ``list``. + +The first such function is list_cut_position(). It removes all list entries from +``head`` up to and including ``entry``, placing them in ``list`` instead. + +In this example, it's assumed we start with the following list:: + + .----------------------------------------------------------------. + v | + .--------. .-------. .---------. .-----. .---------.
+
+Cutting a list
+--------------
+
+There are two helper functions to cut lists with. Both take elements from the
+list ``head``, and replace the contents of the list ``list``.
+
+The first such function is list_cut_position(). It removes all list entries
+from ``head`` up to and including ``entry``, placing them in ``list`` instead.
+
+In this example, it's assumed we start with the following list::
+
+         .----------------------------------------------------------------.
+         v                                                                |
+    .--------.     .-------.     .---------.     .-----.     .---------.  |
+    | clowns |---->| Grock |---->| Dimitri |---->| Pic |---->| Alfredo |--'
+    '--------'     '-------'     '---------'     '-----'     '---------'
+
+With the following code, every clown up to and including "Pic" is moved from
+the "clowns" list head to a separate ``struct list_head``, initialized as the
+local stack variable ``retirement``:
+
+.. code-block:: c
+
+    static void circus_retire_clowns(struct circus_priv *circus)
+    {
+            struct list_head retirement = LIST_HEAD_INIT(retirement);
+            struct clown *grock, *dimitri, *pic, *alfredo;
+            struct clown_car *car = &circus->car;
+
+            /* ... clown initialization, list adding ... */
+
+            list_cut_position(&retirement, &car->clowns, &pic->node);
+
+            /* State 1 */
+    }
+
+The resulting ``car->clowns`` list would be this::
+
+         .----------------------.
+         v                      |
+    .--------.     .---------.  |
+    | clowns |---->| Alfredo |--'
+    '--------'     '---------'
+
+Meanwhile, the ``retirement`` list is transformed to the following::
+
+           .--------------------------------------------------.
+           v                                                  |
+    .------------.     .-------.     .---------.     .-----.  |
+    | retirement |---->| Grock |---->| Dimitri |---->| Pic |--'
+    '------------'     '-------'     '---------'     '-----'
+
+The second function, list_cut_before(), is much the same, except it cuts
+before the ``entry`` node, i.e. it removes all list entries from ``head`` up
+to but excluding ``entry``, placing them in ``list`` instead. This example
+assumes the same initial starting list as the previous example:
+
+.. code-block:: c
+
+    static void circus_retire_clowns(struct circus_priv *circus)
+    {
+            struct list_head retirement = LIST_HEAD_INIT(retirement);
+            struct clown *grock, *dimitri, *pic, *alfredo;
+            struct clown_car *car = &circus->car;
+
+            /* ... clown initialization, list adding ... */
+
+            list_cut_before(&retirement, &car->clowns, &pic->node);
+
+            /* State 1b */
+    }
+
+The resulting ``car->clowns`` list would be this::
+
+         .----------------------------------.
+         v                                  |
+    .--------.     .-----.     .---------.  |
+    | clowns |---->| Pic |---->| Alfredo |--'
+    '--------'     '-----'     '---------'
+
+Meanwhile, the ``retirement`` list is transformed to the following::
+
+           .--------------------------------------.
+           v                                      |
+    .------------.     .-------.     .---------.  |
+    | retirement |---->| Grock |---->| Dimitri |--'
+    '------------'     '-------'     '---------'
+
+It should be noted that both functions will destroy links to any existing
+nodes in the destination ``struct list_head *list``.
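+
+To illustrate what "destroy links" means in practice, consider a hypothetical
+variant of the example above in which a clown named "Beppo" is already waiting
+on the ``retirement`` list before the cut:
+
+.. code-block:: c
+
+    /* Hypothetical misuse, for illustration only */
+    list_add(&beppo->node, &retirement);
+
+    list_cut_position(&retirement, &car->clowns, &pic->node);
+
+    /* retirement now holds Grock, Dimitri and Pic; Beppo's node is no
+     * longer reachable from any list head. Unless we kept a separate
+     * pointer to Beppo, his memory is effectively leaked.
+     */
+
+The cut does not merge entries into the destination; it overwrites the
+destination head wholesale, which is why ``list`` should be empty, or a list
+whose contents are expendable, before the call.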
+
+Moving entries and partial lists
+--------------------------------
+
+The list_move() and list_move_tail() functions can be used to move an entry
+from one list to another, to either the start or end respectively.
+
+In the following example, we'll assume we start with two lists ("clowns" and
+"sidewalk") in the following initial state "State 0"::
+
+         .----------------------------------------------------------------.
+         v                                                                |
+    .--------.     .-------.     .---------.     .-----.     .---------.  |
+    | clowns |---->| Grock |---->| Dimitri |---->| Pic |---->| Alfredo |--'
+    '--------'     '-------'     '---------'     '-----'     '---------'
+
+          .-------------------.
+          v                   |
+    .----------.     .-----.  |
+    | sidewalk |---->| Pio |--'
+    '----------'     '-----'
+
+We apply the following example code to the two lists:
+
+.. code-block:: c
+
+    static void circus_clowns_exit_car(struct circus_priv *circus)
+    {
+            struct list_head sidewalk = LIST_HEAD_INIT(sidewalk);
+            struct clown *grock, *dimitri, *pic, *alfredo, *pio;
+            struct clown_car *car = &circus->car;
+
+            /* ... clown initialization, list adding ... */
+
+            /* State 0 */
+
+            list_move(&pic->node, &sidewalk);
+
+            /* State 1 */
+
+            list_move_tail(&dimitri->node, &sidewalk);
+
+            /* State 2 */
+    }
+
+In State 1, we arrive at the following situation::
+
+        .-----------------------------------------------------.
+        |                                                     |
+        v                                                     |
+    .--------.     .-------.     .---------.     .---------.  |
+    | clowns |---->| Grock |---->| Dimitri |---->| Alfredo |--'
+    '--------'     '-------'     '---------'     '---------'
+
+          .-------------------------------.
+          v                               |
+    .----------.     .-----.     .-----.  |
+    | sidewalk |---->| Pic |---->| Pio |--'
+    '----------'     '-----'     '-----'
+
+In State 2, after we've moved Dimitri to the tail of sidewalk, the situation
+changes as follows::
+
+        .-------------------------------------.
+        |                                     |
+        v                                     |
+    .--------.     .-------.     .---------.  |
+    | clowns |---->| Grock |---->| Alfredo |--'
+    '--------'     '-------'     '---------'
+
+          .-----------------------------------------------.
+          v                                               |
+    .----------.     .-----.     .-----.     .---------.  |
+    | sidewalk |---->| Pic |---->| Pio |---->| Dimitri |--'
+    '----------'     '-----'     '-----'     '---------'
+
+As long as the source and destination list head are part of the same list, we
+can also efficiently bulk move a segment of the list to the tail end of the
+list. We continue the previous example by adding a list_bulk_move_tail() after
+State 2, moving Pic and Pio to the tail end of the sidewalk list.
+
+.. code-block:: c
+
+    static void circus_clowns_exit_car(struct circus_priv *circus)
+    {
+            struct list_head sidewalk = LIST_HEAD_INIT(sidewalk);
+            struct clown *grock, *dimitri, *pic, *alfredo, *pio;
+            struct clown_car *car = &circus->car;
+
+            /* ... clown initialization, list adding ... */
+
+            /* State 0 */
+
+            list_move(&pic->node, &sidewalk);
+
+            /* State 1 */
+
+            list_move_tail(&dimitri->node, &sidewalk);
+
+            /* State 2 */
+
+            list_bulk_move_tail(&sidewalk, &pic->node, &pio->node);
+
+            /* State 3 */
+    }
+
+For the sake of brevity, only the altered "sidewalk" list at State 3 is
+depicted in the following diagram::
+
+          .-----------------------------------------------.
+          v                                               |
+    .----------.     .---------.     .-----.     .-----.  |
+    | sidewalk |---->| Dimitri |---->| Pic |---->| Pio |--'
+    '----------'     '---------'     '-----'     '-----'
+
+Do note that list_bulk_move_tail() does not do any checking as to whether all
+three supplied ``struct list_head *`` parameters really do belong to the same
+list. If you use it outside the constraints the documentation gives, then the
+result is a matter between you and the implementation.
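+
+Beyond moving clowns between cars, list_move() is also the natural building
+block for "move to front" bookkeeping, where recently used entries are kept
+near the list head. A hypothetical lookup helper for our example could use it
+like this:
+
+.. code-block:: c
+
+    static struct clown *circus_find_clown(struct circus_priv *circus,
+                                           unsigned long long shoe_size)
+    {
+            struct clown *e;
+
+            list_for_each_entry(e, &circus->car.clowns, node) {
+                    if (e->shoe_size == shoe_size) {
+                            /* Found: move to the front so repeated
+                             * lookups for popular clowns stay cheap.
+                             * We stop iterating immediately, so
+                             * modifying the list here is safe.
+                             */
+                            list_move(&e->node, &circus->car.clowns);
+                            return e;
+                    }
+            }
+
+            return NULL;
+    }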
+
+Rotating entries
+----------------
+
+A common write operation on lists, especially when using them as queues, is
+to rotate the list. A list rotation means entries at the front are sent to
+the back.
+
+For rotation, Linux provides us with two functions: list_rotate_left() and
+list_rotate_to_front(). The former can be pictured like a bicycle chain,
+taking the entry after the supplied ``struct list_head *`` and moving it to
+the tail, which in essence means the entire list, due to its circular nature,
+rotates by one position.
+
+The latter, list_rotate_to_front(), takes the same concept one step further:
+instead of advancing the list by one entry, it advances it *until* the
+specified entry is the new front.
+
+In the following example, our starting state, State 0, is the following::
+
+         .-----------------------------------------------------------------.
+         v                                                                 |
+    .--------.   .-------.   .---------.   .-----.   .---------.   .-----. |
+    | clowns |-->| Grock |-->| Dimitri |-->| Pic |-->| Alfredo |-->| Pio |-'
+    '--------'   '-------'   '---------'   '-----'   '---------'   '-----'
+
+The example code being used to demonstrate list rotations is the following:
+
+.. code-block:: c
+
+    static void circus_clowns_rotate(struct circus_priv *circus)
+    {
+            struct clown *grock, *dimitri, *pic, *alfredo, *pio;
+            struct clown_car *car = &circus->car;
+
+            /* ... clown initialization, list adding ... */
+
+            /* State 0 */
+
+            list_rotate_left(&car->clowns);
+
+            /* State 1 */
+
+            list_rotate_to_front(&alfredo->node, &car->clowns);
+
+            /* State 2 */
+    }
+
+In State 1, we arrive at the following situation::
+
+         .-----------------------------------------------------------------.
+         v                                                                 |
+    .--------.   .---------.   .-----.   .---------.   .-----.   .-------. |
+    | clowns |-->| Dimitri |-->| Pic |-->| Alfredo |-->| Pio |-->| Grock |-'
+    '--------'   '---------'   '-----'   '---------'   '-----'   '-------'
+
+Next, after the list_rotate_to_front() call, we arrive in the following
+State 2::
+
+         .-----------------------------------------------------------------.
+         v                                                                 |
+    .--------.   .---------.   .-----.   .-------.   .---------.   .-----. |
+    | clowns |-->| Alfredo |-->| Pio |-->| Grock |-->| Dimitri |-->| Pic |-'
+    '--------'   '---------'   '-----'   '-------'   '---------'   '-----'
+
+As is hopefully evident from the diagrams, the entries in front of "Alfredo"
+were cycled to the tail end of the list.
+
+Swapping entries
+----------------
+
+Another common operation is swapping two entries with each other.
+
+For this, Linux provides us with list_swap().
+
+In the following example, we have a list with three entries, and swap two of
+them. This is our starting state in "State 0"::
+
+         .-----------------------------------------.
+         v                                         |
+    .--------.   .-------.   .---------.   .-----. |
+    | clowns |-->| Grock |-->| Dimitri |-->| Pic |-'
+    '--------'   '-------'   '---------'   '-----'
+
+.. code-block:: c
+
+    static void circus_clowns_swap(struct circus_priv *circus)
+    {
+            struct clown *grock, *dimitri, *pic;
+            struct clown_car *car = &circus->car;
+
+            /* ... clown initialization, list adding ... */
+
+            /* State 0 */
+
+            list_swap(&dimitri->node, &pic->node);
+
+            /* State 1 */
+    }
+
+The resulting list at State 1 is the following::
+
+         .-----------------------------------------.
+         v                                         |
+    .--------.   .-------.   .-----.   .---------. |
+    | clowns |-->| Grock |-->| Pic |-->| Dimitri |-'
+    '--------'   '-------'   '-----'   '---------'
+
+As is evident by comparing the diagrams, the "Pic" and "Dimitri" nodes have
+traded places.
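+
+When more than two entries are out of order, swapping them pairwise gets
+tedious quickly. Although it lives outside list.h, it is worth knowing that
+the kernel also provides a merge sort for lists, declared in
+include/linux/list_sort.h. A sketch of how our clowns could be sorted by shoe
+size:
+
+.. code-block:: c
+
+    #include <linux/list_sort.h>
+
+    static int clown_cmp(void *priv, const struct list_head *a,
+                         const struct list_head *b)
+    {
+            const struct clown *ca = list_entry(a, struct clown, node);
+            const struct clown *cb = list_entry(b, struct clown, node);
+
+            /* Return > 0 if a should sort after b, <= 0 otherwise */
+            if (ca->shoe_size > cb->shoe_size)
+                    return 1;
+            return -1;
+    }
+
+    static void circus_sort_clowns(struct circus_priv *circus)
+    {
+            list_sort(NULL, &circus->car.clowns, clown_cmp);
+    }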
+
+Splicing two lists together
+---------------------------
+
+Say we have two lists, one represented by a list head we call "knie" and one
+represented by a list head we call "stey". In a hypothetical circus
+acquisition, the two lists of clowns should be spliced together. The following
+is our situation in "State 0"::
+
+        .-----------------------------------------.
+        |                                         |
+        v                                         |
+    .------.   .-------.   .---------.   .-----.  |
+    | knie |-->| Grock |-->| Dimitri |-->| Pic |--'
+    '------'   '-------'   '---------'   '-----'
+
+        .-----------------------------.
+        v                             |
+    .------.   .---------.   .-----.  |
+    | stey |-->| Alfredo |-->| Pio |--'
+    '------'   '---------'   '-----'
+
+The function to splice these two lists together is list_splice(). Our example
+code is as follows:
+
+.. code-block:: c
+
+    static void circus_clowns_splice(void)
+    {
+            struct clown *grock, *dimitri, *pic, *alfredo, *pio;
+            struct list_head knie = LIST_HEAD_INIT(knie);
+            struct list_head stey = LIST_HEAD_INIT(stey);
+
+            /* ... Clown allocation and initialization here ... */
+
+            list_add_tail(&grock->node, &knie);
+            list_add_tail(&dimitri->node, &knie);
+            list_add_tail(&pic->node, &knie);
+            list_add_tail(&alfredo->node, &stey);
+            list_add_tail(&pio->node, &stey);
+
+            /* State 0 */
+
+            list_splice(&stey, &dimitri->node);
+
+            /* State 1 */
+    }
+
+The list_splice() call here adds all the entries in ``stey`` to the list that
+``dimitri``'s ``node`` list_head is part of, right after the ``node`` of
+``dimitri``. A somewhat surprising diagram of the resulting "State 1"
+follows::
+
+        .-----------------------------------------------------------------.
+        |                                                                 |
+        v                                                                 |
+    .------.   .-------.   .---------.   .---------.   .-----.   .-----.  |
+    | knie |-->| Grock |-->| Dimitri |-->| Alfredo |-->| Pio |-->| Pic |--'
+    '------'   '-------'   '---------'   '---------'   '-----'   '-----'
+                                           ^
+              .----------------------------'
+              |
+    .------.  |
+    | stey |--'
+    '------'
+
+Traversing the ``stey`` list no longer results in correct behavior. A call of
+list_for_each() on ``stey`` results in an infinite loop, as it never returns
+back to the ``stey`` list head.
+
+This is because list_splice() did not reinitialize the list_head it took
+entries from, leaving its pointers pointing into what is now a different list.
+
+If we want to avoid this situation, list_splice_init() can be used. It does
+the same thing as list_splice(), except it reinitializes the donor list_head
+after the transplant.
+
+Concurrency considerations
+--------------------------
+
+Concurrent access and modification of a list needs to be protected with a lock
+in most cases. Alternatively and preferably, one may use the RCU primitives
+for lists in read-mostly use-cases, where read accesses to the list are common
+but modifications to the list less so. See Documentation/RCU/listRCU.rst for
+more details.
+
+Further reading
+---------------
+
+* `How does the kernel implement Linked Lists? - KernelNewbies `_
+
+Full List API
+=============
+
+.. kernel-doc:: include/linux/list.h
+   :internal:
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index af8151db88b2..50cfc7842930 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -91,12 +91,6 @@ Memory pools
 .. kernel-doc:: mm/mempool.c
    :export:
 
-DMA pools
-=========
-
-.. kernel-doc:: mm/dmapool.c
-   :export:
-
 More Memory Management Functions
 ================================
 
diff --git a/Documentation/core-api/packing.rst b/Documentation/core-api/packing.rst
index 0ce2078c8e13..f68f1e08fef9 100644
--- a/Documentation/core-api/packing.rst
+++ b/Documentation/core-api/packing.rst
@@ -319,7 +319,7 @@ Here is an example of how to use the fields APIs:
 
     #define SIZE 13
 
-    typdef struct __packed { u8 buf[SIZE]; } packed_buf_t;
+    typedef struct __packed { u8 buf[SIZE]; } packed_buf_t;
 
     static const struct packed_field_u8 fields[] = {
         PACKED_FIELD(100, 90, struct data, field1),
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 5a91df105141..607589592bfb 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -131,6 +131,29 @@ It supports two optional parameters:
 ``--no-virtualenv``
     Use OS packaging for Sphinx instead of Python virtual environment.
 
+Installing Sphinx Minimal Version
+---------------------------------
+
+When changing the Sphinx build system, it is important to ensure that
+the minimal version will still be supported. Nowadays, it is
+becoming harder to do that on modern distributions, as the minimal
+Sphinx version cannot be installed with Python 3.13 and above.
+
+Testing with the lowest supported Python version as defined at
+Documentation/process/changes.rst can be done by creating a venv
+with it and installing the minimal requirements with::
+
+    /usr/bin/python3.9 -m venv sphinx_min
+    . sphinx_min/bin/activate
+    pip install -r Documentation/sphinx/min_requirements.txt
+
+A more comprehensive test can be done by using::
+
+    scripts/test_doc_build.py
+
+This script creates one Python venv per supported version,
+optionally building documentation for a range of Sphinx versions.
+
 Sphinx Build
 ============
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index ae433261e11a..85d86f92c41b 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -750,7 +750,7 @@ compliance:
 - Test your driver with the appropriate in-kernel real-time test cases for both
   level and edge IRQs
 
-* [1] http://www.spinics.net/lists/linux-omap/msg120425.html
+* [1] https://lore.kernel.org/r/1437496011-11486-1-git-send-email-bigeasy@linutronix.de/
 * [2] https://lore.kernel.org/r/1443209283-20781-2-git-send-email-grygorii.strashko@ti.com
 * [3] https://lore.kernel.org/r/1443209283-20781-3-git-send-email-grygorii.strashko@ti.com
diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
index 1c14ba08fbfc..c2d3996b5b40 100644
--- a/Documentation/fault-injection/fault-injection.rst
+++ b/Documentation/fault-injection/fault-injection.rst
@@ -2,7 +2,7 @@ Fault injection capabilities infrastructure
 ===========================================
 
-See also drivers/md/md-faulty.c and "every_nth" module option for scsi_debug.
+See also "every_nth" module option for scsi_debug.
 
 
 Available fault injection capabilities
diff --git a/Documentation/filesystems/dax.rst b/Documentation/filesystems/dax.rst
index 08dd5e254cc5..5b283f3d1eb1 100644
--- a/Documentation/filesystems/dax.rst
+++ b/Documentation/filesystems/dax.rst
@@ -206,7 +206,6 @@ stall the CPU for an extended period, you should also not attempt to implement
 direct_access.
 
 These block devices may be used for inspiration:
-- brd: RAM backed block device driver
 - pmem: NVDIMM persistent memory driver
 
diff --git a/Documentation/filesystems/ext4/atomic_writes.rst b/Documentation/filesystems/ext4/atomic_writes.rst
index f65767df3620..aeb47ace738d 100644
--- a/Documentation/filesystems/ext4/atomic_writes.rst
+++ b/Documentation/filesystems/ext4/atomic_writes.rst
@@ -148,10 +148,10 @@ reserved during:
   only required to handle a split extent across leaf blocks.
 
 How to
-------
+~~~~~~
 
 Creating Filesystems with Atomic Write Support
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 First check the atomic write units supported by block device.
 See :ref:`atomic_write_bdev_support` for more details.
@@ -176,7 +176,7 @@ Where ``-b`` specifies the block size, ``-C`` specifies the cluster size in byte
 and ``-O bigalloc`` enables the bigalloc feature.
Application Interface -~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^ Applications can use the ``pwritev2()`` system call with the ``RWF_ATOMIC`` flag to perform atomic writes: @@ -204,7 +204,7 @@ writes are supported. .. _atomic_write_bdev_support: Hardware Support ----------------- +~~~~~~~~~~~~~~~~ The underlying storage device must support atomic write operations. Modern NVMe and SCSI devices often provide this capability. @@ -217,7 +217,7 @@ Nonzero values for these attributes indicate that the device supports atomic writes. See Also --------- +~~~~~~~~ * :doc:`bigalloc` - Documentation on the bigalloc feature * :doc:`allocators` - Documentation on block allocation in ext4 diff --git a/Documentation/filesystems/ext4/bitmaps.rst b/Documentation/filesystems/ext4/bitmaps.rst index 91c45d86e9bb..9d7d7b083a25 100644 --- a/Documentation/filesystems/ext4/bitmaps.rst +++ b/Documentation/filesystems/ext4/bitmaps.rst @@ -19,10 +19,3 @@ necessarily the case that no blocks are in use -- if ``meta_bg`` is set, the bitmaps and group descriptor live inside the group. Unfortunately, ext2fs_test_block_bitmap2() will return '0' for those locations, which produces confusing debugfs output. - -Inode Table ------------ -Inode tables are statically allocated at mkfs time. Each block group -descriptor points to the start of the table, and the superblock records -the number of inodes per group. See the section on inodes for more -information. diff --git a/Documentation/filesystems/ext4/blockgroup.rst b/Documentation/filesystems/ext4/blockgroup.rst index ed5a5cac6d40..7cbf0b2b778e 100644 --- a/Documentation/filesystems/ext4/blockgroup.rst +++ b/Documentation/filesystems/ext4/blockgroup.rst @@ -1,7 +1,10 @@ .. SPDX-License-Identifier: GPL-2.0 +Block Groups +------------ + Layout ------- +~~~~~~ The layout of a standard block group is approximately as follows (each of these fields is discussed in a separate section below): @@ -60,7 +63,7 @@ groups (flex_bg). Leftover space is used for file data blocks, indirect block maps, extent tree blocks, and extended attributes. Flexible Block Groups ---------------------- +~~~~~~~~~~~~~~~~~~~~~ Starting in ext4, there is a new feature called flexible block groups (flex_bg). In a flex_bg, several block groups are tied together as one @@ -78,7 +81,7 @@ if flex_bg is enabled. The number of block groups that make up a flex_bg is given by 2 ^ ``sb.s_log_groups_per_flex``. Meta Block Groups ------------------ +~~~~~~~~~~~~~~~~~ Without the option META_BG, for safety concerns, all block group descriptors copies are kept in the first block group. Given the default @@ -117,7 +120,7 @@ Please see an important note about ``BLOCK_UNINIT`` in the section about block and inode bitmaps. Lazy Block Group Initialization -------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A new feature for ext4 are three block group descriptor flags that enable mkfs to skip initializing other parts of the block group diff --git a/Documentation/filesystems/ext4/dynamic.rst b/Documentation/filesystems/ext4/dynamic.rst index bb0c84333341..bbad439aada2 100644 --- a/Documentation/filesystems/ext4/dynamic.rst +++ b/Documentation/filesystems/ext4/dynamic.rst @@ -6,7 +6,9 @@ Dynamic Structures Dynamic metadata are created on the fly when files and blocks are allocated to files. -.. include:: inodes.rst -.. include:: ifork.rst -.. include:: directory.rst -.. include:: attributes.rst +.. 
toctree:: + + inodes + ifork + directory + attributes diff --git a/Documentation/filesystems/ext4/globals.rst b/Documentation/filesystems/ext4/globals.rst index b17418974fd3..c6a6abce818a 100644 --- a/Documentation/filesystems/ext4/globals.rst +++ b/Documentation/filesystems/ext4/globals.rst @@ -6,9 +6,12 @@ Global Structures The filesystem is sharded into a number of block groups, each of which have static metadata at fixed locations. -.. include:: super.rst -.. include:: group_descr.rst -.. include:: bitmaps.rst -.. include:: mmp.rst -.. include:: journal.rst -.. include:: orphan.rst +.. toctree:: + + super + group_descr + bitmaps + inode_table + mmp + journal + orphan diff --git a/Documentation/filesystems/ext4/index.rst b/Documentation/filesystems/ext4/index.rst index 705d813d558f..1ff8150c50e9 100644 --- a/Documentation/filesystems/ext4/index.rst +++ b/Documentation/filesystems/ext4/index.rst @@ -5,7 +5,7 @@ ext4 Data Structures and Algorithms =================================== .. toctree:: - :maxdepth: 6 + :maxdepth: 2 :numbered: about diff --git a/Documentation/filesystems/ext4/inode_table.rst b/Documentation/filesystems/ext4/inode_table.rst new file mode 100644 index 000000000000..f7900a52c0d5 --- /dev/null +++ b/Documentation/filesystems/ext4/inode_table.rst @@ -0,0 +1,9 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Inode Table +----------- + +Inode tables are statically allocated at mkfs time. Each block group +descriptor points to the start of the table, and the superblock records +the number of inodes per group. See :doc:`inode documentation ` +for more information on inode table layout. diff --git a/Documentation/filesystems/ext4/overview.rst b/Documentation/filesystems/ext4/overview.rst index 9d4054c17ecb..171c3963d7f6 100644 --- a/Documentation/filesystems/ext4/overview.rst +++ b/Documentation/filesystems/ext4/overview.rst @@ -16,13 +16,15 @@ All fields in ext4 are written to disk in little-endian order. HOWEVER, all fields in jbd2 (the journal) are written to disk in big-endian order. -.. include:: blocks.rst -.. include:: blockgroup.rst -.. include:: special_inodes.rst -.. include:: allocators.rst -.. include:: checksums.rst -.. include:: bigalloc.rst -.. include:: inlinedata.rst -.. include:: eainode.rst -.. include:: verity.rst -.. include:: atomic_writes.rst +.. toctree:: + + blocks + blockgroup + special_inodes + allocators + checksums + bigalloc + inlinedata + eainode + verity + atomic_writes diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst index 440e4ae74e44..8eeb7ea14f61 100644 --- a/Documentation/filesystems/f2fs.rst +++ b/Documentation/filesystems/f2fs.rst @@ -218,7 +218,7 @@ mode=%s Control block allocation mode which supports "adaptive" fragmentation/after-GC situation itself. The developers use these modes to understand filesystem fragmentation/after-GC condition well, and eventually get some insights to handle them better. - In "fragment:segment", f2fs allocates a new segment in ramdom + In "fragment:segment", f2fs allocates a new segment in random position. With this, we can simulate the after-GC condition. In "fragment:block", we can scatter block allocation with "max_fragment_chunk" and "max_fragment_hole" sysfs nodes. @@ -261,7 +261,7 @@ test_dummy_encryption=%s The argument may be either "v1" or "v2", in order to select the corresponding fscrypt policy version. checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enable" - to reenable checkpointing. Is enabled by default. 
While + to re-enable checkpointing. Is enabled by default. While disabled, any unmounting or unexpected shutdowns will cause the filesystem contents to appear as they did when the filesystem was mounted with that option. diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst index 4133a336486d..ab989807a2cb 100644 --- a/Documentation/filesystems/overlayfs.rst +++ b/Documentation/filesystems/overlayfs.rst @@ -9,7 +9,7 @@ Overlay Filesystem This document describes a prototype for a new approach to providing overlay-filesystem functionality in Linux (sometimes referred to as union-filesystems). An overlay-filesystem tries to present a -filesystem which is the result over overlaying one filesystem on top +filesystem which is the result of overlaying one filesystem on top of the other. @@ -61,7 +61,7 @@ Inode properties |Configuration | Persistent | Uniform | st_ino == d_ino | d_ino == i_ino | | | st_ino | st_dev | | [*] | +==============+=====+======+=====+======+========+========+========+=======+ -| | dir | !dir | dir | !dir | dir + !dir | dir | !dir | +| | dir | !dir | dir | !dir | dir | !dir | dir | !dir | +--------------+-----+------+-----+------+--------+--------+--------+-------+ | All layers | Y | Y | Y | Y | Y | Y | Y | Y | | on same fs | | | | | | | | | @@ -425,7 +425,7 @@ of information from up to three different layers: The "lower data" file can be on any lower layer, except from the top most lower layer. -Below the top most lower layer, any number of lower most layers may be defined +Below the topmost lower layer, any number of lowermost layers may be defined as "data-only" lower layers, using double colon ("::") separators. A normal lower layer is not allowed to be below a data-only layer, so single colon separators are not allowed to the right of double colon ("::") separators. @@ -445,8 +445,8 @@ to the absolute path of the "lower data" file in the "data-only" lower layer. Instead of explicitly enabling "metacopy=on" it is sufficient to specify at least one data-only layer to enable redirection of data to a data-only layer. -In this case other forms of metacopy are rejected. Note: this way data-only -layers may be used toghether with "userxattr", in which case careful attention +In this case other forms of metacopy are rejected. Note: this way, data-only +layers may be used together with "userxattr", in which case careful attention must be given to privileges needed to change the "user.overlay.redirect" xattr to prevent misuse. @@ -515,7 +515,7 @@ supports these values: The metacopy digest is never generated or used. This is the default if verity option is not specified. - "on": - Whenever a metacopy files specifies an expected digest, the + Whenever a metacopy file specifies an expected digest, the corresponding data file must match the specified digest. When generating a metacopy file the verity digest will be set in it based on the source file (if it has one). @@ -537,7 +537,7 @@ Using an upper layer path and/or a workdir path that are already used by another overlay mount is not allowed and may fail with EBUSY. Using partially overlapping paths is not allowed and may fail with EBUSY. If files are accessed from two overlayfs mounts which share or overlap the -upper layer and/or workdir path the behavior of the overlay is undefined, +upper layer and/or workdir path, the behavior of the overlay is undefined, though it will not result in a crash or deadlock. 
Mounting an overlay using an upper layer path, where the upper layer path @@ -778,7 +778,7 @@ controlled by the "uuid" mount option, which supports these values: - "auto": (default) UUID is taken from xattr "trusted.overlay.uuid" if it exists. Upgrade to "uuid=on" on first time mount of new overlay filesystem that - meets the prerequites. + meets the prerequisites. Downgrade to "uuid=null" for existing overlay filesystems that were never mounted with "uuid=on". @@ -794,20 +794,20 @@ without significant effort. The advantage of mounting with the "volatile" option is that all forms of sync calls to the upper filesystem are omitted. -In order to avoid a giving a false sense of safety, the syncfs (and fsync) +In order to avoid giving a false sense of safety, the syncfs (and fsync) semantics of volatile mounts are slightly different than that of the rest of VFS. If any writeback error occurs on the upperdir's filesystem after a volatile mount takes place, all sync functions will return an error. Once this condition is reached, the filesystem will not recover, and every subsequent sync -call will return an error, even if the upperdir has not experience a new error +call will return an error, even if the upperdir has not experienced a new error since the last sync call. When overlay is mounted with "volatile" option, the directory "$workdir/work/incompat/volatile" is created. During next mount, overlay checks for this directory and refuses to mount if present. This is a strong -indicator that user should throw away upper and work directories and create -fresh one. In very limited cases where the user knows that the system has -not crashed and contents of upperdir are intact, The "volatile" directory +indicator that the user should discard upper and work directories and create +fresh ones. In very limited cases where the user knows that the system has +not crashed and contents of upperdir are intact, the "volatile" directory can be removed. 
diff --git a/Documentation/filesystems/ubifs-authentication.rst b/Documentation/filesystems/ubifs-authentication.rst index 3d85ee88719a..106bb9c056f6 100644 --- a/Documentation/filesystems/ubifs-authentication.rst +++ b/Documentation/filesystems/ubifs-authentication.rst @@ -443,6 +443,6 @@ References [DM-VERITY] https://www.kernel.org/doc/Documentation/device-mapper/verity.rst -[FSCRYPT-POLICY2] https://www.spinics.net/lists/linux-ext4/msg58710.html +[FSCRYPT-POLICY2] https://lore.kernel.org/r/20171023214058.128121-1-ebiggers3@gmail.com/ [UBIFS-WP] http://www.linux-mtd.infradead.org/doc/ubifs_whitepaper.pdf diff --git a/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst b/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst index a88946bd188b..d3e130455043 100644 --- a/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst +++ b/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst @@ -268,14 +268,14 @@ Example 1: One port tx AVB configuration scheme for target board // Run your appropriate tools with socket option "SO_PRIORITY" // to 3 for class A and/or to 2 for class B - // (I took at https://www.spinics.net/lists/netdev/msg460869.html) + // (I took at https://lore.kernel.org/r/20171017010128.22141-1-vinicius.gomes@intel.com/) ./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p3 -s 1500& ./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p2 -s 1500& 13) :: // run your listener on workstation (should be in same vlan) - // (I took at https://www.spinics.net/lists/netdev/msg460869.html) + // (I took at https://lore.kernel.org/r/20171017010128.22141-1-vinicius.gomes@intel.com/) ./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500 Receiving data rate: 39012 kbps Receiving data rate: 39012 kbps @@ -555,7 +555,7 @@ Example 2: Two port tx AVB configuration scheme for target board 20) :: // run your listener on workstation (should be in same vlan) - // (I took at https://www.spinics.net/lists/netdev/msg460869.html) + // (I took at https://lore.kernel.org/r/20171017010128.22141-1-vinicius.gomes@intel.com/) ./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500 Receiving data rate: 39012 kbps Receiving data rate: 39012 kbps diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index b14bd5b7cbc9..bccfa19b45df 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -43,7 +43,6 @@ util-linux 2.10o mount --version kmod 13 depmod -V e2fsprogs 1.41.4 e2fsck -V jfsutils 1.1.3 fsck.jfs -V -reiserfsprogs 3.6.3 reiserfsck -V xfsprogs 2.6.0 xfs_db -V squashfs-tools 4.0 mksquashfs -version btrfs-progs 0.18 btrfs --version @@ -262,14 +261,6 @@ The following utilities are available: - other file system utilities are also available in this package. -Reiserfsprogs -------------- - -The reiserfsprogs package should be used for reiserfs-3.6.x -(Linux kernels 2.4.x). It is a combined package and contains working -versions of ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` and -``reiserfsck``. These utils work on both i386 and alpha platforms. - Xfsprogs -------- @@ -493,11 +484,6 @@ JFSutils - -Reiserfsprogs -------------- - -- - Xfsprogs -------- diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst index 19d2ed47ff79..d1a8e5465ed9 100644 --- a/Documentation/process/coding-style.rst +++ b/Documentation/process/coding-style.rst @@ -614,7 +614,10 @@ it. When commenting the kernel API functions, please use the kernel-doc format. 
See the files at :ref:`Documentation/doc-guide/ ` and -``scripts/kernel-doc`` for details. +``scripts/kernel-doc`` for details. Note that the danger of over-commenting +applies to kernel-doc comments all the same. Do not add boilerplate +kernel-doc which simply reiterates what's obvious from the signature +of the function. The preferred style for long (multi-line) comments is: diff --git a/Documentation/scheduler/sched-deadline.rst b/Documentation/scheduler/sched-deadline.rst index a727827b8dd5..ec543a12f848 100644 --- a/Documentation/scheduler/sched-deadline.rst +++ b/Documentation/scheduler/sched-deadline.rst @@ -20,7 +20,8 @@ Deadline Task Scheduling 4.3 Default behavior 4.4 Behavior of sched_yield() 5. Tasks CPU affinity - 5.1 SCHED_DEADLINE and cpusets HOWTO + 5.1 Using cgroup v1 cpuset controller + 5.2 Using cgroup v2 cpuset controller 6. Future plans A. Test suite B. Minimal main() @@ -671,15 +672,17 @@ Deadline Task Scheduling 5. Tasks CPU affinity ===================== - -deadline tasks cannot have an affinity mask smaller that the entire - root_domain they are created on. However, affinities can be specified - through the cpuset facility (Documentation/admin-guide/cgroup-v1/cpusets.rst). + Deadline tasks cannot have a cpu affinity mask smaller than the root domain they + are created on. So, using ``sched_setaffinity(2)`` won't work. Instead, the + the deadline task should be created in a restricted root domain. This can be + done using the cpuset controller of either cgroup v1 (deprecated) or cgroup v2. + See :ref:`Documentation/admin-guide/cgroup-v1/cpusets.rst ` and + :ref:`Documentation/admin-guide/cgroup-v2.rst ` for more information. -5.1 SCHED_DEADLINE and cpusets HOWTO ------------------------------------- +5.1 Using cgroup v1 cpuset controller +------------------------------------- - An example of a simple configuration (pin a -deadline task to CPU0) - follows (rt-app is used to create a -deadline task):: + An example of a simple configuration (pin a -deadline task to CPU0) follows:: mkdir /dev/cpuset mount -t cgroup -o cpuset cpuset /dev/cpuset @@ -692,8 +695,20 @@ Deadline Task Scheduling echo 1 > cpu0/cpuset.cpu_exclusive echo 1 > cpu0/cpuset.mem_exclusive echo $$ > cpu0/tasks - rt-app -t 100000:10000:d:0 -D5 # it is now actually superfluous to specify - # task affinity + chrt --sched-runtime 100000 --sched-period 200000 --deadline 0 yes > /dev/null + +5.2 Using cgroup v2 cpuset controller +------------------------------------- + + Assuming the cgroup v2 root is mounted at ``/sys/fs/cgroup``. + + cd /sys/fs/cgroup + echo '+cpuset' > cgroup.subtree_control + mkdir deadline_group + echo 0 > deadline_group/cpuset.cpus + echo 'root' > deadline_group/cpuset.cpus.partition + echo $$ > deadline_group/cgroup.procs + chrt --sched-runtime 100000 --sched-period 200000 --deadline 0 yes > /dev/null 6. Future plans =============== @@ -731,24 +746,38 @@ Appendix A. Test suite behaves under such workloads. In this way, results are easily reproducible. rt-app is available at: https://github.com/scheduler-tools/rt-app. - Thread parameters can be specified from the command line, with something like - this:: + rt-app does not accept command line arguments, and instead reads from a JSON + configuration file. Here is an example ``config.json``: - # rt-app -t 100000:10000:d -t 150000:20000:f:10 -D5 + .. code-block:: json - The above creates 2 threads. The first one, scheduled by SCHED_DEADLINE, - executes for 10ms every 100ms. 
The second one, scheduled at SCHED_FIFO - priority 10, executes for 20ms every 150ms. The test will run for a total - of 5 seconds. + { + "tasks": { + "dl_task": { + "policy": "SCHED_DEADLINE", + "priority": 0, + "dl-runtime": 10000, + "dl-period": 100000, + "dl-deadline": 100000 + }, + "fifo_task": { + "policy": "SCHED_FIFO", + "priority": 10, + "runtime": 20000, + "sleep": 130000 + } + }, + "global": { + "duration": 5 + } + } - More interestingly, configurations can be described with a json file that - can be passed as input to rt-app with something like this:: + On running ``rt-app config.json``, it creates 2 threads. The first one, + scheduled by SCHED_DEADLINE, executes for 10ms every 100ms. The second one, + scheduled at SCHED_FIFO priority 10, executes for 20ms every 150ms. The test + will run for a total of 5 seconds. - # rt-app my_config.json - - The parameters that can be specified with the second method are a superset - of the command line options. Please refer to rt-app documentation for more - details (`/doc/*.json`). + Please refer to the rt-app documentation for the JSON schema and more examples. The second testing application is done using chrt which has support for SCHED_DEADLINE. diff --git a/Documentation/scheduler/sched-stats.rst b/Documentation/scheduler/sched-stats.rst index d82e7d2b54f0..9d6a337755f4 100644 --- a/Documentation/scheduler/sched-stats.rst +++ b/Documentation/scheduler/sched-stats.rst @@ -86,13 +86,16 @@ Domain statistics ----------------- One of these is produced per domain for each cpu described. (Note that if CONFIG_SMP is not defined, *no* domains are utilized and these lines -will not appear in the output. is an extension to the domain field -that prints the name of the corresponding sched domain. It can appear in -schedstat version 17 and above. +will not appear in the output.) domain 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 -The first field is a bit mask indicating what cpus this domain operates over. +The field prints the name of the sched domain and is only supported +with schedstat version >= 17. On previous versions, is the first +field. + +The field is a bit mask indicating what cpus this domain operates +over. 
The next 33 are a variety of sched_balance_rq() statistics in grouped into types of idleness (busy, idle and newly idle): @@ -103,12 +106,13 @@ of idleness (busy, idle and newly idle): load did not require balancing when busy 3) # of times in this domain sched_balance_rq() tried to move one or more tasks and failed, when the cpu was busy - 4) Total imbalance in load when the cpu was busy - 5) Total imbalance in utilization when the cpu was busy - 6) Total imbalance in number of tasks when the cpu was busy - 7) Total imbalance due to misfit tasks when the cpu was busy - 8) # of times in this domain pull_task() was called when busy - 9) # of times in this domain pull_task() was called even though the + 4) Total imbalance in load in this domain when the cpu was busy + 5) Total imbalance in utilization in this domain when the cpu was busy + 6) Total imbalance in number of tasks in this domain when the cpu was busy + 7) Total imbalance due to misfit tasks in this domain when the cpu was + busy + 8) # of times in this domain detach_task() was called when busy + 9) # of times in this domain detach_task() was called even though the target task was cache-hot when busy 10) # of times in this domain sched_balance_rq() was called but did not find a busier queue while the cpu was busy @@ -121,13 +125,14 @@ of idleness (busy, idle and newly idle): the load did not require balancing when the cpu was idle 14) # of times in this domain sched_balance_rq() tried to move one or more tasks and failed, when the cpu was idle - 15) Total imbalance in load when the cpu was idle - 16) Total imbalance in utilization when the cpu was idle - 17) Total imbalance in number of tasks when the cpu was idle - 18) Total imbalance due to misfit tasks when the cpu was idle - 19) # of times in this domain pull_task() was called when the cpu + 15) Total imbalance in load in this domain when the cpu was idle + 16) Total imbalance in utilization in this domain when the cpu was idle + 17) Total imbalance in number of tasks in this domain when the cpu was idle + 18) Total imbalance due to misfit tasks in this domain when the cpu was + idle + 19) # of times in this domain detach_task() was called when the cpu was idle - 20) # of times in this domain pull_task() was called even though + 20) # of times in this domain detach_task() was called even though the target task was cache-hot when idle 21) # of times in this domain sched_balance_rq() was called but did not find a busier queue while the cpu was idle @@ -140,12 +145,16 @@ of idleness (busy, idle and newly idle): load did not require balancing when the cpu was just becoming idle 25) # of times in this domain sched_balance_rq() tried to move one or more tasks and failed, when the cpu was just becoming idle - 26) Total imbalance in load when the cpu was just becoming idle - 27) Total imbalance in utilization when the cpu was just becoming idle - 28) Total imbalance in number of tasks when the cpu was just becoming idle - 29) Total imbalance due to misfit tasks when the cpu was just becoming idle - 30) # of times in this domain pull_task() was called when newly idle - 31) # of times in this domain pull_task() was called even though the + 26) Total imbalance in load in this domain when the cpu was just becoming + idle + 27) Total imbalance in utilization in this domain when the cpu was just + becoming idle + 28) Total imbalance in number of tasks in this domain when the cpu was just + becoming idle + 29) Total imbalance due to misfit tasks in this domain when the cpu was + just 
becoming idle + 30) # of times in this domain detach_task() was called when newly idle + 31) # of times in this domain detach_task() was called even though the target task was cache-hot when just becoming idle 32) # of times in this domain sched_balance_rq() was called but did not find a busier queue while the cpu was just becoming idle diff --git a/Documentation/sphinx-static/custom.css b/Documentation/sphinx-static/custom.css index f4285417c71a..06cedbae095c 100644 --- a/Documentation/sphinx-static/custom.css +++ b/Documentation/sphinx-static/custom.css @@ -136,3 +136,18 @@ div.language-selection:hover ul { div.language-selection ul li:hover { background: #dddddd; } + +/* Make xrefs more universally visible */ +a.reference, a.reference:hover { + border-bottom: none; + text-decoration: underline; + text-underline-offset: 0.3em; +} + +/* Slightly different style for sidebar links */ +div.sphinxsidebar a { border-bottom: none; } +div.sphinxsidebar a:hover { + border-bottom: none; + text-decoration: underline; + text-underline-offset: 0.3em; +} diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py index fd633f7a0bc3..563033f764bb 100644 --- a/Documentation/sphinx/automarkup.py +++ b/Documentation/sphinx/automarkup.py @@ -22,12 +22,6 @@ from kernel_abi import get_kernel_abi # RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII) -# -# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef -# -RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)', - flags=re.ASCII) - # # Sphinx 3 uses a different C role for each one of struct, union, enum and # typedef @@ -150,20 +144,12 @@ def markup_func_ref_sphinx3(docname, app, match): return target_text def markup_c_ref(docname, app, match): - class_str = {# Sphinx 2 only - RE_function: 'c-func', - RE_generic_type: 'c-type', - # Sphinx 3+ only - RE_struct: 'c-struct', + class_str = {RE_struct: 'c-struct', RE_union: 'c-union', RE_enum: 'c-enum', RE_typedef: 'c-type', } - reftype_str = {# Sphinx 2 only - RE_function: 'function', - RE_generic_type: 'type', - # Sphinx 3+ only - RE_struct: 'struct', + reftype_str = {RE_struct: 'struct', RE_union: 'union', RE_enum: 'enum', RE_typedef: 'type', @@ -249,8 +235,13 @@ def add_and_resolve_xref(app, docname, domain, reftype, target, contnode=None): if xref: return xref - - return None + # + # We didn't find the xref; if a container node was supplied, + # mark it as a broken xref + # + if contnode: + contnode['classes'].append("broken_xref") + return contnode # # Variant of markup_abi_ref() that warns whan a reference is not found diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py index e8ea80d4324c..3dc285dc70f5 100644 --- a/Documentation/sphinx/cdomain.py +++ b/Documentation/sphinx/cdomain.py @@ -1,4 +1,5 @@ # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=W0141,C0113,C0103,C0325 """ cdomain diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py index db6f0380de94..4c4375201b9e 100644 --- a/Documentation/sphinx/kernel_abi.py +++ b/Documentation/sphinx/kernel_abi.py @@ -146,8 +146,10 @@ class KernelCmd(Directive): n += 1 if f != old_f: - # Add the file to Sphinx build dependencies - env.note_dependency(os.path.abspath(f)) + # Add the file to Sphinx build dependencies if the file exists + fname = os.path.join(srctree, f) + if os.path.isfile(fname): + env.note_dependency(fname) old_f = f diff --git 
a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py index 8db176045bc5..1e566e87ebcd 100755 --- a/Documentation/sphinx/kernel_include.py +++ b/Documentation/sphinx/kernel_include.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=R0903, C0330, R0914, R0912, E0401 """ diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py index b818d4c77924..2586b4d4e494 100644 --- a/Documentation/sphinx/kerneldoc.py +++ b/Documentation/sphinx/kerneldoc.py @@ -1,4 +1,5 @@ # coding=utf-8 +# SPDX-License-Identifier: MIT # # Copyright © 2016 Intel Corporation # @@ -24,8 +25,6 @@ # Authors: # Jani Nikula # -# Please make sure this works on both python2 and python3. -# import codecs import os diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py index f1a7f13c9c60..ad495c0da270 100644 --- a/Documentation/sphinx/kfigure.py +++ b/Documentation/sphinx/kfigure.py @@ -1,4 +1,5 @@ # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=C0103, R0903, R0912, R0915 """ scalable figure and image handling diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py index ec50e1ee5223..1afb0c97f06b 100644 --- a/Documentation/sphinx/load_config.py +++ b/Documentation/sphinx/load_config.py @@ -1,4 +1,5 @@ # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=R0903, C0330, R0914, R0912, E0401 import os diff --git a/Documentation/sphinx/min_requirements.txt b/Documentation/sphinx/min_requirements.txt new file mode 100644 index 000000000000..96b5e0bfa3d7 --- /dev/null +++ b/Documentation/sphinx/min_requirements.txt @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +alabaster >=0.7,<0.8 +docutils>=0.15,<0.18 +jinja2>=2.3,<3.1 +PyYAML>=5.1,<6.1 +Sphinx==3.4.3 +sphinxcontrib-applehelp==1.0.2 +sphinxcontrib-devhelp==1.0.1 +sphinxcontrib-htmlhelp==1.0.3 +sphinxcontrib-qthelp==1.0.2 +sphinxcontrib-serializinghtml==1.1.4 diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl index b063f2f1cfb2..7b1458544e2e 100755 --- a/Documentation/sphinx/parse-headers.pl +++ b/Documentation/sphinx/parse-headers.pl @@ -1,4 +1,7 @@ #!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016 by Mauro Carvalho Chehab . + use strict; use Text::Tabs; use Getopt::Long; @@ -391,7 +394,7 @@ Report bugs to Mauro Carvalho Chehab =head1 COPYRIGHT -Copyright (c) 2016 by Mauro Carvalho Chehab . +Copyright (c) 2016 by Mauro Carvalho Chehab . License GPLv2: GNU GPL version 2 . 
diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt index 5017f307c8a4..76b4255061d0 100644 --- a/Documentation/sphinx/requirements.txt +++ b/Documentation/sphinx/requirements.txt @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 alabaster Sphinx pyyaml diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py index 180fbb50c337..3d19569e5728 100755 --- a/Documentation/sphinx/rstFlatTable.py +++ b/Documentation/sphinx/rstFlatTable.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=C0330, R0903, R0912 """ diff --git a/Documentation/tools/rtla/common_appendix.rst b/Documentation/tools/rtla/common_appendix.rst index b5cf2dc223df..53cae7537537 100644 --- a/Documentation/tools/rtla/common_appendix.rst +++ b/Documentation/tools/rtla/common_appendix.rst @@ -1,3 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0 + +EXIT STATUS +=========== + +:: + + 0 Passed: the test did not hit the stop tracing condition + 1 Error: invalid argument + 2 Failed: the test hit the stop tracing condition + REPORTING BUGS ============== Report bugs to diff --git a/Documentation/tools/rtla/rtla-timerlat-hist.rst b/Documentation/tools/rtla/rtla-timerlat-hist.rst index 03b7f3deb069..b2d8726271b3 100644 --- a/Documentation/tools/rtla/rtla-timerlat-hist.rst +++ b/Documentation/tools/rtla/rtla-timerlat-hist.rst @@ -107,3 +107,5 @@ SEE ALSO AUTHOR ====== Written by Daniel Bristot de Oliveira + +.. include:: common_appendix.rst diff --git a/Documentation/trace/boottime-trace.rst b/Documentation/trace/boottime-trace.rst index d594597201fd..3efac10adb36 100644 --- a/Documentation/trace/boottime-trace.rst +++ b/Documentation/trace/boottime-trace.rst @@ -198,8 +198,8 @@ Most of the subsystems and architecture dependent drivers will be initialized after that (arch_initcall or subsys_initcall). Thus, you can trace those with boot-time tracing. If you want to trace events before core_initcall, you can use the options -starting with ``kernel``. Some of them will be enabled eariler than the initcall -processing (for example,. ``kernel.ftrace=function`` and ``kernel.trace_event`` +starting with ``kernel``. Some of them will be enabled earlier than the initcall +processing (for example, ``kernel.ftrace=function`` and ``kernel.trace_event`` will start before the initcall.) diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index 0aada18c38c6..2b98c1720a54 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -249,7 +249,7 @@ Extended error information table, it should keep a running total of the number of bytes requested by that call_site. - We'll let it run for awhile and then dump the contents of the 'hist' + We'll let it run for a while and then dump the contents of the 'hist' file in the kmalloc event's subdirectory (for readability, a number of entries have been omitted):: diff --git a/Documentation/translations/zh_CN/how-to.rst b/Documentation/translations/zh_CN/how-to.rst index 569b0209385a..ddd99c0f9b4d 100644 --- a/Documentation/translations/zh_CN/how-to.rst +++ b/Documentation/translations/zh_CN/how-to.rst @@ -1,19 +1,19 @@ .. 
SPDX-License-Identifier: GPL-2.0 -========================= -Linux内核中文文档翻译规范 -========================= +========================== +Linux 内核中文文档翻译规范 +========================== 修订记录: - - v1.0 2025年3月28日,司延腾、慕冬亮共同编写了该规范。 + - v1.0 2025 年 3 月 28 日,司延腾、慕冬亮共同编写了该规范。 制定规范的背景 ============== 过去几年,在广大社区爱好者的友好合作下,Linux 内核中文文档迎来了蓬勃的发 展。在翻译的早期,一切都是混乱的,社区对译稿只有一个准确翻译的要求,以鼓 -励更多的开发者参与进来,这是从0到1的必然过程,所以早期的中文文档目录更加 -具有多样性,不过好在文档不多,维护上并没有过大的压力。 +励更多的开发者参与进来,这是从 0 到 1 的必然过程,所以早期的中文文档目录 +更加具有多样性,不过好在文档不多,维护上并没有过大的压力。 然而,世事变幻,不觉有年,现在内核中文文档在前进的道路上越走越远,很多潜 在的问题逐渐浮出水面,而且随着中文文档数量的增加,翻译更多的文档与提高中 @@ -34,7 +34,7 @@ reviewer 们只能耐心地指导他们如何与社区更好地合作,但是 ======== 工欲善其事必先利其器,如果您目前对内核文档翻译满怀热情,并且会独立地安装 -linux 发行版和简单地使用 linux 命令行,那么可以迅速开始了。若您尚不具备该 +Linux 发行版和简单地使用 Linux 命令行,那么可以迅速开始了。若您尚不具备该 能力,很多网站上会有详细的手把手教程,最多一个上午,您应该就能掌握对应技 能。您需要注意的一点是,请不要使用 root 用户进行后续步骤和文档翻译。 @@ -66,11 +66,18 @@ linux 发行版和简单地使用 linux 命令行,那么可以迅速开始了 cd linux ./scripts/sphinx-pre-install -以Fedora为例,它的输出是这样的:: +以 Fedora 为例,它的输出是这样的:: You should run: - sudo dnf install -y dejavu-sans-fonts dejavu-sans-mono-fonts dejavu-serif-fonts google-noto-sans-cjk-fonts graphviz-gd latexmk librsvg2-tools texlive-anyfontsize texlive-capt-of texlive-collection-fontsrecommended texlive-ctex texlive-eqparbox texlive-fncychap texlive-framed texlive-luatex85 texlive-multirow texlive-needspace texlive-tabulary texlive-threeparttable texlive-upquote texlive-wrapfig texlive-xecjk + sudo dnf install -y dejavu-sans-fonts dejavu-sans-mono-fonts \ + dejavu-serif-fonts google-noto-sans-cjk-fonts graphviz-gd \ + latexmk librsvg2-tools texlive-anyfontsize texlive-capt-of \ + texlive-collection-fontsrecommended texlive-ctex \ + texlive-eqparbox texlive-fncychap texlive-framed \ + texlive-luatex85 texlive-multirow texlive-needspace \ + texlive-tabulary texlive-threeparttable texlive-upquote \ + texlive-wrapfig texlive-xecjk Sphinx needs to be installed either: 1) via pip/pypi with: @@ -92,7 +99,8 @@ linux 发行版和简单地使用 linux 命令行,那么可以迅速开始了 https://github.com/sphinx-doc/sphinx/pull/8313 请您按照提示复制打印的命令到命令行执行,您必须具备 root 权限才能执行 sudo -开头的命令。 +开头的命令。**请注意**,最新版本 Sphinx 的文档编译速度有极大提升,强烈建议 +您通过 pip/pypi 安装最新版本 Sphinx。 如果您处于一个多用户环境中,为了避免对其他人造成影响,建议您配置单用户 sphinx 虚拟环境,即只需要执行:: @@ -126,11 +134,11 @@ sphinx 虚拟环境,即只需要执行:: 检查编译结果 ------------ -编译输出在Documentation/output/目录下,请用浏览器打开该目录下对应 +编译输出在 Documentation/output/ 目录下,请用浏览器打开该目录下对应 的文件进行检查。 -git和邮箱配置 -------------- +Git 和邮箱配置 +-------------- 打开命令行执行:: @@ -150,11 +158,11 @@ git和邮箱配置 smtpencryption = ssl smtpserver = smtp.migadu.com smtpuser = si.yanteng@linux.dev - smtppass = # 建议使用第三方客户端专用密码 + smtppass = # 建议使用第三方客户端专用密码 chainreplyto = false smtpserverport = 465 -关于邮件客户端的配置,请查阅Documentation/translations/zh_CN/process/email-clients.rst。 +关于邮件客户端的配置,请查阅 Documentation/translations/zh_CN/process/email-clients.rst。 开始翻译文档 ============ @@ -162,8 +170,8 @@ git和邮箱配置 文档索引结构 ------------ -目前中文文档是在Documentation/translations/zh_CN/目录下进行,该 -目录结构最终会与Documentation/结构一致,所以您只需要将您感兴趣的英文 +目前中文文档是在 Documentation/translations/zh_CN/ 目录下进行,该 +目录结构最终会与 Documentation/ 结构一致,所以您只需要将您感兴趣的英文 文档文件和对应的 index.rst 复制到 zh_CN 目录下对应的位置,然后修改更 上一级的 index 即可开始您的翻译。 @@ -177,13 +185,12 @@ git和邮箱配置 请执行以下命令,新建开发分支:: git checkout docs-next - git branch my-trans - git checkout my-trans + git checkout -b my-trans 译文格式要求 ------------ - - 每行长度最多不超过40个字符 + - 每行长度最多不超过 40 个字符 - 每行长度请保持一致 - 标题的下划线长度请按照一个英文一个字符、一个中文两个字符与标题对齐 - 其它的修饰符请与英文文档保持一致 @@ -192,7 +199,7 @@ git和邮箱配置 .. SPDX-License-Identifier: GPL-2.0 .. 
include:: ../disclaimer-zh_CN.rst #您需要了解该文件的路径,根 - 据您实际翻译的文档灵活调整 + 据您实际翻译的文档灵活调整 :Original: Documentation/xxx/xxx.rst #替换为您翻译的英文文档路径 @@ -203,11 +210,11 @@ git和邮箱配置 翻译技巧 -------- -中文文档有每行40字符限制,因为一个中文字符等于2个英文字符。但是社区并没有 -那么严格,一个诀窍是将您的翻译的内容与英文原文的每行长度对齐即可,这样, +中文文档有每行 40 字符限制,因为一个中文字符等于 2 个英文字符。但是社区并 +没有那么严格,一个诀窍是将您的翻译的内容与英文原文的每行长度对齐即可,这样, 您也不必总是检查有没有超限。 -如果您的英文阅读能力有限,可以考虑使用辅助翻译工具,例如 deepseek 。但是您 +如果您的英文阅读能力有限,可以考虑使用辅助翻译工具,例如 deepseek。但是您 必须仔细地打磨,使译文达到“信达雅”的标准。 **请注意** 社区不接受纯机器翻译的文档,社区工作建立在信任的基础上,请认真对待。 @@ -248,14 +255,17 @@ git和邮箱配置 Translate .../security/self-protection.rst into Chinese. - Update the translation through commit b080e52110ea #请执行git log <您翻译的英文文档路径> 复制最顶部第一个补丁的sha值的前12位,替换掉12位sha值。 + Update the translation through commit b080e52110ea ("docs: update self-protection __ro_after_init status") + # 请执行 git log --oneline <您翻译的英文文档路径>,并替换上述内容 - Signed-off-by: Yanteng Si #如果您前面的步骤正确执行,该行会自动显示,否则请检查gitconfig文件。 + Signed-off-by: Yanteng Si + # 如果您前面的步骤正确执行,该行会自动显示,否则请检查 gitconfig 文件 保存并退出。 -**请注意** 以上四行,缺少任何一行,您都将会在第一轮审阅后返工,如果您需要一个更加明确的示例,请对 zh_CN 目录执行 git log。 +**请注意** 以上四行,缺少任何一行,您都将会在第一轮审阅后返工,如果您需要一个 +更加明确的示例,请对 zh_CN 目录执行 git log。 导出补丁和制作封面 ------------------ @@ -263,6 +273,7 @@ git和邮箱配置 这个时候,可以导出补丁,做发送邮件列表最后的准备了。命令行执行:: git format-patch -N + # N 要替换为补丁数量,一般 N 大于等于 1 然后命令行会输出类似下面的内容:: @@ -286,13 +297,13 @@ warning 不需要解决:: 然后执行以下命令为补丁追加更改:: git checkout docs-next - git branch test-trans + git checkout -b test-trans-new git am 0001-xxxxx.patch ./scripts/checkpatch.pl 0001-xxxxx.patch - 直接修改您的翻译 + # 直接修改您的翻译 git add . git am --amend - 保存退出 + # 保存退出 git am 0002-xxxxx.patch …… @@ -301,28 +312,30 @@ warning 不需要解决:: 最后,如果检测时没有 warning 和 error 需要被处理或者您只有一个补丁,请跳 过下面这个步骤,否则请重新导出补丁制作封面:: - git format-patch -N --cover-letter --thread=shallow #N为您的补丁数量,N一般要大于1。 + git format-patch -N --cover-letter --thread=shallow + # N 要替换为补丁数量,一般 N 大于 1 然后命令行会输出类似下面的内容:: 0000-cover-letter.patch 0001-docs-zh_CN-add-xxxxxxxx.patch 0002-docs-zh_CN-add-xxxxxxxx.patch + …… -您需要用编辑器打开0号补丁,修改两处内容:: +您需要用编辑器打开 0 号补丁,修改两处内容:: vim 0000-cover-letter.patch ... - Subject: [PATCH 0/1] *** SUBJECT HERE *** #修改该字段,概括您的补丁集都做了哪些事情 + Subject: [PATCH 0/N] *** SUBJECT HERE *** #修改该字段,概括您的补丁集都做了哪些事情 - *** BLURB HERE *** #修改该字段,详细描述您的补丁集做了哪些事情 + *** BLURB HERE *** #修改该字段,详细描述您的补丁集做了哪些事情 Yanteng Si (1): docs/zh_CN: add xxxxx ... -如果您只有一个补丁,则可以不制作封面,即0号补丁,只需要执行:: +如果您只有一个补丁,则可以不制作封面,即 0 号补丁,只需要执行:: git format-patch -1 @@ -345,9 +358,10 @@ warning 不需要解决:: 打开上面您保存的邮件地址,执行:: - git send-email *.patch --to --cc #一个to对应一个地址,一个cc对应一个地址,有几个就写几个。 + git send-email *.patch --to --cc + # 一个 to 对应一个地址,一个 cc 对应一个地址,有几个就写几个 -执行该命令时,请确保网络通常,邮件发送成功一般会返回250。 +执行该命令时,请确保网络通常,邮件发送成功一般会返回 250。 您可以先发送给自己,尝试发出的 patch 是否可以用 'git am' 工具正常打上。 如果检查正常, 您就可以放心的发送到社区评审了。 @@ -382,15 +396,15 @@ reviewer 的评论,做到每条都有回复,每个回复都落实到位。 每次迭代一个补丁,不要一次多个:: git am <您要修改的补丁> - 直接对文件进行您的修改 + # 直接对文件进行您的修改 git add . 
git commit --amend 当您将所有的评论落实到位后,导出第二版补丁,并修改封面:: - git format-patch -N -v 2 --cover-letter --thread=shallow + git format-patch -N -v 2 --cover-letter --thread=shallow -打开0号补丁,在 BLURB HERE 处编写相较于上个版本,您做了哪些改动。 +打开 0 号补丁,在 BLURB HERE 处编写相较于上个版本,您做了哪些改动。 然后执行:: @@ -414,7 +428,7 @@ reviewer 的评论,做到每条都有回复,每个回复都落实到位。 如果您发送到邮件列表之后。发现发错了补丁集,尤其是在多个版本迭代的过程中; 自己发现了一些不妥的翻译;发送错了邮件列表…… -git email默认会抄送给您一份,所以您可以切换为审阅者的角色审查自己的补丁, +git email 默认会抄送给您一份,所以您可以切换为审阅者的角色审查自己的补丁, 并留下评论,描述有何不妥,将在下个版本怎么改,并付诸行动,重新提交,但是 注意频率,每天提交的次数不要超过两次。 @@ -425,9 +439,9 @@ git email默认会抄送给您一份,所以您可以切换为审阅者的角 ./script/checktransupdate.py -l zh_CN`` -该命令会列出需要翻译或更新的英文文档。 +该命令会列出需要翻译或更新的英文文档,结果同时保存在 checktransupdate.log 中。 -关于详细操作说明,请参考: Documentation/translations/zh_CN/doc-guide/checktransupdate.rst\ +关于详细操作说明,请参考:Documentation/translations/zh_CN/doc-guide/checktransupdate.rst。 进阶 ---- @@ -439,8 +453,8 @@ git email默认会抄送给您一份,所以您可以切换为审阅者的角 常见的问题 ========== -Maintainer回复补丁不能正常apply -------------------------------- +Maintainer 回复补丁不能正常 apply +--------------------------------- 这通常是因为您的补丁与邮件列表其他人的补丁产生了冲突,别人的补丁先被 apply 了, 您的补丁集就无法成功 apply 了,这需要您更新本地分支,在本地解决完冲突后再次提交。 @@ -455,5 +469,5 @@ Maintainer回复补丁不能正常apply 大部分情况下,是由于您发送了非纯文本格式的信件,请尽量避免使用 webmail,推荐 使用邮件客户端,比如 thunderbird,记得在设置中的回信配置那改为纯文本发送。 -如果超过了24小时,您依旧没有在发现您的邮 -件,请联系您的网络管理员帮忙解决。 +如果超过了 24 小时,您依旧没有在发现您的 +邮件,请联系您的网络管理员帮忙解决。 diff --git a/Documentation/translations/zh_CN/networking/alias.rst b/Documentation/translations/zh_CN/networking/alias.rst new file mode 100644 index 000000000000..e024d9eac50e --- /dev/null +++ b/Documentation/translations/zh_CN/networking/alias.rst @@ -0,0 +1,56 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/networking/alias.rst + +:翻译: + + 邱禹潭 Qiu Yutan + +:校译: + +====== +IP别名 +====== + +IP别名是管理每个接口存在多个IP地址/子网掩码的一种过时方法。 +虽然更新的工具如iproute2支持每个接口多个地址/前缀, +但为了向后兼容性,别名仍被支持。 + +别名通过在使用 ifconfig 时在接口名后添加冒号和一个字符串来创建。 +这个字符串通常是数字,但并非必须。 + + +别名创建 +======== + +别名的创建是通过“特殊的”接口命名机制完成的:例如, +要为eth0创建一个 200.1.1.1 的别名... +:: + + # ifconfig eth0:0 200.1.1.1 等等 + ~~ -> 请求为eth0创建别名#0(如果尚不存在) + +该命令也会设置相应的路由表项。请注意:路由表项始终指向基础接口。 + + +别名删除 +======== + +通过关闭别名即可将其删除:: + + # ifconfig eth0:0 down + ~~~~~~~~~~ -> 将删除别名 + + +别名(重新)配置 +================ + +别名不是真实的设备,但程序应该能够正常配置和引用它们(ifconfig、route等)。 + + +与主设备的关系 +============== + +如果基础设备被关闭,则其上添加的所有别名也将被删除。 diff --git a/Documentation/translations/zh_CN/networking/index.rst b/Documentation/translations/zh_CN/networking/index.rst index d07dd69f980b..bb0edcffd144 100644 --- a/Documentation/translations/zh_CN/networking/index.rst +++ b/Documentation/translations/zh_CN/networking/index.rst @@ -21,6 +21,12 @@ :maxdepth: 1 msg_zerocopy + napi + vxlan + netif-msg + xfrm_proc + netmem + alias Todolist: @@ -45,7 +51,6 @@ Todolist: * page_pool * phy * sfp-phylink -* alias * bridge * snmp_counter * checksum-offloads @@ -94,14 +99,11 @@ Todolist: * mptcp-sysctl * multiqueue * multi-pf-netdev -* napi * net_cachelines/index * netconsole * netdev-features * netdevices * netfilter-sysctl -* netif-msg -* netmem * nexthop-group-resilient * nf_conntrack-sysctl * nf_flowtable @@ -142,11 +144,9 @@ Todolist: * tuntap * udplite * vrf -* vxlan * x25 * x25-iface * xfrm_device -* xfrm_proc * xfrm_sync * xfrm_sysctl * xdp-rx-metadata diff --git a/Documentation/translations/zh_CN/networking/napi.rst b/Documentation/translations/zh_CN/networking/napi.rst new file mode 100644 index 000000000000..619971c3dea3 --- /dev/null +++ b/Documentation/translations/zh_CN/networking/napi.rst @@ -0,0 +1,362 @@ +.. 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/networking/napi.rst + +:翻译: + + 王亚鑫 Yaxin Wang + +==== +NAPI +==== + +NAPI 是 Linux 网络堆栈中使用的事件处理机制。NAPI 的名称现在不再代表任何特定含义 [#]_。 + +在基本操作中,设备通过中断通知主机有新事件发生。主机随后调度 NAPI 实例来处理这些事件。 +该设备也可以通过 NAPI 进行事件轮询,而无需先接收中断信号(:ref:`忙轮询`)。 + +NAPI 处理通常发生在软中断上下文中,但有一个选项,可以使用 :ref:`单独的内核线程` +来进行 NAPI 处理。 + +总的来说,NAPI 为驱动程序抽象了事件(数据包接收和发送)处理的上下文环境和配置情况。 + +驱动程序API +=========== + +NAPI 最重要的两个元素是 struct napi_struct 和关联的 poll 方法。struct napi_struct +持有 NAPI 实例的状态,而方法则是与驱动程序相关的事件处理器。该方法通常会释放已传输的发送 +(Tx)数据包并处理新接收的数据包。 + +.. _drv_ctrl_zh_CN: + +控制API +------- + +netif_napi_add() 和 netif_napi_del() 用于向系统中添加/删除一个 NAPI 实例。实例会被 +附加到作为参数传递的 netdevice上(并在 netdevice 注销时自动删除)。实例在添加时处于禁 +用状态。 + +napi_enable() 和 napi_disable() 管理禁用状态。禁用的 NAPI 不会被调度,并且保证其 +poll 方法不会被调用。napi_disable() 会等待 NAPI 实例的所有权被释放。 + +这些控制 API 并非幂等的。控制 API 调用在面对数据路径 API 的并发使用时是安全的,但控制 +API 调用顺序错误可能会导致系统崩溃、死锁或竞态条件。例如,连续多次调用 napi_disable() +会造成死锁。 + +数据路径API +----------- + +napi_schedule() 是调度 NAPI 轮询的基本方法。驱动程序应在其中断处理程序中调用此函数 +(更多信息请参见 :ref:`drv_sched_zh_CN`)。成功的 napi_schedule() 调用将获得 NAPI 实例 +的所有权。 + +之后,在 NAPI 被调度后,驱动程序的 poll 方法将被调用以处理事件/数据包。该方法接受一个 +``budget`` 参数 - 驱动程序可以处理任意数量的发送 (Tx) 数据包完成,但处理最多处理 +``budget`` 个接收 (Rx) 数据包。处理接收数据包通常开销更大。 + +换句话说,对于接收数据包的处理,``budget`` 参数限制了驱动程序在单次轮询中能够处理的数 +据包数量。当 ``budget`` 为 0 时,像页面池或 XDP 这类专门用于接收的 API 根本无法使用。 +无论 ``budget`` 的值是多少,skb 的发送处理都应该进行,但是如果 ``budget`` 参数为 0, +驱动程序就不能调用任何 XDP(或页面池)API。 + +.. warning:: + + 如果内核仅尝试处理skb的发送完成情况,而不处理接收 (Rx) 或 XDP 数据包,那么 ``budget`` + 参数可能为 0。 + +轮询方法会返回已完成的工作量。如果驱动程序仍有未完成的工作(例如,``budget`` 已用完), +轮询方法应精确返回 ``budget`` 的值。在这种情况下,NAPI 实例将再次被处理 / 轮询(无需 +重新调度)。 + +如果事件处理已完成(所有未处理的数据包都已处理完毕),轮询方法在返回之前应调用 napi_complete_done()。 +napi_complete_done() 会释放实例的所有权。 + +.. warning:: + + 当出现既完成了所有事件处理,又恰好达到了 ``budget`` 数量的情况时,必须谨慎处理。因为没 + 有办法将这种(很少出现的)情况报告给协议栈,所以驱动程序要么不调用 napi_complete_done() + 并等待再次被调用,要么返回 ``budget - 1``。 + + 当 ``budget`` 为 0 时,napi_complete_done() 绝对不能被调用。 + +调用序列 +-------- + +驱动程序不应假定调用的顺序是固定不变的。即使驱动程序没有调度该实例,轮询方法也可能会被调用 +(除非该实例处于禁用状态)。同样,即便 napi_schedule() 调用成功,也不能保证轮询方法一定 +会被调用(例如,如果该实例被禁用)。 + +正如在 :ref:`drv_ctrl_zh_CN` 部分所提到的,napi_disable() 以及后续对轮询方法的调用, +仅会等待该实例的所有权被释放,而不会等待轮询方法退出。这意味着,驱动程序在调用 napi_complete_done() +之后,应避免访问任何数据结构。 + +.. _drv_sched_zh_CN: + +调度与IRQ屏蔽 +------------- + +驱动程序应在调度 NAPI 实例后保持中断屏蔽 - 直到 NAPI 轮询完成,任何进一步的中断都是不必要的。 + +显式屏蔽中断的驱动程序(而非设备自动屏蔽 IRQ)应使用 napi_schedule_prep() 和 +__napi_schedule() 调用: + +.. code-block:: c + + if (napi_schedule_prep(&v->napi)) { + mydrv_mask_rxtx_irq(v->idx); + /* 在屏蔽后调度以避免竞争 */ + __napi_schedule(&v->napi); + } + +IRQ 仅应在成功调用 napi_complete_done() 后取消屏蔽: + +.. 
code-block:: c + + if (budget && napi_complete_done(&v->napi, work_done)) { + mydrv_unmask_rxtx_irq(v->idx); + return min(work_done, budget - 1); + } + +napi_schedule_irqoff() 是 napi_schedule() 的一个变体,它利用了在中断请求(IRQ)上下文 +环境中调用所带来的特性(无需屏蔽中断)。如果中断请求(IRQ)是通过线程处理的(例如启用了 +``PREEMPT_RT`` 时的情况),napi_schedule_irqoff() 会回退为使用 napi_schedule() 。 + +实例到队列的映射 +---------------- + +现代设备每个接口有多个 NAPI 实例(struct napi_struct)。关于实例如何映射到队列和中断没有 +严格要求。NAPI 主要是事件处理/轮询抽象,没有用户可见的语义。也就是说,大多数网络设备最终以 +非常相似的方式使用 NAPI。 + +NAPI 实例最常以 1:1:1 映射到中断和队列对(队列对是由一个接收队列和一个发送队列组成的一组 +队列)。 + +在不太常见的情况下,一个 NAPI 实例可能会用于处理多个队列,或者在单个内核上,接收(Rx)队列 +和发送(Tx)队列可以由不同的 NAPI 实例来处理。不过,无论队列如何分配,通常 NAPI 实例和中断 +之间仍然保持一一对应的关系。 + +值得注意的是,ethtool API 使用了 “通道” 这一术语,每个通道可以是 ``rx`` (接收)、``tx`` +(发送)或 ``combined`` (组合)类型。目前尚不清楚一个通道具体由什么构成,建议的理解方式是 +将一个通道视为一个为特定类型队列提供服务的 IRQ(中断请求)/ NAPI 实例。例如,配置为 1 个 +``rx`` 通道、1 个 ``tx`` 通道和 1 个 ``combined`` 通道的情况下,预计会使用 3 个中断、 +2 个接收队列和 2 个发送队列。 + +持久化NAPI配置 +-------------- + +驱动程序常常会动态地分配和释放 NAPI 实例。这就导致每当 NAPI 实例被重新分配时,与 NAPI 相关 +的用户配置就会丢失。netif_napi_add_config() API接口通过将每个 NAPI 实例与基于驱动程序定义 +的索引值(如队列编号)的持久化 NAPI 配置相关联,从而避免了这种配置丢失的情况。 + +使用此 API 可实现持久化的 NAPI 标识符(以及其他设置),这对于使用 ``SO_INCOMING_NAPI_ID`` +的用户空间程序来说是有益的。有关其他 NAPI 配置的设置,请参阅以下章节。 + +驱动程序应尽可能尝试使用 netif_napi_add_config()。 + +用户API +======= + +用户与 NAPI 的交互依赖于 NAPI 实例 ID。这些实例 ID 仅通过 ``SO_INCOMING_NAPI_ID`` 套接字 +选项对用户可见。 + +用户可以使用 Netlink 来查询某个设备或设备队列的 NAPI 标识符。这既可以在用户应用程序中通过编程 +方式实现,也可以使用内核源代码树中包含的一个脚本:tools/net/ynl/pyynl/cli.py 来完成。 + +例如,使用该脚本转储某个设备的所有队列(这将显示每个队列的 NAPI 标识符): + + +.. code-block:: bash + + $ kernel-source/tools/net/ynl/pyynl/cli.py \ + --spec Documentation/netlink/specs/netdev.yaml \ + --dump queue-get \ + --json='{"ifindex": 2}' + +有关可用操作和属性的更多详细信息,请参阅 ``Documentation/netlink/specs/netdev.yaml``。 + +软件IRQ合并 +----------- + +默认情况下,NAPI 不执行任何显式的事件合并。在大多数场景中,数据包的批量处理得益于设备进行 +的中断请求(IRQ)合并。不过,在某些情况下,软件层面的合并操作也很有帮助。 + +可以将 NAPI 配置为设置一个重新轮询定时器,而不是在处理完所有数据包后立即取消屏蔽硬件中断。 +网络设备的 ``gro_flush_timeout`` sysfs 配置项可用于控制该定时器的延迟时间,而 ``napi_defer_hard_irqs`` +则用于控制在 NAPI 放弃并重新启用硬件中断之前,连续进行空轮询的次数。 + +上述参数也可以通过 Netlink 的 netdev-genl 接口,基于每个 NAPI 实例进行设置。当通过 +Netlink 进行配置且是基于每个 NAPI 实例设置时,上述参数使用连字符(-)而非下划线(_) +来命名,即 ``gro-flush-timeout`` 和 ``napi-defer-hard-irqs``。 + +基于每个 NAPI 实例的配置既可以在用户应用程序中通过编程方式完成,也可以使用内核源代码树中的 +一个脚本实现,该脚本为 ``tools/net/ynl/pyynl/cli.py``。 + +例如,通过如下方式使用该脚本: + +.. code-block:: bash + + $ kernel-source/tools/net/ynl/pyynl/cli.py \ + --spec Documentation/netlink/specs/netdev.yaml \ + --do napi-set \ + --json='{"id": 345, + "defer-hard-irqs": 111, + "gro-flush-timeout": 11111}' + +类似地,参数 ``irq-suspend-timeout`` 也可以通过 netlink 的 netdev-genl 设置。没有全局 +的 sysfs 参数可用于设置这个值。 + +``irq-suspend-timeout`` 用于确定应用程序可以完全挂起 IRQ 的时长。与 SO_PREFER_BUSY_POLL +结合使用,后者可以通过 ``EPIOCSPARAMS`` ioctl 在每个 epoll 上下文中设置。 + +.. _poll_zh_CN: + +忙轮询 +------ + +忙轮询允许用户进程在设备中断触发前检查传入的数据包。与其他忙轮询一样,它以 CPU 周期换取更低 +的延迟(生产环境中 NAPI 忙轮询的使用尚不明确)。 + +通过在选定套接字上设置 ``SO_BUSY_POLL`` 或使用全局 ``net.core.busy_poll`` 和 ``net.core.busy_read`` +等 sysctls 启用忙轮询。还存在基于 io_uring 的 NAPI 忙轮询 API 可使用。 + +基于epoll的忙轮询 +----------------- + +可以从 ``epoll_wait`` 调用直接触发数据包处理。为了使用此功能,用户应用程序必须确保添加到 +epoll 上下文的所有文件描述符具有相同的 NAPI ID。 + +如果应用程序使用专用的 acceptor 线程,那么该应用程序可以获取传入连接的 NAPI ID(使用 +SO_INCOMING_NAPI_ID)然后将该文件描述符分发给工作线程。工作线程将该文件描述符添加到其 +epoll 上下文。这确保了每个工作线程的 epoll 上下文中所包含的文件描述符具有相同的 NAPI ID。 + +或者,如果应用程序使用 SO_REUSEPORT,可以插入 bpf 或 ebpf 程序来分发传入连接,使得每个 +线程只接收具有相同 NAPI ID 的连接。但是必须谨慎处理系统中可能存在多个网卡的情况。 + +为了启用忙轮询,有两种选择: + +1. 
``/proc/sys/net/core/busy_poll`` 可以设置为微秒数以在忙循环中等待事件。这是一个系统 + 范围的设置,将导致所有基于 epoll 的应用程序在调用 epoll_wait 时忙轮询。这可能不是理想 + 的情况,因为许多应用程序可能不需要忙轮询。 + +2. 使用最新内核的应用程序可以在 epoll 上下文的文件描述符上发出 ioctl 来设置(``EPIOCSPARAMS``) + 或获取(``EPIOCGPARAMS``) ``struct epoll_params``,用户程序定义如下: + +.. code-block:: c + + struct epoll_params { + uint32_t busy_poll_usecs; + uint16_t busy_poll_budget; + uint8_t prefer_busy_poll; + + /* 将结构填充到 64 位的倍数 */ + uint8_t __pad; + }; + +IRQ缓解 +------- + +虽然忙轮询旨在用于低延迟应用,但类似的机制可用于减少中断请求。 + +每秒高请求的应用程序(尤其是路由/转发应用程序和特别使用 AF_XDP 套接字的应用程序) +可能希望在处理完一个请求或一批数据包之前不被中断。 + +此类应用程序可以向内核承诺会定期执行忙轮询操作,而驱动程序应将设备的中断请求永久屏蔽。 +通过使用 ``SO_PREFER_BUSY_POLL`` 套接字选项可启用此模式。为避免系统出现异常,如果 +在 ``gro_flush_timeout`` 时间内没有进行任何忙轮询调用,该承诺将被撤销。对于基于 +epoll 的忙轮询应用程序,可以将 ``struct epoll_params`` 结构体中的 ``prefer_busy_poll`` +字段设置为 1,并使用 ``EPIOCSPARAMS`` 输入 / 输出控制(ioctl)操作来启用此模式。 +更多详情请参阅上述章节。 + +NAPI 忙轮询的 budget 低于默认值(这符合正常忙轮询的低延迟意图)。减少中断请求的场景中 +并非如此,因此 budget 可以通过 ``SO_BUSY_POLL_BUDGET`` 套接字选项进行调整。对于基于 +epoll 的忙轮询应用程序,可以通过调整 ``struct epoll_params`` 中的 ``busy_poll_budget`` +字段为特定值,并使用 ``EPIOCSPARAMS`` ioctl 在特定 epoll 上下文中设置。更多详细信 +息请参见上述部分。 + +需要注意的是,为 ``gro_flush_timeout`` 选择较大的值会延迟中断请求,以实现更好的批 +量处理,但在系统未满载时会增加延迟。为 ``gro_flush_timeout`` 选择较小的值可能会因 +设备中断请求和软中断处理而干扰尝试进行忙轮询的用户应用程序。应权衡这些因素后谨慎选择 +该值。基于 epoll 的忙轮询应用程序可以通过为 ``maxevents`` 选择合适的值来减少用户 +处理的干扰。 + +用户可能需要考虑使用另一种方法,IRQ 挂起,以帮助应对这些权衡问题。 + +IRQ挂起 +------- + +IRQ 挂起是一种机制,其中设备 IRQ 在 epoll 触发 NAPI 数据包处理期间被屏蔽。 + +只要应用程序对 epoll_wait 的调用成功获取事件,内核就会推迟 IRQ 挂起定时器。如果 +在忙轮询期间没有获取任何事件(例如,因为网络流量减少),则会禁用IRQ挂起功能,并启 +用上述减少中断请求的策略。 + +这允许用户在 CPU 消耗和网络处理效率之间取得平衡。 + +要使用此机制: + + 1. 每个 NAPI 的配置参数 ``irq-suspend-timeout`` 应设置为应用程序可以挂起 + IRQ 的最大时间(纳秒)。这通过 netlink 完成,如上所述。此超时时间作为一 + 种安全机制,如果应用程序停滞,将重新启动中断驱动程序的中断处理。此值应选择 + 为覆盖用户应用程序调用 epoll_wait 处理数据所需的时间,需注意的是,应用程 + 序可通过在调用 epoll_wait 时设置 ``max_events`` 来控制获取的数据量。 + + 2. sysfs 参数或每个 NAPI 的配置参数 ``gro_flush_timeout`` 和 ``napi_defer_hard_irqs`` + 可以设置为较低值。它们将用于在忙轮询未找到数据时延迟 IRQs。 + + 3. 必须将 ``prefer_busy_poll`` 标志设置为 true。如前文所述,可使用 ``EPIOCSPARAMS`` + ioctl操作来完成此设置。 + + 4. 应用程序按照上述方式使用 epoll 触发 NAPI 数据包处理。 + +如上所述,只要后续对 epoll_wait 的调用向用户空间返回事件,``irq-suspend-timeout`` +就会被推迟并且 IRQ 会被禁用。这允许应用程序在无干扰的情况下处理数据。 + +一旦 epoll_wait 的调用没有找到任何事件,IRQ 挂起会被自动禁用,并且 ``gro_flush_timeout`` +和 ``napi_defer_hard_irqs`` 缓解机制将开始起作用。 + +预期是 ``irq-suspend-timeout`` 的设置值会远大于 ``gro_flush_timeout``,因为 ``irq-suspend-timeout`` +应在一个用户空间处理周期内暂停中断请求。 + +虽然严格来说不必通过 ``napi_defer_hard_irqs`` 和 ``gro_flush_timeout`` 来执行 IRQ 挂起, +但强烈建议这样做。 + +中断请求挂起会使系统在轮询模式和由中断驱动的数据包传输模式之间切换。在网络繁忙期间,``irq-suspend-timeout`` +会覆盖 ``gro_flush_timeout``,使系统保持忙轮询状态,但是当 epoll 未发现任何事件时,``gro_flush_timeout`` +和 ``napi_defer_hard_irqs`` 的设置将决定下一步的操作。 + +有三种可能的网络处理和数据包交付循环: + +1) 硬中断 -> 软中断 -> NAPI 轮询;基本中断交付 +2) 定时器 -> 软中断 -> NAPI 轮询;延迟的 IRQ 处理 +3) epoll -> 忙轮询 -> NAPI 轮询;忙循环 + +循环 2 可以接管循环 1,如果设置了 ``gro_flush_timeout`` 和 ``napi_defer_hard_irqs``。 + +如果设置了 ``gro_flush_timeout`` 和 ``napi_defer_hard_irqs``,循环 2 和 3 将互相“争夺”控制权。 + +在繁忙时期,``irq-suspend-timeout`` 用作循环 2 的定时器,这基本上使网络处理倾向于循环 3。 + +如果不设置 ``gro_flush_timeout`` 和 ``napi_defer_hard_irqs``,循环 3 无法从循环 1 接管。 + +因此,建议设置 ``gro_flush_timeout`` 和 ``napi_defer_hard_irqs``,因为若不这样做,设置 +``irq-suspend-timeout`` 可能不会有明显效果。 + +.. 
_threaded_zh_CN: + +线程化NAPI +---------- + +线程化 NAPI 是一种操作模式,它使用专用的内核线程而非软件中断上下文来进行 NAPI 处理。这种配置 +是针对每个网络设备的,并且会影响该设备的所有 NAPI 实例。每个 NAPI 实例将生成一个单独的线程 +(称为 ``napi/${ifc-name}-${napi-id}`` )。 + +建议将每个内核线程固定到单个 CPU 上,这个 CPU 与处理中断的 CPU 相同。请注意,中断请求(IRQ) +和 NAPI 实例之间的映射关系可能并不简单(并且取决于驱动程序)。NAPI 实例 ID 的分配顺序将与内 +核线程的进程 ID 顺序相反。 + +线程化 NAPI 是通过向网络设备的 sysfs 目录中的 ``threaded`` 文件写入 0 或 1 来控制的。 + +.. rubric:: 脚注 + +.. [#] NAPI 最初在 2.4 Linux 中被称为 New API。 diff --git a/Documentation/translations/zh_CN/networking/netif-msg.rst b/Documentation/translations/zh_CN/networking/netif-msg.rst new file mode 100644 index 000000000000..877399b169fe --- /dev/null +++ b/Documentation/translations/zh_CN/networking/netif-msg.rst @@ -0,0 +1,92 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/networking/netif-msg.rst + +:翻译: + + 王亚鑫 Wang Yaxin + +================ +网络接口消息级别 +================ + +网络接口消息级别设置的设计方案。 + +历史背景 +-------- + +调试消息接口的设计遵循并受制于向后兼容性及历史实践。理解其发展历史有助于把握 +当前实践,并将其与旧版驱动代码相关联。 + +自Linux诞生之初,每个网络设备驱动均包含一个本地整型变量以控制调试消息级别。 +消息级别范围为0至7,数值越大表示输出越详细。 + +消息级别的定义在3级之后未明确细化,但实际实现通常与指定级别相差±1。驱动程序 +成熟后,冗余的详细级别消息常被移除。 + + - 0 最简消息,仅显示致命错误的关键信息。 + - 1 标准消息,初始化状态。无运行时消息。 + - 2 特殊介质选择消息,通常由定时器驱动。 + - 3 接口开启和停止消息,包括正常状态信息。 + - 4 Tx/Rx帧错误消息及异常驱动操作。 + - 5 Tx数据包队列信息、中断事件。 + - 6 每个完成的Tx数据包和接收的Rx数据包状态。 + - 7 Tx/Rx数据包初始内容。 + +最初,该消息级别变量在各驱动中具有唯一名称(如"lance_debug"),便于通过 +内核符号调试器定位和修改其设置。模块化内核出现后,变量统一重命名为"debug", +并作为模块参数设置。 + +这种方法效果良好。然而,人们始终对附加功能存在需求。多年来,以下功能逐渐 +成为合理且易于实现的增强方案: + + - 通过ioctl()调用修改消息级别。 + - 按接口而非驱动设置消息级别。 + - 对发出的消息类型进行更具选择性的控制。 + +netif_msg 建议添加了这些功能,仅带来了轻微的复杂性增加和代码规模增长。 + +推荐方案如下: + + - 保留驱动级整型变量"debug"作为模块参数,默认值为'1'。 + + - 添加一个名为 "msg_enable" 的接口私有变量。该变量是位图而非级别, + 并按如下方式初始化:: + + 1 << debug + + 或更精确地说:: + + debug < 0 ? 0 : 1 << min(sizeof(int)-1, debug) + + 消息应从以下形式更改:: + + if (debug > 1) + printk(MSG_DEBUG "%s: ... + + 改为:: + + if (np->msg_enable & NETIF_MSG_LINK) + printk(MSG_DEBUG "%s: ... + +消息级别命名对应关系 + + + ========= =================== ============ + 旧级别 名称 位位置 + ========= =================== ============ + 1 NETIF_MSG_PROBE 0x0002 + 2 NETIF_MSG_LINK 0x0004 + 2 NETIF_MSG_TIMER 0x0004 + 3 NETIF_MSG_IFDOWN 0x0008 + 3 NETIF_MSG_IFUP 0x0008 + 4 NETIF_MSG_RX_ERR 0x0010 + 4 NETIF_MSG_TX_ERR 0x0010 + 5 NETIF_MSG_TX_QUEUED 0x0020 + 5 NETIF_MSG_INTR 0x0020 + 6 NETIF_MSG_TX_DONE 0x0040 + 6 NETIF_MSG_RX_STATUS 0x0040 + 7 NETIF_MSG_PKTDATA 0x0080 + ========= =================== ============ diff --git a/Documentation/translations/zh_CN/networking/netmem.rst b/Documentation/translations/zh_CN/networking/netmem.rst new file mode 100644 index 000000000000..fe351a240f02 --- /dev/null +++ b/Documentation/translations/zh_CN/networking/netmem.rst @@ -0,0 +1,92 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/networking/netmem.rst + +:翻译: + + 王亚鑫 Wang Yaxin + +================== +网络驱动支持Netmem +================== + +本文档概述了网络驱动支持netmem(一种抽象内存类型)的要求,该内存类型 +支持设备内存 TCP 等功能。通过支持netmem,驱动可以灵活适配不同底层内 +存类型(如设备内存TCP),且无需或仅需少量修改。 + +Netmem的优势: + +* 灵活性:netmem 可由不同内存类型(如 struct page、DMA-buf)支持, + 使驱动程序能够支持设备内存 TCP 等各种用例。 +* 前瞻性:支持netmem的驱动可无缝适配未来依赖此功能的新特性。 +* 简化开发:驱动通过统一API与netmem交互,无需关注底层内存的实现差异。 + +驱动RX要求 +========== + +1. 驱动必须支持page_pool。 + +2. 驱动必须支持tcp-data-split ethtool选项。 + +3. 
驱动必须使用page_pool netmem API处理有效载荷内存。当前netmem API + 与page API一一对应。转换时需要将page API替换为netmem API,并用驱动 + 中的netmem_refs跟踪内存而非 `struct page *`: + + - page_pool_alloc -> page_pool_alloc_netmem + - page_pool_get_dma_addr -> page_pool_get_dma_addr_netmem + - page_pool_put_page -> page_pool_put_netmem + + 目前并非所有页 pageAPI 都有对应的 netmem 等效接口。如果你的驱动程序 + 依赖某个尚未实现的 netmem API,请直接实现并提交至 netdev@邮件列表, + 或联系维护者及 almasrymina@google.com 协助添加该 netmem API。 + +4. 驱动必须设置以下PP_FLAGS: + + - PP_FLAG_DMA_MAP:驱动程序无法对 netmem 执行 DMA 映射。此时驱动 + 程序必须将 DMA 映射操作委托给 page_pool,由其判断何时适合(或不适合) + 进行 DMA 映射。 + - PP_FLAG_DMA_SYNC_DEV:驱动程序无法保证 netmem 的 DMA 地址一定能 + 完成 DMA 同步。此时驱动程序必须将 DMA 同步操作委托给 page_pool,由 + 其判断何时适合(或不适合)进行 DMA 同步。 + - PP_FLAG_ALLOW_UNREADABLE_NETMEM:仅当启用 tcp-data-split 时, + 驱动程序必须显式设置此标志。 + +5. 驱动不得假设netmem可读或基于页。当netmem_address()返回NULL时,表示 +内存不可读。驱动需正确处理不可读的netmem,例如,当netmem_address()返回 +NULL时,避免访问内容。 + + 理想情况下,驱动程序不应通过netmem_is_net_iov()等辅助函数检查底层 + netmem 类型,也不应通过netmem_to_page()或netmem_to_net_iov()将 + netmem 转换为其底层类型。在大多数情况下,系统会提供抽象这些复杂性的 + netmem 或 page_pool 辅助函数(并可根据需要添加更多)。 + +6. 驱动程序必须使用page_pool_dma_sync_netmem_for_cpu()代替dma_sync_single_range_for_cpu()。 +对于某些内存提供者,CPU 的 DMA 同步将由 page_pool 完成;而对于其他提供者 +(特别是 dmabuf 内存提供者),CPU 的 DMA 同步由使用 dmabuf API 的用户空 +间负责。驱动程序必须将整个 DMA 同步操作委托给 page_pool,以确保操作正确执行。 + +7. 避免在 page_pool 之上实现特定于驱动程序内存回收机制。由于 netmem 可能 +不由struct page支持,驱动程序不能保留struct page来进行自定义回收。不过, +可为此目的通过page_pool_fragment_netmem()或page_pool_ref_netmem()保留 +page_pool 引用,但需注意某些 netmem 类型的循环时间可能更长(例如零拷贝场景 +下用户空间持有引用的情况)。 + +驱动TX要求 +========== + +1. 驱动程序绝对不能直接把 netmem 的 dma_addr 传递给任何 dma-mapping API。这 +是由于 netmem 的 dma_addr 可能源自 dma-buf 这类和 dma-mapping API 不兼容的 +源头。 + +应当使用netmem_dma_unmap_page_attrs()和netmem_dma_unmap_addr_set()等辅助 +函数来替代dma_unmap_page[_attrs]()、dma_unmap_addr_set()。不管 dma_addr +来源如何,netmem 的这些变体都能正确处理 netmem dma_addr,在合适的时候会委托给 +dma-mapping API 去处理。 + +目前,并非所有的 dma-mapping API 都有对应的 netmem 版本。要是你的驱动程序需要 +使用某个还不存在的 netmem API,你可以自行添加并提交到 netdev@,也可以联系维护 +人员或者发送邮件至 almasrymina@google.com 寻求帮助。 + +2. 驱动程序应通过设置 netdev->netmem_tx = true 来表明自身支持 netmem 功能。 diff --git a/Documentation/translations/zh_CN/networking/vxlan.rst b/Documentation/translations/zh_CN/networking/vxlan.rst new file mode 100644 index 000000000000..e319eddfcdbe --- /dev/null +++ b/Documentation/translations/zh_CN/networking/vxlan.rst @@ -0,0 +1,85 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/networking/vxlan.rst + +:翻译: + + 范雨 Fan Yu + +:校译: + + - 邱禹潭 Qiu Yutan + - 徐鑫 xu xin + +========================== +虚拟扩展本地局域网协议文档 +========================== + +VXLAN 协议是一种隧道协议,旨在解决 IEEE 802.1q 中 VLAN ID(4096)有限的问题。 +VXLAN 将标识符的大小扩展到 24 位(16777216)。 + +VXLAN 在 IETF RFC 7348 中进行了描述,并已由多家供应商设计实现。 +该协议通过 UDP 协议运行,并使用特定目的端口。 +本文档介绍了 Linux 内核隧道设备,Openvswitch 也有单独的 VXLAN 实现。 + +与大多数隧道不同,VXLAN 是 1 对 N 的网络,而不仅仅是点对点网络。 +VXLAN 设备可以通过类似于学习桥接器的方式动态学习另一端点的 IP 地址,也可以利用静态配置的转发条目。 + +VXLAN 的管理方式与它的两个近邻 GRE 和 VLAN 相似。 +配置 VXLAN 需要 iproute2 的版本与 VXLAN 首次向上游合并的内核版本相匹配。 + +1. 创建 vxlan 设备:: + + # ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1 dstport 4789 + +这将创建一个名为 vxlan0 的网络设备,该设备通过 eth1 使用组播组 239.1.1.1 处理转发表中没有对应条目的流量。 +目标端口号设置为 IANA 分配的值 4789,VXLAN 的 Linux 实现早于 IANA 选择标准目的端口号的时间。 +因此默认使用 Linux 选择的值,以保持向后兼容性。 + +2. 删除 vxlan 设备:: + + # ip link delete vxlan0 + +3. 查看 vxlan 设备信息:: + + # ip -d link show vxlan0 + +使用新的 bridge 命令可以创建、销毁和显示 vxlan 转发表。 + +1. 创建vxlan转发表项:: + + # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0 + +2. 
删除vxlan转发表项:: + + # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0 + +3. 显示vxlan转发表项:: + + # bridge fdb show dev vxlan0 + +以下网络接口控制器特性可能表明对 UDP 隧道相关的卸载支持(最常见的是 VXLAN 功能, +但是对特定封装协议的支持取决于网络接口控制器): + + - `tx-udp_tnl-segmentation` + - `tx-udp_tnl-csum-segmentation` + 对 UDP 封装帧执行 TCP 分段卸载的能力 + + - `rx-udp_tunnel-port-offload` + 在接收端解析 UDP 封装帧,使网络接口控制器能够执行协议感知卸载, + 例如内部帧的校验和验证卸载(只有不带协议感知卸载的网络接口控制器才需要) + +对于支持 `rx-udp_tunnel-port-offload` 的设备,可使用 `ethtool` 查询当前卸载端口的列表:: + + $ ethtool --show-tunnels eth0 + Tunnel information for eth0: + UDP port table 0: + Size: 4 + Types: vxlan + No entries + UDP port table 1: + Size: 4 + Types: geneve, vxlan-gpe + Entries (1): + port 1230, vxlan-gpe diff --git a/Documentation/translations/zh_CN/networking/xfrm_proc.rst b/Documentation/translations/zh_CN/networking/xfrm_proc.rst new file mode 100644 index 000000000000..a2ae86c44707 --- /dev/null +++ b/Documentation/translations/zh_CN/networking/xfrm_proc.rst @@ -0,0 +1,126 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/networking/xfrm_proc.rst + +:翻译: + + 王亚鑫 Wang Yaxin + +================================= +XFRM proc - /proc/net/xfrm_* 文件 +================================= + +作者:Masahide NAKAMURA + + +转换统计信息 +------------ + +`xfrm_proc` 提供一组统计计数器,显示转换过程中丢弃的数据包及其原因。 +这些计数器属于Linux私有MIB的一部分,可通过 `/proc/net/xfrm_stat` +查看。 + +入站错误 +~~~~~~~~ + +XfrmInError: + 未匹配其他类别的所有错误 + +XfrmInBufferError: + 缓冲区不足 + +XfrmInHdrError: + 头部错误 + +XfrmInNoStates: + 未找到状态 + (入站SPI、地址或SA的IPsec协议不匹配) + +XfrmInStateProtoError: + 转换协议相关的错误 + (如SA密钥错误) + +XfrmInStateModeError: + 转换模式相关的错误 + +XfrmInStateSeqError: + 序列号错误 + 序列号超出窗口范围 + +XfrmInStateExpired: + 状态已过期 + +XfrmInStateMismatch: + 状态选项不匹配 + (如UDP封装类型不匹配) + +XfrmInStateInvalid: + 无效状态 + +XfrmInTmplMismatch: + 状态模板不匹配 + (如入站SA正确但SP规则错误) + +XfrmInNoPols: + 未找到状态的对应策略 + (如入站SA正确但无SP规则) + +XfrmInPolBlock: + 丢弃的策略 + +XfrmInPolError: + 错误的策略 + +XfrmAcquireError: + 状态未完全获取即被使用 + +XfrmFwdHdrError: + 转发路由禁止 + +XfrmInStateDirError: + 状态方向不匹配 + (输入路径查找到输出状态,预期是输入状态或者无方向) + +出站错误 +~~~~~~~~ +XfrmOutError: + 未匹配其他类别的所有错误 + +XfrmOutBundleGenError: + 捆绑包生成错误 + +XfrmOutBundleCheckError: + 捆绑包校验错误 + +XfrmOutNoStates: + 未找到状态 + +XfrmOutStateProtoError: + 转换协议特定错误 + +XfrmOutStateModeError: + 转换模式特定错误 + +XfrmOutStateSeqError: + 序列号错误 + (序列号溢出) + +XfrmOutStateExpired: + 状态已过期 + +XfrmOutPolBlock: + 丢弃策略 + +XfrmOutPolDead: + 失效策略 + +XfrmOutPolError: + 错误策略 + +XfrmOutStateInvalid: + 无效状态(可能已过期) + +XfrmOutStateDirError: + 状态方向不匹配(输出路径查找到输入状态,预期为输出状态或无方向) diff --git a/Documentation/translations/zh_CN/process/1.Intro.rst b/Documentation/translations/zh_CN/process/1.Intro.rst index 4f9284cbe33b..e314cce49d27 100644 --- a/Documentation/translations/zh_CN/process/1.Intro.rst +++ b/Documentation/translations/zh_CN/process/1.Intro.rst @@ -182,11 +182,11 @@ Andrew Morton, Andrew Price, Tsugikazu Shibata 和 Jochen Voß 。 可以获得所有版权所有者的同意(或者从内核中删除他们的代码)。因此,尤其是在 可预见的将来,许可证不大可能迁移到GPL的版本3。 -所有贡献给内核的代码都必须是合法的免费软件。因此,不接受匿名(或化名)贡献 -者的代码。所有贡献者都需要在他们的代码上“sign off(签发)”,声明代码可以 -在GPL下与内核一起分发。无法提供未被其所有者许可为免费软件的代码,或可能为 -内核造成版权相关问题的代码(例如,由缺乏适当保护的反向工程工作派生的代码) -不能被接受。 +所有贡献给内核的代码都必须是合法的免费软件。因此,出于这个原因,身份不明的 +贡献者或匿名贡献者提交的代码将不予接受。所有贡献者都需要在他们的代码上 +“sign off(签发)”,声明代码可以在GPL下与内核一起分发。无法提供未被其所有者 +许可为免费软件的代码,或可能为内核造成版权相关问题的代码(例如,由缺乏适当 +保护的反向工程工作派生的代码)不能被接受。 有关版权问题的提问在Linux开发邮件列表中很常见。这样的问题通常会得到不少答案, 但请记住,回答这些问题的人不是律师,不能提供法律咨询。如果您有关于Linux源代码 diff --git a/Documentation/translations/zh_CN/process/2.Process.rst b/Documentation/translations/zh_CN/process/2.Process.rst index 
e68c9de0f7f8..31b0e2c994f6 100644 --- a/Documentation/translations/zh_CN/process/2.Process.rst +++ b/Documentation/translations/zh_CN/process/2.Process.rst @@ -292,12 +292,11 @@ Quilt 是一个补丁管理系统,而不是源代码管理系统。它不会 一个潜在的危险,他们可能会被一堆电子邮件淹没、违反Linux列表上使用的约定, 或者两者兼而有之。 -大多数内核邮件列表都在vger.kernel.org上运行;主列表位于: +大多数内核邮件列表都托管在 kernel.org;主列表位于: - http://vger.kernel.org/vger-lists.html + https://subspace.kernel.org -不过,也有一些列表托管在别处;其中一些列表位于 -redhat.com/mailman/listinfo。 +其他地方也有邮件列表;请查看 MAINTAINERS 文件,获取与特定子系统相关的列表。 当然,内核开发的核心邮件列表是linux-kernel。这个列表是一个令人生畏的地方: 每天的信息量可以达到500条,噪音很高,谈话技术性很强,且参与者并不总是表现出 diff --git a/Documentation/translations/zh_CN/process/5.Posting.rst b/Documentation/translations/zh_CN/process/5.Posting.rst index 6c83a8f40310..ce37cf6a60e2 100644 --- a/Documentation/translations/zh_CN/process/5.Posting.rst +++ b/Documentation/translations/zh_CN/process/5.Posting.rst @@ -177,10 +177,21 @@ - Reported-by: 指定报告此补丁修复的问题的用户;此标记用于表示感谢。 + - Suggested-by: 表示该补丁思路由所提及的人提出,确保其创意贡献获得认可。 + 这有望激励他们在未来继续提供帮助。 + - Cc:指定某人收到了补丁的副本,并有机会对此发表评论。 在补丁中添加标签时要小心:只有Cc:才适合在没有指定人员明确许可的情况下添加。 +在补丁中添加上述标签时需谨慎,因为除了 Cc:、Reported-by: 和 Suggested-by:, +所有其他标签都需要被提及者的明确许可。对于这三个标签,若根据 lore 归档或提交 +历史记录,相关人员使用该姓名和电子邮件地址为 Linux 内核做出过贡献,则隐含许可 +已足够 -- 对于 Reported-by: 和 Suggested-by:,需确保报告或建议是公开进行的。 +请注意,从这个意义上讲,bugzilla.kernel.org 属于公开场合,但其使用的电子邮件地址 +属于私人信息;因此,除非相关人员曾在早期贡献中使用过这些邮箱,否则请勿在标签中 +公开它们。 + 寄送补丁 -------- diff --git a/Documentation/translations/zh_CN/process/6.Followthrough.rst b/Documentation/translations/zh_CN/process/6.Followthrough.rst index 2a127e737b6a..3d19c59ca6e4 100644 --- a/Documentation/translations/zh_CN/process/6.Followthrough.rst +++ b/Documentation/translations/zh_CN/process/6.Followthrough.rst @@ -49,6 +49,11 @@ 变。他们真的,几乎毫无例外地,致力于创造他们所能做到的最好的内核;他们并 没有试图给雇主的竞争对手造成不适。 + - 请准备好应对看似“愚蠢”的代码风格修改请求,以及将部分代码拆分到内核 + 共享模块的要求。维护者的职责之一是保持整体风格的一致性。有时这意味着, + 你在驱动中为解决某一问题而采用的巧妙取巧方案,实际上需要被提炼为通用的 + 内核特性,以便未来复用。 + 所有这些归根结底就是,当审阅者向您发送评论时,您需要注意他们正在进行的技术 评论。不要让他们的表达方式或你自己的骄傲阻止此事。当你在一个补丁上得到评论 时,花点时间去理解评论人想说什么。如果可能的话,请修复审阅者要求您修复的内 diff --git a/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst b/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst index 57beca02181c..92cc06dd5f4e 100644 --- a/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst +++ b/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst @@ -113,6 +113,8 @@ Git提供了一些强大的工具,可以让您重写开发历史。一个不 更改。在这方面 git request-pull 命令非常有用;它将按照其他开发人员所期望的 格式化请求,并检查以确保您已记得将这些更改推送到公共服务器。 +.. _cn_development_advancedtopics_reviews: + 审阅补丁 -------- @@ -126,8 +128,20 @@ Git提供了一些强大的工具,可以让您重写开发历史。一个不 的建议是:把审阅评论当成问题而不是批评。询问“在这条路径中如何释放锁?” 总是比说“这里的锁是错误的”更好。 +当出现分歧时,另一个有用的技巧是邀请他人参与讨论。如果交流数次后讨论陷入僵局, +可征求其他评审者或维护者的意见。通常,与某一评审者意见一致的人往往会保持沉默, +除非被主动询问。众人意见会产生成倍的影响力。 + 不同的开发人员将从不同的角度审查代码。部分人会主要关注代码风格以及代码行是 否有尾随空格。其他人会主要关注补丁作为一个整体实现的变更是否对内核有好处。 同时也有人会检查是否存在锁问题、堆栈使用过度、可能的安全问题、在其他地方 发现的代码重复、足够的文档、对性能的不利影响、用户空间ABI更改等。所有类型 的检查,只要它们能引导更好的代码进入内核,都是受欢迎和值得的。 + +使用诸如 ``Reviewed-by`` 这类特定标签并无严格要求。事实上,即便提供了标签,也 +更鼓励用平实的英文撰写评审意见,因为这样的内容信息量更大,例如,“我查看了此次 +提交中 A、B、C 等方面的内容,认为没有问题。”显然,以某种形式提供评审信息或回复 +是必要的,否则维护者将完全无法知晓评审者是否已查看过补丁! + +最后但同样重要的是,补丁评审可能会变成一个聚焦于指出问题的负面过程。请偶尔给予 +称赞,尤其是对新手贡献者! diff --git a/Documentation/translations/zh_CN/staging/index.rst b/Documentation/translations/zh_CN/staging/index.rst index bb55c81c84a3..6d68fabce175 100644 --- a/Documentation/translations/zh_CN/staging/index.rst +++ b/Documentation/translations/zh_CN/staging/index.rst @@ -13,6 +13,7 @@ .. 
toctree:: :maxdepth: 2 + speculation xz TODOList: @@ -21,6 +22,5 @@ TODOList: * lzo * remoteproc * rpmsg -* speculation * static-keys * tee diff --git a/Documentation/translations/zh_CN/staging/speculation.rst b/Documentation/translations/zh_CN/staging/speculation.rst new file mode 100644 index 000000000000..c36d33f67897 --- /dev/null +++ b/Documentation/translations/zh_CN/staging/speculation.rst @@ -0,0 +1,85 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/staging/speculation.rst + +:翻译: + + 崔巍 Cui Wei + +======== +推测执行 +======== + +本文档解释了推测执行的潜在影响,以及如何使用通用API来减轻不良影响。 + +------------------------------------------------------------------------------ + +为提高性能并减少平均延迟,许多现代处理器都采用分支预测等推测执行技术,执行结果 +可能在后续阶段被丢弃。 + +通常情况下,我们无法从架构状态(如寄存器内容)观察到推测执行。然而,在某些情况 +下从微架构状态观察其影响是可能的,例如数据是否存在于缓存中。这种状态可能会形成 +侧信道,通过观察侧信道可以提取秘密信息。 + +例如,在分支预测存在的情况下,边界检查可能被推测执行的代码忽略。考虑以下代码:: + + int load_array(int *array, unsigned int index) + { + if (index >= MAX_ARRAY_ELEMS) + return 0; + else + return array[index]; + } + +在arm64上,可以编译成如下汇编序列:: + + CMP , #MAX_ARRAY_ELEMS + B.LT less + MOV , #0 + RET + less: + LDR , [, ] + RET + +处理器有可能误预测条件分支,并推测性装载array[index],即使index >= MAX_ARRAY_ELEMS。 +这个值随后会被丢弃,但推测的装载可能会影响微架构状态,随后可被测量到。 + +涉及多个依赖内存访问的更复杂序列可能会导致敏感信息泄露。以前面的示例为基础,考虑 +以下代码:: + + int load_dependent_arrays(int *arr1, int *arr2, int index) + { + int val1, val2, + + val1 = load_array(arr1, index); + val2 = load_array(arr2, val1); + + return val2; + } + +根据推测,对load_array()的第一次调用可能会返回一个越界地址的值,而第二次调用将影响 +依赖于该值的微架构状态。这可能会提供一个任意读取原语。 + +缓解推测执行侧信道 +================== + +内核提供了一个通用API以确保即使在推测情况下也能遵守边界检查。受推测执行侧信道影响 +的架构应当实现这些原语。 + +中的array_index_nospec()辅助函数可用于防止信息通过侧信道泄漏。 + +调用array_index_nospec(index, size)将返回一个经过净化的索引值,即使在CPU推测执行 +条件下,该值也会被严格限制在[0, size)范围内。 + +这可以用来保护前面的load_array()示例:: + + int load_array(int *array, unsigned int index) + { + if (index >= MAX_ARRAY_ELEMS) + return 0; + else { + index = array_index_nospec(index, MAX_ARRAY_ELEMS); + return array[index]; + } + } diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst index 1998dc146c56..5f90af1fb573 100644 --- a/Documentation/usb/gadget-testing.rst +++ b/Documentation/usb/gadget-testing.rst @@ -874,7 +874,7 @@ where uvc-gadget is this program: with these patches: - http://www.spinics.net/lists/linux-usb/msg99220.html + https://lore.kernel.org/r/1386675637-18243-1-git-send-email-r.baldyga@samsung.com/ host:: diff --git a/Documentation/userspace-api/fwctl/fwctl.rst b/Documentation/userspace-api/fwctl/fwctl.rst index fdcfe418a83f..a74eab8d14c6 100644 --- a/Documentation/userspace-api/fwctl/fwctl.rst +++ b/Documentation/userspace-api/fwctl/fwctl.rst @@ -54,7 +54,7 @@ operated by the block layer but also comes with a set of RPCs to administer the construction of drives within the HW RAID. In the past when devices were more single function, individual subsystems would -grow different approaches to solving some of these common problems. For instance +grow different approaches to solving some of these common problems. For instance, monitoring device health, manipulating its FLASH, debugging the FW, provisioning, all have various unique interfaces across the kernel. @@ -87,7 +87,7 @@ device today may broadly have several function-level scopes: 3. Multiple VM functions tightly scoped within the VM The device may create a logical parent/child relationship between these scopes. -For instance a child VM's FW may be within the scope of the hypervisor FW. 
It is +For instance, a child VM's FW may be within the scope of the hypervisor FW. It is quite common in the VFIO world that the hypervisor environment has a complex provisioning/profiling/configuration responsibility for the function VFIO assigns to the VM. @@ -105,19 +105,19 @@ some general scopes of action (see enum fwctl_rpc_scope): 3. Write access to function & child debug information strictly compatible with the principles of kernel lockdown and kernel integrity protection. Triggers - a kernel Taint. + a kernel taint. - 4. Full debug device access. Triggers a kernel Taint, requires CAP_SYS_RAWIO. + 4. Full debug device access. Triggers a kernel taint, requires CAP_SYS_RAWIO. User space will provide a scope label on each RPC and the kernel must enforce the above CAPs and taints based on that scope. A combination of kernel and FW can enforce that RPCs are placed in the correct scope by user space. -Denied behavior ---------------- +Disallowed behavior +------------------- There are many things this interface must not allow user space to do (without a -Taint or CAP), broadly derived from the principles of kernel lockdown. Some +taint or CAP), broadly derived from the principles of kernel lockdown. Some examples: 1. DMA to/from arbitrary memory, hang the system, compromise FW integrity with @@ -138,8 +138,8 @@ examples: fwctl is not a replacement for device direct access subsystems like uacce or VFIO. -Operations exposed through fwctl's non-taining interfaces should be fully -sharable with other users of the device. For instance exposing a RPC through +Operations exposed through fwctl's non-tainting interfaces should be fully +sharable with other users of the device. For instance, exposing a RPC through fwctl should never prevent a kernel subsystem from also concurrently using that same RPC or hardware unit down the road. In such cases fwctl will be less important than proper kernel subsystems that eventually emerge. Mistakes in this @@ -225,12 +225,12 @@ subsystems. Each device type must be mindful of Linux's philosophy for stable ABI. The FW RPC interface does not have to meet a strictly stable ABI, but it does need to -meet an expectation that userspace tools that are deployed and in significant +meet an expectation that user space tools that are deployed and in significant use don't needlessly break. FW upgrade and kernel upgrade should keep widely deployed tooling working. Development and debugging focused RPCs under more permissive scopes can have -less stabilitiy if the tools using them are only run under exceptional +less stability if the tools using them are only run under exceptional circumstances and not for every day use of the device. Debugging tools may even require exact version matching as they may require something similar to DWARF debug information from the FW binary. @@ -261,7 +261,7 @@ Some examples: - HW RAID controllers. This includes RPCs to do things like compose drives into a RAID volume, configure RAID parameters, monitor the HW and more. - - Baseboard managers. RPCs for configuring settings in the device and more + - Baseboard managers. RPCs for configuring settings in the device and more. - NVMe vendor command capsules. nvme-cli provides access to some monitoring functions that different products have defined, but more exist. @@ -269,15 +269,15 @@ Some examples: - CXL also has a NVMe-like vendor command system. - DRM allows user space drivers to send commands to the device via kernel - mediation + mediation. 
- RDMA allows user space drivers to directly push commands to the device - without kernel involvement + without kernel involvement. - Various “raw” APIs, raw HID (SDL2), raw USB, NVMe Generic Interface, etc. The first 4 are examples of areas that fwctl intends to cover. The latter three -are examples of denied behavior as they fully overlap with the primary purpose +are examples of disallowed behavior as they fully overlap with the primary purpose of a kernel subsystem. Some key lessons learned from these past efforts are the importance of having a diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 4f1532a251d2..406a9f4d0869 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -10,12 +10,14 @@ Michael Elizabeth Chastain If you are adding new ioctl's to the kernel, you should use the _IO macros defined in : - ====== == ============================================ - _IO an ioctl with no parameters - _IOW an ioctl with write parameters (copy_from_user) - _IOR an ioctl with read parameters (copy_to_user) - _IOWR an ioctl with both write and read parameters. - ====== == ============================================ + ====== =========================== + macro parameters + ====== =========================== + _IO none + _IOW write (read from userspace) + _IOR read (write to userpace) + _IOWR write and read + ====== =========================== 'Write' and 'read' are from the user's point of view, just like the system calls 'write' and 'read'. For example, a SET_FOO ioctl would @@ -23,9 +25,9 @@ be _IOW, although the kernel would actually read data from user space; a GET_FOO ioctl would be _IOR, although the kernel would actually write data to user space. -The first argument to _IO, _IOW, _IOR, or _IOWR is an identifying letter -or number from the table below. Because of the large number of drivers, -many drivers share a partial letter with other drivers. +The first argument to the macros is an identifying letter or number from +the table below. Because of the large number of drivers, many drivers +share a partial letter with other drivers. If you are writing a driver for a new device and need a letter, pick an unused block with enough room for expansion: 32 to 256 ioctl commands @@ -33,12 +35,14 @@ should suffice. You can register the block by patching this file and submitting the patch through :doc:`usual patch submission process `. -The second argument to _IO, _IOW, _IOR, or _IOWR is a sequence number -to distinguish ioctls from each other. The third argument to _IOW, -_IOR, or _IOWR is the type of the data going into the kernel or coming -out of the kernel (e.g. 'int' or 'struct foo'). NOTE! Do NOT use -sizeof(arg) as the third argument as this results in your ioctl thinking -it passes an argument of type size_t. +The second argument is a sequence number to distinguish ioctls from each +other. The third argument (not applicable to _IO) is the type of the data +going into the kernel or coming out of the kernel (e.g. 'int' or +'struct foo'). + +.. note:: + Do NOT use sizeof(arg) as the third argument as this results in your + ioctl thinking it passes an argument of type size_t. Some devices use their major number as the identifier; this is OK, as long as it is unique. Some devices are irregular and don't follow any @@ -51,7 +55,7 @@ Following this convention is good because: error rather than some unexpected behaviour. 
(2) The 'strace' build procedure automatically finds ioctl numbers - defined with _IO, _IOW, _IOR, or _IOWR. + defined with the macros. (3) 'strace' can decode numbers back into useful names when the numbers are unique. @@ -65,344 +69,344 @@ Following this convention is good because: This table lists ioctls visible from userland, excluding ones from drivers/staging/. -==== ===== ======================================================= ================================================================ -Code Seq# Include File Comments +==== ===== ========================================================= ================================================================ +Code Seq# Include File Comments (hex) -==== ===== ======================================================= ================================================================ -0x00 00-1F linux/fs.h conflict! -0x00 00-1F scsi/scsi_ioctl.h conflict! -0x00 00-1F linux/fb.h conflict! -0x00 00-1F linux/wavefront.h conflict! +==== ===== ========================================================= ================================================================ +0x00 00-1F linux/fs.h conflict! +0x00 00-1F scsi/scsi_ioctl.h conflict! +0x00 00-1F linux/fb.h conflict! +0x00 00-1F linux/wavefront.h conflict! 0x02 all linux/fd.h 0x03 all linux/hdreg.h -0x04 D2-DC linux/umsdos_fs.h Dead since 2.6.11, but don't reuse these. +0x04 D2-DC linux/umsdos_fs.h Dead since 2.6.11, but don't reuse these. 0x06 all linux/lp.h 0x07 9F-D0 linux/vmw_vmci_defs.h, uapi/linux/vm_sockets.h 0x09 all linux/raid/md_u.h 0x10 00-0F drivers/char/s390/vmcp.h 0x10 10-1F arch/s390/include/uapi/sclp_ctl.h 0x10 20-2F arch/s390/include/uapi/asm/hypfs.h -0x12 all linux/fs.h BLK* ioctls +0x12 all linux/fs.h BLK* ioctls linux/blkpg.h linux/blkzoned.h linux/blk-crypto.h -0x15 all linux/fs.h FS_IOC_* ioctls -0x1b all InfiniBand Subsystem - +0x15 all linux/fs.h FS_IOC_* ioctls +0x1b all InfiniBand Subsystem + 0x20 all drivers/cdrom/cm206.h 0x22 all scsi/sg.h -0x3E 00-0F linux/counter.h +0x3E 00-0F linux/counter.h '!' 00-1F uapi/linux/seccomp.h -'#' 00-3F IEEE 1394 Subsystem - Block for the entire subsystem +'#' 00-3F IEEE 1394 Subsystem + Block for the entire subsystem '$' 00-0F linux/perf_counter.h, linux/perf_event.h -'%' 00-0F include/uapi/linux/stm.h System Trace Module subsystem - +'%' 00-0F include/uapi/linux/stm.h System Trace Module subsystem + '&' 00-07 drivers/firewire/nosy-user.h -'*' 00-1F uapi/linux/user_events.h User Events Subsystem - -'1' 00-1F linux/timepps.h PPS kit from Ulrich Windl - +'*' 00-1F uapi/linux/user_events.h User Events Subsystem + +'1' 00-1F linux/timepps.h PPS kit from Ulrich Windl + '2' 01-04 linux/i2o.h -'3' 00-0F drivers/s390/char/raw3270.h conflict! -'3' 00-1F linux/suspend_ioctls.h, conflict! +'3' 00-0F drivers/s390/char/raw3270.h conflict! +'3' 00-1F linux/suspend_ioctls.h, conflict! kernel/power/user.c -'8' all SNP8023 advanced NIC card - +'8' all SNP8023 advanced NIC card + ';' 64-7F linux/vfio.h ';' 80-FF linux/iommufd.h -'=' 00-3f uapi/linux/ptp_clock.h -'@' 00-0F linux/radeonfb.h conflict! -'@' 00-0F drivers/video/aty/aty128fb.c conflict! -'A' 00-1F linux/apm_bios.h conflict! -'A' 00-0F linux/agpgart.h, conflict! +'=' 00-3f uapi/linux/ptp_clock.h +'@' 00-0F linux/radeonfb.h conflict! +'@' 00-0F drivers/video/aty/aty128fb.c conflict! +'A' 00-1F linux/apm_bios.h conflict! +'A' 00-0F linux/agpgart.h, conflict! drivers/char/agp/compat_ioctl.h -'A' 00-7F sound/asound.h conflict! -'B' 00-1F linux/cciss_ioctl.h conflict! 
-'B' 00-0F include/linux/pmu.h conflict! -'B' C0-FF advanced bbus -'B' 00-0F xen/xenbus_dev.h conflict! -'C' all linux/soundcard.h conflict! -'C' 01-2F linux/capi.h conflict! -'C' F0-FF drivers/net/wan/cosa.h conflict! +'A' 00-7F sound/asound.h conflict! +'B' 00-1F linux/cciss_ioctl.h conflict! +'B' 00-0F include/linux/pmu.h conflict! +'B' C0-FF advanced bbus +'B' 00-0F xen/xenbus_dev.h conflict! +'C' all linux/soundcard.h conflict! +'C' 01-2F linux/capi.h conflict! +'C' F0-FF drivers/net/wan/cosa.h conflict! 'D' all arch/s390/include/asm/dasd.h -'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h Dead since 2022 +'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h Dead since 2022 'D' 05 drivers/scsi/pmcraid.h -'E' all linux/input.h conflict! -'E' 00-0F xen/evtchn.h conflict! -'F' all linux/fb.h conflict! -'F' 01-02 drivers/scsi/pmcraid.h conflict! -'F' 20 drivers/video/fsl-diu-fb.h conflict! -'F' 20 linux/ivtvfb.h conflict! -'F' 20 linux/matroxfb.h conflict! -'F' 20 drivers/video/aty/atyfb_base.c conflict! -'F' 00-0F video/da8xx-fb.h conflict! -'F' 80-8F linux/arcfb.h conflict! -'F' DD video/sstfb.h conflict! -'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict! -'G' 00-0F xen/gntalloc.h, xen/gntdev.h conflict! -'H' 00-7F linux/hiddev.h conflict! -'H' 00-0F linux/hidraw.h conflict! -'H' 01 linux/mei.h conflict! -'H' 02 linux/mei.h conflict! -'H' 03 linux/mei.h conflict! -'H' 00-0F sound/asound.h conflict! -'H' 20-40 sound/asound_fm.h conflict! -'H' 80-8F sound/sfnt_info.h conflict! -'H' 10-8F sound/emu10k1.h conflict! -'H' 10-1F sound/sb16_csp.h conflict! -'H' 10-1F sound/hda_hwdep.h conflict! -'H' 40-4F sound/hdspm.h conflict! -'H' 40-4F sound/hdsp.h conflict! +'E' all linux/input.h conflict! +'E' 00-0F xen/evtchn.h conflict! +'F' all linux/fb.h conflict! +'F' 01-02 drivers/scsi/pmcraid.h conflict! +'F' 20 drivers/video/fsl-diu-fb.h conflict! +'F' 20 linux/ivtvfb.h conflict! +'F' 20 linux/matroxfb.h conflict! +'F' 20 drivers/video/aty/atyfb_base.c conflict! +'F' 00-0F video/da8xx-fb.h conflict! +'F' 80-8F linux/arcfb.h conflict! +'F' DD video/sstfb.h conflict! +'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict! +'G' 00-0F xen/gntalloc.h, xen/gntdev.h conflict! +'H' 00-7F linux/hiddev.h conflict! +'H' 00-0F linux/hidraw.h conflict! +'H' 01 linux/mei.h conflict! +'H' 02 linux/mei.h conflict! +'H' 03 linux/mei.h conflict! +'H' 00-0F sound/asound.h conflict! +'H' 20-40 sound/asound_fm.h conflict! +'H' 80-8F sound/sfnt_info.h conflict! +'H' 10-8F sound/emu10k1.h conflict! +'H' 10-1F sound/sb16_csp.h conflict! +'H' 10-1F sound/hda_hwdep.h conflict! +'H' 40-4F sound/hdspm.h conflict! +'H' 40-4F sound/hdsp.h conflict! 'H' 90 sound/usb/usx2y/usb_stream.h -'H' 00-0F uapi/misc/habanalabs.h conflict! +'H' 00-0F uapi/misc/habanalabs.h conflict! 'H' A0 uapi/linux/usb/cdc-wdm.h -'H' C0-F0 net/bluetooth/hci.h conflict! -'H' C0-DF net/bluetooth/hidp/hidp.h conflict! -'H' C0-DF net/bluetooth/cmtp/cmtp.h conflict! -'H' C0-DF net/bluetooth/bnep/bnep.h conflict! -'H' F1 linux/hid-roccat.h +'H' C0-F0 net/bluetooth/hci.h conflict! +'H' C0-DF net/bluetooth/hidp/hidp.h conflict! +'H' C0-DF net/bluetooth/cmtp/cmtp.h conflict! +'H' C0-DF net/bluetooth/bnep/bnep.h conflict! +'H' F1 linux/hid-roccat.h 'H' F8-FA sound/firewire.h -'I' all linux/isdn.h conflict! -'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict! -'I' 40-4F linux/mISDNif.h conflict! +'I' all linux/isdn.h conflict! +'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict! +'I' 40-4F linux/mISDNif.h conflict! 'K' all linux/kd.h -'L' 00-1F linux/loop.h conflict! 
-'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict! -'L' E0-FF linux/ppdd.h encrypted disk device driver - -'M' all linux/soundcard.h conflict! -'M' 01-16 mtd/mtd-abi.h conflict! +'L' 00-1F linux/loop.h conflict! +'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict! +'L' E0-FF linux/ppdd.h encrypted disk device driver + +'M' all linux/soundcard.h conflict! +'M' 01-16 mtd/mtd-abi.h conflict! and drivers/mtd/mtdchar.c 'M' 01-03 drivers/scsi/megaraid/megaraid_sas.h -'M' 00-0F drivers/video/fsl-diu-fb.h conflict! +'M' 00-0F drivers/video/fsl-diu-fb.h conflict! 'N' 00-1F drivers/usb/scanner.h 'N' 40-7F drivers/block/nvme.c -'N' 80-8F uapi/linux/ntsync.h NT synchronization primitives - -'O' 00-06 mtd/ubi-user.h UBI -'P' all linux/soundcard.h conflict! -'P' 60-6F sound/sscape_ioctl.h conflict! -'P' 00-0F drivers/usb/class/usblp.c conflict! -'P' 01-09 drivers/misc/pci_endpoint_test.c conflict! -'P' 00-0F xen/privcmd.h conflict! -'P' 00-05 linux/tps6594_pfsm.h conflict! +'N' 80-8F uapi/linux/ntsync.h NT synchronization primitives + +'O' 00-06 mtd/ubi-user.h UBI +'P' all linux/soundcard.h conflict! +'P' 60-6F sound/sscape_ioctl.h conflict! +'P' 00-0F drivers/usb/class/usblp.c conflict! +'P' 01-09 drivers/misc/pci_endpoint_test.c conflict! +'P' 00-0F xen/privcmd.h conflict! +'P' 00-05 linux/tps6594_pfsm.h conflict! 'Q' all linux/soundcard.h -'R' 00-1F linux/random.h conflict! -'R' 01 linux/rfkill.h conflict! +'R' 00-1F linux/random.h conflict! +'R' 01 linux/rfkill.h conflict! 'R' 20-2F linux/trace_mmap.h 'R' C0-DF net/bluetooth/rfcomm.h 'R' E0 uapi/linux/fsl_mc.h -'S' all linux/cdrom.h conflict! -'S' 80-81 scsi/scsi_ioctl.h conflict! -'S' 82-FF scsi/scsi.h conflict! -'S' 00-7F sound/asequencer.h conflict! -'T' all linux/soundcard.h conflict! -'T' 00-AF sound/asound.h conflict! -'T' all arch/x86/include/asm/ioctls.h conflict! -'T' C0-DF linux/if_tun.h conflict! -'U' all sound/asound.h conflict! -'U' 00-CF linux/uinput.h conflict! +'S' all linux/cdrom.h conflict! +'S' 80-81 scsi/scsi_ioctl.h conflict! +'S' 82-FF scsi/scsi.h conflict! +'S' 00-7F sound/asequencer.h conflict! +'T' all linux/soundcard.h conflict! +'T' 00-AF sound/asound.h conflict! +'T' all arch/x86/include/asm/ioctls.h conflict! +'T' C0-DF linux/if_tun.h conflict! +'U' all sound/asound.h conflict! +'U' 00-CF linux/uinput.h conflict! 'U' 00-EF linux/usbdevice_fs.h 'U' C0-CF drivers/bluetooth/hci_uart.h -'V' all linux/vt.h conflict! -'V' all linux/videodev2.h conflict! -'V' C0 linux/ivtvfb.h conflict! -'V' C0 linux/ivtv.h conflict! -'V' C0 media/si4713.h conflict! -'W' 00-1F linux/watchdog.h conflict! -'W' 00-1F linux/wanrouter.h conflict! (pre 3.9) -'W' 00-3F sound/asound.h conflict! +'V' all linux/vt.h conflict! +'V' all linux/videodev2.h conflict! +'V' C0 linux/ivtvfb.h conflict! +'V' C0 linux/ivtv.h conflict! +'V' C0 media/si4713.h conflict! +'W' 00-1F linux/watchdog.h conflict! +'W' 00-1F linux/wanrouter.h conflict! (pre 3.9) +'W' 00-3F sound/asound.h conflict! 'W' 40-5F drivers/pci/switch/switchtec.c 'W' 60-61 linux/watch_queue.h -'X' all fs/xfs/xfs_fs.h, conflict! +'X' all fs/xfs/xfs_fs.h, conflict! fs/xfs/linux-2.6/xfs_ioctl32.h, include/linux/falloc.h, linux/fs.h, -'X' all fs/ocfs2/ocfs_fs.h conflict! +'X' all fs/ocfs2/ocfs_fs.h conflict! 'Z' 14-15 drivers/message/fusion/mptctl.h -'[' 00-3F linux/usb/tmc.h USB Test and Measurement Devices - -'a' all linux/atm*.h, linux/sonet.h ATM on linux - -'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver -'b' 00-FF conflict! 
bit3 vme host bridge - -'b' 00-0F linux/dma-buf.h conflict! -'c' 00-7F linux/comstats.h conflict! -'c' 00-7F linux/coda.h conflict! -'c' 00-1F linux/chio.h conflict! -'c' 80-9F arch/s390/include/asm/chsc.h conflict! +'[' 00-3F linux/usb/tmc.h USB Test and Measurement Devices + +'a' all linux/atm*.h, linux/sonet.h ATM on linux + +'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver +'b' 00-FF conflict! bit3 vme host bridge + +'b' 00-0F linux/dma-buf.h conflict! +'c' 00-7F linux/comstats.h conflict! +'c' 00-7F linux/coda.h conflict! +'c' 00-1F linux/chio.h conflict! +'c' 80-9F arch/s390/include/asm/chsc.h conflict! 'c' A0-AF arch/x86/include/asm/msr.h conflict! -'d' 00-FF linux/char/drm/drm.h conflict! -'d' 02-40 pcmcia/ds.h conflict! +'d' 00-FF linux/char/drm/drm.h conflict! +'d' 02-40 pcmcia/ds.h conflict! 'd' F0-FF linux/digi1.h -'e' all linux/digi1.h conflict! -'f' 00-1F linux/ext2_fs.h conflict! -'f' 00-1F linux/ext3_fs.h conflict! -'f' 00-0F fs/jfs/jfs_dinode.h conflict! -'f' 00-0F fs/ext4/ext4.h conflict! -'f' 00-0F linux/fs.h conflict! -'f' 00-0F fs/ocfs2/ocfs2_fs.h conflict! +'e' all linux/digi1.h conflict! +'f' 00-1F linux/ext2_fs.h conflict! +'f' 00-1F linux/ext3_fs.h conflict! +'f' 00-0F fs/jfs/jfs_dinode.h conflict! +'f' 00-0F fs/ext4/ext4.h conflict! +'f' 00-0F linux/fs.h conflict! +'f' 00-0F fs/ocfs2/ocfs2_fs.h conflict! 'f' 13-27 linux/fscrypt.h 'f' 81-8F linux/fsverity.h 'g' 00-0F linux/usb/gadgetfs.h 'g' 20-2F linux/usb/g_printer.h -'h' 00-7F conflict! Charon filesystem - -'h' 00-1F linux/hpet.h conflict! +'h' 00-7F conflict! Charon filesystem + +'h' 00-1F linux/hpet.h conflict! 'h' 80-8F fs/hfsplus/ioctl.c -'i' 00-3F linux/i2o-dev.h conflict! -'i' 0B-1F linux/ipmi.h conflict! +'i' 00-3F linux/i2o-dev.h conflict! +'i' 0B-1F linux/ipmi.h conflict! 'i' 80-8F linux/i8k.h -'i' 90-9F `linux/iio/*.h` IIO +'i' 90-9F `linux/iio/*.h` IIO 'j' 00-3F linux/joystick.h -'k' 00-0F linux/spi/spidev.h conflict! -'k' 00-05 video/kyro.h conflict! -'k' 10-17 linux/hsi/hsi_char.h HSI character device -'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system - -'l' 40-7F linux/udf_fs_i.h in development: - -'m' 00-09 linux/mmtimer.h conflict! -'m' all linux/mtio.h conflict! -'m' all linux/soundcard.h conflict! -'m' all linux/synclink.h conflict! -'m' 00-19 drivers/message/fusion/mptctl.h conflict! -'m' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! +'k' 00-0F linux/spi/spidev.h conflict! +'k' 00-05 video/kyro.h conflict! +'k' 10-17 linux/hsi/hsi_char.h HSI character device +'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system + +'l' 40-7F linux/udf_fs_i.h in development: + +'m' 00-09 linux/mmtimer.h conflict! +'m' all linux/mtio.h conflict! +'m' all linux/soundcard.h conflict! +'m' all linux/synclink.h conflict! +'m' 00-19 drivers/message/fusion/mptctl.h conflict! +'m' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! 'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c -'n' 80-8F uapi/linux/nilfs2_api.h NILFS2 -'n' E0-FF linux/matroxfb.h matroxfb -'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2 -'o' 00-03 mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps) -'o' 40-41 mtd/ubi-user.h UBI -'o' 01-A1 `linux/dvb/*.h` DVB -'p' 00-0F linux/phantom.h conflict! (OpenHaptics needs this) -'p' 00-1F linux/rtc.h conflict! +'n' 80-8F uapi/linux/nilfs2_api.h NILFS2 +'n' E0-FF linux/matroxfb.h matroxfb +'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2 +'o' 00-03 mtd/ubi-user.h conflict! 
(OCFS2 and UBI overlaps) +'o' 40-41 mtd/ubi-user.h UBI +'o' 01-A1 `linux/dvb/*.h` DVB +'p' 00-0F linux/phantom.h conflict! (OpenHaptics needs this) +'p' 00-1F linux/rtc.h conflict! 'p' 40-7F linux/nvram.h -'p' 80-9F linux/ppdev.h user-space parport - -'p' A1-A5 linux/pps.h LinuxPPS -'p' B1-B3 linux/pps_gen.h LinuxPPS - +'p' 80-9F linux/ppdev.h user-space parport + +'p' A1-A5 linux/pps.h LinuxPPS +'p' B1-B3 linux/pps_gen.h LinuxPPS + 'q' 00-1F linux/serio.h -'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK - linux/ixjuser.h +'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK + linux/ixjuser.h 'r' 00-1F linux/msdos_fs.h and fs/fat/dir.c 's' all linux/cdk.h 't' 00-7F linux/ppp-ioctl.h 't' 80-8F linux/isdn_ppp.h -'t' 90-91 linux/toshiba.h toshiba and toshiba_acpi SMM -'u' 00-1F linux/smb_fs.h gone -'u' 00-2F linux/ublk_cmd.h conflict! -'u' 20-3F linux/uvcvideo.h USB video class host driver -'u' 40-4f linux/udmabuf.h userspace dma-buf misc device -'v' 00-1F linux/ext2_fs.h conflict! -'v' 00-1F linux/fs.h conflict! -'v' 00-0F linux/sonypi.h conflict! -'v' 00-0F media/v4l2-subdev.h conflict! -'v' 20-27 arch/powerpc/include/uapi/asm/vas-api.h VAS API -'v' C0-FF linux/meye.h conflict! -'w' all CERN SCI driver -'y' 00-1F packet based user level communications - -'z' 00-3F CAN bus card conflict! - -'z' 40-7F CAN bus card conflict! - -'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict! +'t' 90-91 linux/toshiba.h toshiba and toshiba_acpi SMM +'u' 00-1F linux/smb_fs.h gone +'u' 00-2F linux/ublk_cmd.h conflict! +'u' 20-3F linux/uvcvideo.h USB video class host driver +'u' 40-4f linux/udmabuf.h userspace dma-buf misc device +'v' 00-1F linux/ext2_fs.h conflict! +'v' 00-1F linux/fs.h conflict! +'v' 00-0F linux/sonypi.h conflict! +'v' 00-0F media/v4l2-subdev.h conflict! +'v' 20-27 arch/powerpc/include/uapi/asm/vas-api.h VAS API +'v' C0-FF linux/meye.h conflict! +'w' all CERN SCI driver +'y' 00-1F packet based user level communications + +'z' 00-3F CAN bus card conflict! + +'z' 40-7F CAN bus card conflict! + +'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict! 
'|' 00-7F linux/media.h -'|' 80-9F samples/ Any sample and example drivers +'|' 80-9F samples/ Any sample and example drivers 0x80 00-1F linux/fb.h 0x81 00-1F linux/vduse.h 0x89 00-06 arch/x86/include/asm/sockios.h 0x89 0B-DF linux/sockios.h -0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range -0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range +0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range +0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range 0x8A 00-1F linux/eventpoll.h 0x8B all linux/wireless.h -0x8C 00-3F WiNRADiO driver - +0x8C 00-3F WiNRADiO driver + 0x90 00 drivers/cdrom/sbpcd.h 0x92 00-0F drivers/usb/mon/mon_bin.c 0x93 60-7F linux/auto_fs.h -0x94 all fs/btrfs/ioctl.h Btrfs filesystem - and linux/fs.h some lifted to vfs/generic -0x97 00-7F fs/ceph/ioctl.h Ceph file system -0x99 00-0F 537-Addinboard driver - +0x94 all fs/btrfs/ioctl.h Btrfs filesystem + and linux/fs.h some lifted to vfs/generic +0x97 00-7F fs/ceph/ioctl.h Ceph file system +0x99 00-0F 537-Addinboard driver + 0x9A 00-0F include/uapi/fwctl/fwctl.h -0xA0 all linux/sdp/sdp.h Industrial Device Project - -0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver -0xA2 all uapi/linux/acrn.h ACRN hypervisor -0xA3 80-8F Port ACL in development: - +0xA0 all linux/sdp/sdp.h Industrial Device Project + +0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver +0xA2 all uapi/linux/acrn.h ACRN hypervisor +0xA3 80-8F Port ACL in development: + 0xA3 90-9F linux/dtlk.h -0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem -0xA4 00-1F uapi/asm/sgx.h -0xA5 01-05 linux/surface_aggregator/cdev.h Microsoft Surface Platform System Aggregator - -0xA5 20-2F linux/surface_aggregator/dtx.h Microsoft Surface DTX driver - +0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem +0xA4 00-1F uapi/asm/sgx.h +0xA5 01-05 linux/surface_aggregator/cdev.h Microsoft Surface Platform System Aggregator + +0xA5 20-2F linux/surface_aggregator/dtx.h Microsoft Surface DTX driver + 0xAA 00-3F linux/uapi/linux/userfaultfd.h 0xAB 00-1F linux/nbd.h 0xAC 00-1F linux/raw.h -0xAD 00 Netfilter device in development: - -0xAE 00-1F linux/kvm.h Kernel-based Virtual Machine - -0xAE 40-FF linux/kvm.h Kernel-based Virtual Machine - -0xAE 20-3F linux/nitro_enclaves.h Nitro Enclaves -0xAF 00-1F linux/fsl_hypervisor.h Freescale hypervisor -0xB0 all RATIO devices in development: - -0xB1 00-1F PPPoX - -0xB2 00 arch/powerpc/include/uapi/asm/papr-vpd.h powerpc/pseries VPD API - -0xB2 01-02 arch/powerpc/include/uapi/asm/papr-sysparm.h powerpc/pseries system parameter API - -0xB2 03-05 arch/powerpc/include/uapi/asm/papr-indices.h powerpc/pseries indices API - -0xB2 06-07 arch/powerpc/include/uapi/asm/papr-platform-dump.h powerpc/pseries Platform Dump API - -0xB2 08 powerpc/include/uapi/asm/papr-physical-attestation.h powerpc/pseries Physical Attestation API - +0xAD 00 Netfilter device in development: + +0xAE 00-1F linux/kvm.h Kernel-based Virtual Machine + +0xAE 40-FF linux/kvm.h Kernel-based Virtual Machine + +0xAE 20-3F linux/nitro_enclaves.h Nitro Enclaves +0xAF 00-1F linux/fsl_hypervisor.h Freescale hypervisor +0xB0 all RATIO devices in development: + +0xB1 00-1F PPPoX + +0xB2 00 arch/powerpc/include/uapi/asm/papr-vpd.h powerpc/pseries VPD API + +0xB2 01-02 arch/powerpc/include/uapi/asm/papr-sysparm.h powerpc/pseries system parameter API + +0xB2 03-05 arch/powerpc/include/uapi/asm/papr-indices.h powerpc/pseries indices API + +0xB2 06-07 arch/powerpc/include/uapi/asm/papr-platform-dump.h powerpc/pseries Platform Dump API + +0xB2 08 arch/powerpc/include/uapi/asm/papr-physical-attestation.h powerpc/pseries 
Physical Attestation API + 0xB3 00 linux/mmc/ioctl.h -0xB4 00-0F linux/gpio.h -0xB5 00-0F uapi/linux/rpmsg.h +0xB4 00-0F linux/gpio.h +0xB5 00-0F uapi/linux/rpmsg.h 0xB6 all linux/fpga-dfl.h -0xB7 all uapi/linux/remoteproc_cdev.h -0xB7 all uapi/linux/nsfs.h > -0xB8 01-02 uapi/misc/mrvl_cn10k_dpi.h Marvell CN10K DPI driver -0xB8 all uapi/linux/mshv.h Microsoft Hyper-V /dev/mshv driver - +0xB7 all uapi/linux/remoteproc_cdev.h +0xB7 all uapi/linux/nsfs.h > +0xB8 01-02 uapi/misc/mrvl_cn10k_dpi.h Marvell CN10K DPI driver +0xB8 all uapi/linux/mshv.h Microsoft Hyper-V /dev/mshv driver + 0xC0 00-0F linux/usb/iowarrior.h -0xCA 00-0F uapi/misc/cxl.h Dead since 6.15 +0xCA 00-0F uapi/misc/cxl.h Dead since 6.15 0xCA 10-2F uapi/misc/ocxl.h -0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.15 -0xCB 00-1F CBM serial IEC bus in development: - -0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver -0xCD 01 linux/reiserfs_fs.h Dead since 6.13 -0xCE 01-02 uapi/linux/cxl_mem.h Compute Express Link Memory Devices +0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.15 +0xCB 00-1F CBM serial IEC bus in development: + +0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver +0xCD 01 linux/reiserfs_fs.h Dead since 6.13 +0xCE 01-02 uapi/linux/cxl_mem.h Compute Express Link Memory Devices 0xCF 02 fs/smb/client/cifs_ioctl.h 0xDB 00-0F drivers/char/mwave/mwavepub.h -0xDD 00-3F ZFCP device driver see drivers/s390/scsi/ - +0xDD 00-3F ZFCP device driver see drivers/s390/scsi/ + 0xE5 00-3F linux/fuse.h -0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver -0xEE 00-09 uapi/linux/pfrut.h Platform Firmware Runtime Update and Telemetry -0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development) - -0xF6 all LTTng Linux Trace Toolkit Next Generation - -0xF8 all arch/x86/include/uapi/asm/amd_hsmp.h AMD HSMP EPYC system management interface driver - -0xF9 00-0F uapi/misc/amd-apml.h AMD side band system management interface driver - +0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver +0xEE 00-09 uapi/linux/pfrut.h Platform Firmware Runtime Update and Telemetry +0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development) + +0xF6 all LTTng Linux Trace Toolkit Next Generation + +0xF8 all arch/x86/include/uapi/asm/amd_hsmp.h AMD HSMP EPYC system management interface driver + +0xF9 00-0F uapi/misc/amd-apml.h AMD side band system management interface driver + 0xFD all linux/dm-ioctl.h 0xFE all linux/isst_if.h -==== ===== ======================================================= ================================================================ +==== ===== ========================================================= ================================================================ diff --git a/Documentation/userspace-api/sysfs-platform_profile.rst b/Documentation/userspace-api/sysfs-platform_profile.rst index 7f013356118a..6613e188242a 100644 --- a/Documentation/userspace-api/sysfs-platform_profile.rst +++ b/Documentation/userspace-api/sysfs-platform_profile.rst @@ -18,9 +18,9 @@ API for selecting the platform profile of these automatic mechanisms. Note that this API is only for selecting the platform profile, it is NOT a goal of this API to allow monitoring the resulting performance characteristics. Monitoring performance is best done with device/vendor -specific tools such as e.g. turbostat. +specific tools, e.g. turbostat. 
-Specifically when selecting a high performance profile the actual achieved +Specifically, when selecting a high performance profile the actual achieved performance may be limited by various factors such as: the heat generated by other components, room temperature, free air flow at the bottom of a laptop, etc. It is explicitly NOT a goal of this API to let userspace know @@ -44,7 +44,7 @@ added. Drivers which wish to introduce new profile names must: "Custom" profile support ======================== The platform_profile class also supports profiles advertising a "custom" -profile. This is intended to be set by drivers when the setttings in the +profile. This is intended to be set by drivers when the settings in the driver have been modified in a way that a standard profile doesn't represent the current state. diff --git a/MAINTAINERS b/MAINTAINERS index 4f03e230f3c5..b0845415ddc0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -158,7 +158,7 @@ S: Maintained W: http://github.com/v9fs Q: http://patchwork.kernel.org/project/v9fs-devel/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs.git -T: git git://github.com/martinetd/linux.git +T: git https://github.com/martinetd/linux.git F: Documentation/filesystems/9p.rst F: fs/9p/ F: include/net/9p/ @@ -2598,7 +2598,7 @@ M: Hans Ulli Kroll M: Linus Walleij L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://github.com/ulli-kroll/linux.git +T: git https://github.com/ulli-kroll/linux.git F: Documentation/devicetree/bindings/arm/gemini.yaml F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.yaml F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt @@ -2805,7 +2805,7 @@ M: Vladimir Zapolskiy M: Piotr Wojtaszczyk L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://github.com/vzapolskiy/linux-lpc32xx.git +T: git https://github.com/vzapolskiy/linux-lpc32xx.git F: Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml F: arch/arm/boot/dts/nxp/lpc/lpc32* F: arch/arm/mach-lpc32xx/ @@ -2979,7 +2979,7 @@ M: Romain Perier L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained W: http://linux-chenxing.org/ -T: git git://github.com/linux-chenxing/linux.git +T: git https://github.com/linux-chenxing/linux.git F: Documentation/devicetree/bindings/arm/mstar/* F: Documentation/devicetree/bindings/clock/mstar,msc313-mpll.yaml F: Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml @@ -3909,7 +3909,7 @@ ATHEROS 71XX/9XXX GPIO DRIVER M: Alban Bedel S: Maintained W: https://github.com/AlbanBedel/linux -T: git git://github.com/AlbanBedel/linux +T: git https://github.com/AlbanBedel/linux.git F: Documentation/devicetree/bindings/gpio/qca,ar7100-gpio.yaml F: drivers/gpio/gpio-ath79.c @@ -3917,7 +3917,7 @@ ATHEROS 71XX/9XXX USB PHY DRIVER M: Alban Bedel S: Maintained W: https://github.com/AlbanBedel/linux -T: git git://github.com/AlbanBedel/linux +T: git https://github.com/AlbanBedel/linux.git F: Documentation/devicetree/bindings/phy/phy-ath79-usb.txt F: drivers/phy/qualcomm/phy-ath79-usb.c @@ -3982,7 +3982,7 @@ F: drivers/net/ethernet/cadence/ ATMEL MAXTOUCH DRIVER M: Nick Dyer S: Maintained -T: git git://github.com/ndyer/linux.git +T: git https://github.com/ndyer/linux.git F: Documentation/devicetree/bindings/input/atmel,maxtouch.yaml F: drivers/input/touchscreen/atmel_mxt_ts.c @@ -19919,7 +19919,7 @@ L: linux-pm@vger.kernel.org S: Supported W: https://01.org/pm-graph B: 
https://bugzilla.kernel.org/buglist.cgi?component=pm-graph&product=Tools -T: git git://github.com/intel/pm-graph +T: git https://github.com/intel/pm-graph.git F: tools/power/pm-graph PM6764TR DRIVER @@ -20310,8 +20310,8 @@ M: Haojian Zhuang M: Robert Jarzmik L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://github.com/hzhuang1/linux.git -T: git git://github.com/rjarzmik/linux.git +T: git https://github.com/hzhuang1/linux.git +T: git https://github.com/rjarzmik/linux.git F: arch/arm/boot/dts/intel/pxa/ F: arch/arm/mach-pxa/ F: drivers/dma/pxa* @@ -23117,7 +23117,7 @@ M: Casey Schaufler L: linux-security-module@vger.kernel.org S: Maintained W: http://schaufler-ca.com -T: git git://github.com/cschaufler/smack-next +T: git https://github.com/cschaufler/smack-next.git F: Documentation/admin-guide/LSM/Smack.rst F: security/smack/ @@ -25458,7 +25458,7 @@ TRADITIONAL CHINESE DOCUMENTATION M: Hu Haowen <2023002089@link.tyut.edu.cn> S: Maintained W: https://github.com/srcres258/linux-doc -T: git git://github.com/srcres258/linux-doc.git doc-zh-tw +T: git https://github.com/srcres258/linux-doc.git doc-zh-tw F: Documentation/translations/zh_TW/ TRIGGER SOURCE - ADI UTIL SIGMA DELTA SPI diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index 06c4de602b2f..7d40b51933d1 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h @@ -60,6 +60,14 @@ static inline struct dma_pool *dma_pool_create(const char *name, NUMA_NO_NODE); } +/** + * dma_pool_zalloc - Get a zero-initialized block of DMA coherent memory. + * @pool: dma pool that will produce the block + * @mem_flags: GFP_* bitmask + * @handle: pointer to dma address of block + * + * Same as dma_pool_alloc(), but the returned memory is zeroed. + */ static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { diff --git a/mm/dmapool.c b/mm/dmapool.c index 5be8cc1c6529..5d8af6e29127 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -200,7 +200,7 @@ static void pool_block_push(struct dma_pool *pool, struct dma_block *block, /** - * dma_pool_create_node - Creates a pool of consistent memory blocks, for dma. + * dma_pool_create_node - Creates a pool of coherent DMA memory blocks. * @name: name of pool, for diagnostics * @dev: device that will be doing the DMA * @size: size of the blocks in this pool. @@ -210,7 +210,7 @@ static void pool_block_push(struct dma_pool *pool, struct dma_block *block, * Context: not in_interrupt() * * Given one of these pools, dma_pool_alloc() - * may be used to allocate memory. Such memory will all have "consistent" + * may be used to allocate memory. Such memory will all have coherent * DMA mappings, accessible by the device and its driver without using * cache flushing primitives. The actual size of blocks allocated may be * larger than requested because of alignment. 
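The get_origin_from_trans_smartly() helper added to scripts/checktransupdate.py below recovers the upstream commit hash from a formatted translation commit message. A minimal, self-contained sketch of the two patterns it recognizes; the sample message lines are invented for illustration (the first mirrors the example already quoted in the script's own docstring):

import re

HASH = r'([0-9a-f]{12})'    # an abbreviated commit hash: 12 hex digits

pat_update_to = re.compile(rf'update to commit {HASH}')
pat_update_translation = re.compile(rf'Update the translation through commit {HASH}')

samples = [
    'update to commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")',
    'Update the translation through commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")',
]
for line in samples:
    m = pat_update_to.search(line) or pat_update_translation.search(line)
    print(m.group(1) if m else None)    # 42fb9cfd5b18, both times

When a hash is found this way, the script resolves the tracked origin commit from it directly; the older get_origin_from_trans() lookup is kept only as a fallback, as the change below shows.
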
@@ -395,7 +395,7 @@ void dma_pool_destroy(struct dma_pool *pool) EXPORT_SYMBOL(dma_pool_destroy); /** - * dma_pool_alloc - get a block of consistent memory + * dma_pool_alloc - get a block of coherent memory * @pool: dma pool that will produce the block * @mem_flags: GFP_* bitmask * @handle: pointer to dma address of block diff --git a/scripts/checktransupdate.py b/scripts/checktransupdate.py index 578c3fecfdfd..e39529e46c3d 100755 --- a/scripts/checktransupdate.py +++ b/scripts/checktransupdate.py @@ -24,6 +24,7 @@ commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs") """ import os +import re import time import logging from argparse import ArgumentParser, ArgumentTypeError, BooleanOptionalAction @@ -69,6 +70,38 @@ def get_origin_from_trans(origin_path, t_from_head): return o_from_t +def get_origin_from_trans_smartly(origin_path, t_from_head): + """Get the latest origin commit from the formatted translation commit: + (1) update to commit HASH (TITLE) + (2) Update the translation through commit HASH (TITLE) + """ + # catch flag for 12-bit commit hash + HASH = r'([0-9a-f]{12})' + # pattern 1: contains "update to commit HASH" + pat_update_to = re.compile(rf'update to commit {HASH}') + # pattern 2: contains "Update the translation through commit HASH" + pat_update_translation = re.compile(rf'Update the translation through commit {HASH}') + + origin_commit_hash = None + for line in t_from_head["message"]: + # check if the line matches the first pattern + match = pat_update_to.search(line) + if match: + origin_commit_hash = match.group(1) + break + # check if the line matches the second pattern + match = pat_update_translation.search(line) + if match: + origin_commit_hash = match.group(1) + break + if origin_commit_hash is None: + return None + o_from_t = get_latest_commit_from(origin_path, origin_commit_hash) + if o_from_t is not None: + logging.debug("tracked origin commit id: %s", o_from_t["hash"]) + return o_from_t + + def get_commits_count_between(opath, commit1, commit2): """Get the commits count between two commits for the specified file""" command = f"git log --pretty=format:%H {commit1}...{commit2} -- {opath}" @@ -108,7 +141,10 @@ def check_per_file(file_path): logging.error("Cannot find the latest commit for %s", file_path) return - o_from_t = get_origin_from_trans(opath, t_from_head) + o_from_t = get_origin_from_trans_smartly(opath, t_from_head) + # notice, o_from_t from get_*_smartly() is always more accurate than from get_*() + if o_from_t is None: + o_from_t = get_origin_from_trans(opath, t_from_head) if o_from_t is None: logging.error("Error: Cannot find the latest origin commit for %s", file_path) diff --git a/scripts/kernel-doc.py b/scripts/kernel-doc.py index 12ae66f40bd7..fc3d46ef519f 100755 --- a/scripts/kernel-doc.py +++ b/scripts/kernel-doc.py @@ -271,6 +271,16 @@ def main(): logger.addHandler(handler) + python_ver = sys.version_info[:2] + if python_ver < (3,6): + logger.warning("Python 3.6 or later is required by kernel-doc") + + # Return 0 here to avoid breaking compilation + sys.exit(0) + + if python_ver < (3,7): + logger.warning("Python 3.7 or later is required for correct results") + if args.man: out_style = ManFormat(modulename=args.modulename) elif args.none: diff --git a/scripts/lib/kdoc/kdoc_files.py b/scripts/lib/kdoc/kdoc_files.py index 9be4a64df71d..9e09b45b02fa 100644 --- a/scripts/lib/kdoc/kdoc_files.py +++ b/scripts/lib/kdoc/kdoc_files.py @@ -275,8 +275,8 @@ class KernelFiles(): self.config.log.warning("No kernel-doc for file %s", fname) 
             continue

-        for name, arg in self.results[fname]:
-            m = self.out_msg(fname, name, arg)
+        for arg in self.results[fname]:
+            m = self.out_msg(fname, arg.name, arg)

             if m is None:
                 ln = arg.get("ln", 0)
diff --git a/scripts/lib/kdoc/kdoc_item.py b/scripts/lib/kdoc/kdoc_item.py
new file mode 100644
index 000000000000..b3b225764550
--- /dev/null
+++ b/scripts/lib/kdoc/kdoc_item.py
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# A class that will, eventually, encapsulate all of the parsed data that we
+# then pass into the output modules.
+#
+
+class KdocItem:
+    def __init__(self, name, type, start_line, **other_stuff):
+        self.name = name
+        self.type = type
+        self.declaration_start_line = start_line
+        self.sections = {}
+        self.section_start_lines = {}
+        self.parameterlist = []
+        self.parameterdesc_start_lines = {}
+        self.parameterdescs = {}
+        self.parametertypes = {}
+        #
+        # Just save everything else into our own dict so that the output
+        # side can grab it directly as before.  As we move things into more
+        # structured data, this will, hopefully, fade away.
+        #
+        self.other_stuff = other_stuff
+
+    def get(self, key, default = None):
+        return self.other_stuff.get(key, default)
+
+    def __getitem__(self, key):
+        return self.get(key)
+
+    #
+    # Tracking of section and parameter information.
+    #
+    def set_sections(self, sections, start_lines):
+        self.sections = sections
+        self.section_start_lines = start_lines
+
+    def set_params(self, names, descs, types, starts):
+        self.parameterlist = names
+        self.parameterdescs = descs
+        self.parametertypes = types
+        self.parameterdesc_start_lines = starts
diff --git a/scripts/lib/kdoc/kdoc_output.py b/scripts/lib/kdoc/kdoc_output.py
index 86102e628d91..ea8914537ba0 100644
--- a/scripts/lib/kdoc/kdoc_output.py
+++ b/scripts/lib/kdoc/kdoc_output.py
@@ -124,9 +124,7 @@ class OutputFormat:
         Output warnings for identifiers that will be displayed.
         """

-        warnings = args.get('warnings', [])
-
-        for log_msg in warnings:
+        for log_msg in args.warnings:
             self.config.warning(log_msg)

     def check_doc(self, name, args):
@@ -184,7 +182,7 @@ class OutputFormat:

         self.data = ""

-        dtype = args.get('type', "")
+        dtype = args.type

         if dtype == "doc":
             self.out_doc(fname, name, args)
@@ -338,12 +336,7 @@ class RestFormat(OutputFormat):
        starts by putting out the name of the doc section itself, but that
        tends to duplicate a header already in the template file.
""" - - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - section_start_lines = args.get('section_start_lines', {}) - - for section in sectionlist: + for section, text in args.sections.items(): # Skip sections that are in the nosymbol_table if section in self.nosymbol: continue @@ -355,8 +348,8 @@ class RestFormat(OutputFormat): else: self.data += f'{self.lineprefix}**{section}**\n\n' - self.print_lineno(section_start_lines.get(section, 0)) - self.output_highlight(sections[section]) + self.print_lineno(args.section_start_lines.get(section, 0)) + self.output_highlight(text) self.data += "\n" self.data += "\n" @@ -372,24 +365,19 @@ class RestFormat(OutputFormat): func_macro = args.get('func_macro', False) if func_macro: - signature = args['function'] + signature = name else: if args.get('functiontype'): signature = args['functiontype'] + " " - signature += args['function'] + " (" - - parameterlist = args.get('parameterlist', []) - parameterdescs = args.get('parameterdescs', {}) - parameterdesc_start_lines = args.get('parameterdesc_start_lines', {}) - - ln = args.get('declaration_start_line', 0) + signature += name + " (" + ln = args.declaration_start_line count = 0 - for parameter in parameterlist: + for parameter in args.parameterlist: if count != 0: signature += ", " count += 1 - dtype = args['parametertypes'].get(parameter, "") + dtype = args.parametertypes.get(parameter, "") if function_pointer.search(dtype): signature += function_pointer.group(1) + parameter + function_pointer.group(3) @@ -401,7 +389,7 @@ class RestFormat(OutputFormat): self.print_lineno(ln) if args.get('typedef') or not args.get('functiontype'): - self.data += f".. c:macro:: {args['function']}\n\n" + self.data += f".. c:macro:: {name}\n\n" if args.get('typedef'): self.data += " **Typedef**: " @@ -424,26 +412,26 @@ class RestFormat(OutputFormat): # function prototypes apart self.lineprefix = " " - if parameterlist: + if args.parameterlist: self.data += ".. container:: kernelindent\n\n" self.data += f"{self.lineprefix}**Parameters**\n\n" - for parameter in parameterlist: + for parameter in args.parameterlist: parameter_name = KernRe(r'\[.*').sub('', parameter) - dtype = args['parametertypes'].get(parameter, "") + dtype = args.parametertypes.get(parameter, "") if dtype: self.data += f"{self.lineprefix}``{dtype}``\n" else: self.data += f"{self.lineprefix}``{parameter}``\n" - self.print_lineno(parameterdesc_start_lines.get(parameter_name, 0)) + self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0)) self.lineprefix = " " - if parameter_name in parameterdescs and \ - parameterdescs[parameter_name] != KernelDoc.undescribed: + if parameter_name in args.parameterdescs and \ + args.parameterdescs[parameter_name] != KernelDoc.undescribed: - self.output_highlight(parameterdescs[parameter_name]) + self.output_highlight(args.parameterdescs[parameter_name]) self.data += "\n" else: self.data += f"{self.lineprefix}*undescribed*\n\n" @@ -455,10 +443,7 @@ class RestFormat(OutputFormat): def out_enum(self, fname, name, args): oldprefix = self.lineprefix - name = args.get('enum', '') - parameterlist = args.get('parameterlist', []) - parameterdescs = args.get('parameterdescs', {}) - ln = args.get('declaration_start_line', 0) + ln = args.declaration_start_line self.data += f"\n\n.. 
c:enum:: {name}\n\n" @@ -472,11 +457,11 @@ class RestFormat(OutputFormat): self.lineprefix = outer + " " self.data += f"{outer}**Constants**\n\n" - for parameter in parameterlist: + for parameter in args.parameterlist: self.data += f"{outer}``{parameter}``\n" - if parameterdescs.get(parameter, '') != KernelDoc.undescribed: - self.output_highlight(parameterdescs[parameter]) + if args.parameterdescs.get(parameter, '') != KernelDoc.undescribed: + self.output_highlight(args.parameterdescs[parameter]) else: self.data += f"{self.lineprefix}*undescribed*\n\n" self.data += "\n" @@ -487,8 +472,7 @@ class RestFormat(OutputFormat): def out_typedef(self, fname, name, args): oldprefix = self.lineprefix - name = args.get('typedef', '') - ln = args.get('declaration_start_line', 0) + ln = args.declaration_start_line self.data += f"\n\n.. c:type:: {name}\n\n" @@ -504,15 +488,10 @@ class RestFormat(OutputFormat): def out_struct(self, fname, name, args): - name = args.get('struct', "") purpose = args.get('purpose', "") declaration = args.get('definition', "") - dtype = args.get('type', "struct") - ln = args.get('declaration_start_line', 0) - - parameterlist = args.get('parameterlist', []) - parameterdescs = args.get('parameterdescs', {}) - parameterdesc_start_lines = args.get('parameterdesc_start_lines', {}) + dtype = args.type + ln = args.declaration_start_line self.data += f"\n\n.. c:{dtype}:: {name}\n\n" @@ -536,21 +515,21 @@ class RestFormat(OutputFormat): self.lineprefix = " " self.data += f"{self.lineprefix}**Members**\n\n" - for parameter in parameterlist: + for parameter in args.parameterlist: if not parameter or parameter.startswith("#"): continue parameter_name = parameter.split("[", maxsplit=1)[0] - if parameterdescs.get(parameter_name) == KernelDoc.undescribed: + if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed: continue - self.print_lineno(parameterdesc_start_lines.get(parameter_name, 0)) + self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0)) self.data += f"{self.lineprefix}``{parameter}``\n" self.lineprefix = " " - self.output_highlight(parameterdescs[parameter_name]) + self.output_highlight(args.parameterdescs[parameter_name]) self.lineprefix = " " self.data += "\n" @@ -636,46 +615,38 @@ class ManFormat(OutputFormat): self.data += line + "\n" def out_doc(self, fname, name, args): - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - if not self.check_doc(name, args): return self.data += f'.TH "{self.modulename}" 9 "{self.modulename}" "{self.man_date}" "API Manual" LINUX' + "\n" - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section}"' + "\n" - self.output_highlight(sections.get(section)) + self.output_highlight(text) def out_function(self, fname, name, args): """output function in man""" - parameterlist = args.get('parameterlist', []) - parameterdescs = args.get('parameterdescs', {}) - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - - self.data += f'.TH "{args["function"]}" 9 "{args["function"]}" "{self.man_date}" "Kernel Hacker\'s Manual" LINUX' + "\n" + self.data += f'.TH "{name}" 9 "{name}" "{self.man_date}" "Kernel Hacker\'s Manual" LINUX' + "\n" self.data += ".SH NAME\n" - self.data += f"{args['function']} \\- {args['purpose']}\n" + self.data += f"{name} \\- {args['purpose']}\n" self.data += ".SH SYNOPSIS\n" if args.get('functiontype', ''): - self.data += f'.B "{args["functiontype"]}" {args["function"]}' + "\n" + self.data += f'.B 
"{args["functiontype"]}" {name}' + "\n" else: - self.data += f'.B "{args["function"]}' + "\n" + self.data += f'.B "{name}' + "\n" count = 0 parenth = "(" post = "," - for parameter in parameterlist: - if count == len(parameterlist) - 1: + for parameter in args.parameterlist: + if count == len(args.parameterlist) - 1: post = ");" - dtype = args['parametertypes'].get(parameter, "") + dtype = args.parametertypes.get(parameter, "") if function_pointer.match(dtype): # Pointer-to-function self.data += f'".BI "{parenth}{function_pointer.group(1)}" " ") ({function_pointer.group(2)}){post}"' + "\n" @@ -686,38 +657,32 @@ class ManFormat(OutputFormat): count += 1 parenth = "" - if parameterlist: + if args.parameterlist: self.data += ".SH ARGUMENTS\n" - for parameter in parameterlist: + for parameter in args.parameterlist: parameter_name = re.sub(r'\[.*', '', parameter) self.data += f'.IP "{parameter}" 12' + "\n" - self.output_highlight(parameterdescs.get(parameter_name, "")) + self.output_highlight(args.parameterdescs.get(parameter_name, "")) - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section.upper()}"' + "\n" - self.output_highlight(sections[section]) + self.output_highlight(text) def out_enum(self, fname, name, args): - - name = args.get('enum', '') - parameterlist = args.get('parameterlist', []) - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - - self.data += f'.TH "{self.modulename}" 9 "enum {args["enum"]}" "{self.man_date}" "API Manual" LINUX' + "\n" + self.data += f'.TH "{self.modulename}" 9 "enum {name}" "{self.man_date}" "API Manual" LINUX' + "\n" self.data += ".SH NAME\n" - self.data += f"enum {args['enum']} \\- {args['purpose']}\n" + self.data += f"enum {name} \\- {args['purpose']}\n" self.data += ".SH SYNOPSIS\n" - self.data += f"enum {args['enum']}" + " {\n" + self.data += f"enum {name}" + " {\n" count = 0 - for parameter in parameterlist: + for parameter in args.parameterlist: self.data += f'.br\n.BI " {parameter}"' + "\n" - if count == len(parameterlist) - 1: + if count == len(args.parameterlist) - 1: self.data += "\n};\n" else: self.data += ", \n.br\n" @@ -726,68 +691,59 @@ class ManFormat(OutputFormat): self.data += ".SH Constants\n" - for parameter in parameterlist: + for parameter in args.parameterlist: parameter_name = KernRe(r'\[.*').sub('', parameter) self.data += f'.IP "{parameter}" 12' + "\n" - self.output_highlight(args['parameterdescs'].get(parameter_name, "")) + self.output_highlight(args.parameterdescs.get(parameter_name, "")) - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section}"' + "\n" - self.output_highlight(sections[section]) + self.output_highlight(text) def out_typedef(self, fname, name, args): module = self.modulename - typedef = args.get('typedef') purpose = args.get('purpose') - sectionlist = args.get('sectionlist', []) - sections = args.get('sections', {}) - self.data += f'.TH "{module}" 9 "{typedef}" "{self.man_date}" "API Manual" LINUX' + "\n" + self.data += f'.TH "{module}" 9 "{name}" "{self.man_date}" "API Manual" LINUX' + "\n" self.data += ".SH NAME\n" - self.data += f"typedef {typedef} \\- {purpose}\n" + self.data += f"typedef {name} \\- {purpose}\n" - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section}"' + "\n" - self.output_highlight(sections.get(section)) + self.output_highlight(text) def out_struct(self, fname, name, args): module = self.modulename - struct_type = 
args.get('type') - struct_name = args.get('struct') purpose = args.get('purpose') definition = args.get('definition') - sectionlist = args.get('sectionlist', []) - parameterlist = args.get('parameterlist', []) - sections = args.get('sections', {}) - parameterdescs = args.get('parameterdescs', {}) - self.data += f'.TH "{module}" 9 "{struct_type} {struct_name}" "{self.man_date}" "API Manual" LINUX' + "\n" + self.data += f'.TH "{module}" 9 "{args.type} {name}" "{self.man_date}" "API Manual" LINUX' + "\n" self.data += ".SH NAME\n" - self.data += f"{struct_type} {struct_name} \\- {purpose}\n" + self.data += f"{args.type} {name} \\- {purpose}\n" # Replace tabs with two spaces and handle newlines declaration = definition.replace("\t", " ") declaration = KernRe(r"\n").sub('"\n.br\n.BI "', declaration) self.data += ".SH SYNOPSIS\n" - self.data += f"{struct_type} {struct_name} " + "{" + "\n.br\n" + self.data += f"{args.type} {name} " + "{" + "\n.br\n" self.data += f'.BI "{declaration}\n' + "};\n.br\n\n" self.data += ".SH Members\n" - for parameter in parameterlist: + for parameter in args.parameterlist: if parameter.startswith("#"): continue parameter_name = re.sub(r"\[.*", "", parameter) - if parameterdescs.get(parameter_name) == KernelDoc.undescribed: + if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed: continue self.data += f'.IP "{parameter}" 12' + "\n" - self.output_highlight(parameterdescs.get(parameter_name)) + self.output_highlight(args.parameterdescs.get(parameter_name)) - for section in sectionlist: + for section, text in args.sections.items(): self.data += f'.SH "{section}"' + "\n" - self.output_highlight(sections.get(section)) + self.output_highlight(text) diff --git a/scripts/lib/kdoc/kdoc_parser.py b/scripts/lib/kdoc/kdoc_parser.py index 3115558925ac..fe730099eca8 100644 --- a/scripts/lib/kdoc/kdoc_parser.py +++ b/scripts/lib/kdoc/kdoc_parser.py @@ -12,11 +12,12 @@ Read a C language source or header FILE and extract embedded documentation comments """ +import sys import re from pprint import pformat from kdoc_re import NestedMatch, KernRe - +from kdoc_item import KdocItem # # Regular expressions used to parse kernel-doc markups at KernelDoc class. @@ -42,12 +43,13 @@ doc_decl = doc_com + KernRe(r'(\w+)', cache=False) # @{section-name}: # while trying to not match literal block starts like "example::" # +known_section_names = 'description|context|returns?|notes?|examples?' +known_sections = KernRe(known_section_names, flags = re.I) doc_sect = doc_com + \ - KernRe(r'\s*(\@[.\w]+|\@\.\.\.|description|context|returns?|notes?|examples?)\s*:([^:].*)?$', - flags=re.I, cache=False) + KernRe(r'\s*(\@[.\w]+|\@\.\.\.|' + known_section_names + r')\s*:([^:].*)?$', + flags=re.I, cache=False) doc_content = doc_com_body + KernRe(r'(.*)', cache=False) -doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False) doc_inline_start = KernRe(r'^\s*/\*\*\s*$', cache=False) doc_inline_sect = KernRe(r'\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)', cache=False) doc_inline_end = KernRe(r'^\s*\*/\s*$', cache=False) @@ -60,6 +62,25 @@ export_symbol_ns = KernRe(r'^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+" type_param = KernRe(r"\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False) +# +# Tests for the beginning of a kerneldoc block in its various forms. +# +doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False) +doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)", cache = False) +doc_begin_func = KernRe(str(doc_com) + # initial " * ' + r"(?:\w+\s*\*\s*)?" 
+ # type (not captured) + r'(?:define\s+)?' + # possible "define" (not captured) + r'(\w+)\s*(?:\(\w*\))?\s*' + # name and optional "(...)" + r'(?:[-:].*)?$', # description (not captured) + cache = False) + +# +# A little helper to get rid of excess white space +# +multi_space = KernRe(r'\s\s+') +def trim_whitespace(s): + return multi_space.sub(' ', s.strip()) + class state: """ State machine enums @@ -68,40 +89,26 @@ class state: # Parser states NORMAL = 0 # normal code NAME = 1 # looking for function name - BODY_MAYBE = 2 # body - or maybe more description + DECLARATION = 2 # We have seen a declaration which might not be done BODY = 3 # the body of the comment - BODY_WITH_BLANK_LINE = 4 # the body which has a blank line + SPECIAL_SECTION = 4 # doc section ending with a blank line PROTO = 5 # scanning prototype DOCBLOCK = 6 # documentation block - INLINE = 7 # gathering doc outside main block + INLINE_NAME = 7 # gathering doc outside main block + INLINE_TEXT = 8 # reading the body of inline docs name = [ "NORMAL", "NAME", - "BODY_MAYBE", + "DECLARATION", "BODY", - "BODY_WITH_BLANK_LINE", + "SPECIAL_SECTION", "PROTO", "DOCBLOCK", - "INLINE", + "INLINE_NAME", + "INLINE_TEXT", ] - # Inline documentation state - INLINE_NA = 0 # not applicable ($state != INLINE) - INLINE_NAME = 1 # looking for member name (@foo:) - INLINE_TEXT = 2 # looking for member documentation - INLINE_END = 3 # done - INLINE_ERROR = 4 # error - Comment without header was found. - # Spit a warning as it's not - # proper kernel-doc and ignore the rest. - - inline_name = [ - "", - "_NAME", - "_TEXT", - "_END", - "_ERROR", - ] SECTION_DEFAULT = "Description" # default section @@ -110,10 +117,7 @@ class KernelEntry: def __init__(self, config, ln): self.config = config - self.contents = "" - self.function = "" - self.sectcheck = "" - self.struct_actual = "" + self._contents = [] self.prototype = "" self.warnings = [] @@ -124,7 +128,6 @@ class KernelEntry: self.parameterdesc_start_lines = {} self.section_start_lines = {} - self.sectionlist = [] self.sections = {} self.anon_struct_union = False @@ -133,10 +136,17 @@ class KernelEntry: # State flags self.brcount = 0 - - self.in_doc_sect = False self.declaration_start_line = ln + 1 + # + # Management of section contents + # + def add_text(self, text): + self._contents.append(text) + + def contents(self): + return '\n'.join(self._contents) + '\n' + # TODO: rename to emit_message after removal of kernel-doc.pl def emit_msg(self, log_msg, warning=True): """Emit a message""" @@ -151,13 +161,27 @@ class KernelEntry: self.warnings.append(log_msg) return + # + # Begin a new section. + # + def begin_section(self, line_no, title = SECTION_DEFAULT, dump = False): + if dump: + self.dump_section(start_new = True) + self.section = title + self.new_start_line = line_no + def dump_section(self, start_new=True): """ Dumps section contents to arrays/hashes intended for that purpose. """ - + # + # If we have accumulated no contents in the default ("description") + # section, don't bother. + # + if self.section == SECTION_DEFAULT and not self._contents: + return name = self.section - contents = self.contents + contents = self.contents() if type_param.match(name): name = type_param.group(1) @@ -165,14 +189,6 @@ class KernelEntry: self.parameterdescs[name] = contents self.parameterdesc_start_lines[name] = self.new_start_line - self.sectcheck += name + " " - self.new_start_line = 0 - - elif name == "@...": - name = "..." 
- self.parameterdescs[name] = contents - self.sectcheck += name + " " - self.parameterdesc_start_lines[name] = self.new_start_line self.new_start_line = 0 else: @@ -181,10 +197,10 @@ class KernelEntry: if name != SECTION_DEFAULT: self.emit_msg(self.new_start_line, f"duplicate section name '{name}'\n") - self.sections[name] += contents + # Treat as a new paragraph - add a blank line + self.sections[name] += '\n' + contents else: self.sections[name] = contents - self.sectionlist.append(name) self.section_start_lines[name] = self.new_start_line self.new_start_line = 0 @@ -192,7 +208,7 @@ class KernelEntry: if start_new: self.section = SECTION_DEFAULT - self.contents = "" + self._contents = [] class KernelDoc: @@ -203,7 +219,6 @@ class KernelDoc: # Section names - section_intro = "Introduction" section_context = "Context" section_return = "Return" @@ -217,7 +232,6 @@ class KernelDoc: # Initial state for the state machines self.state = state.NORMAL - self.inline_doc_state = state.INLINE_NA # Store entry currently being processed self.entry = None @@ -225,6 +239,14 @@ class KernelDoc: # Place all potential outputs into an array self.entries = [] + # + # We need Python 3.7 for its "dicts remember the insertion + # order" guarantee + # + if sys.version_info.major == 3 and sys.version_info.minor < 7: + self.emit_msg(0, + 'Python 3.7 or later is required for correct results') + def emit_msg(self, ln, msg, warning=True): """Emit a message""" @@ -255,32 +277,20 @@ class KernelDoc: The actual output and output filters will be handled elsewhere """ - # The implementation here is different than the original kernel-doc: - # instead of checking for output filters or actually output anything, - # it just stores the declaration content at self.entries, as the - # output will happen on a separate class. - # - # For now, we're keeping the same name of the function just to make - # easier to compare the source code of both scripts - - args["declaration_start_line"] = self.entry.declaration_start_line - args["type"] = dtype - args["warnings"] = self.entry.warnings - - # TODO: use colletions.OrderedDict to remove sectionlist - - sections = args.get('sections', {}) - sectionlist = args.get('sectionlist', []) + item = KdocItem(name, dtype, self.entry.declaration_start_line, **args) + item.warnings = self.entry.warnings # Drop empty sections # TODO: improve empty sections logic to emit warnings + sections = self.entry.sections for section in ["Description", "Return"]: - if section in sectionlist: - if not sections[section].rstrip(): - del sections[section] - sectionlist.remove(section) - - self.entries.append((name, args)) + if section in sections and not sections[section].rstrip(): + del sections[section] + item.set_sections(sections, self.entry.section_start_lines) + item.set_params(self.entry.parameterlist, self.entry.parameterdescs, + self.entry.parametertypes, + self.entry.parameterdesc_start_lines) + self.entries.append(item) self.config.log.debug("Output: %s:%s = %s", dtype, name, pformat(args)) @@ -294,7 +304,6 @@ class KernelDoc: # State flags self.state = state.NORMAL - self.inline_doc_state = state.INLINE_NA def push_parameter(self, ln, decl_type, param, dtype, org_arg, declaration_name): @@ -367,15 +376,6 @@ class KernelDoc: org_arg = KernRe(r'\s\s+').sub(' ', org_arg) self.entry.parametertypes[param] = org_arg - def save_struct_actual(self, actual): - """ - Strip all spaces from the actual param so that it looks like - one string item. 
- """ - - actual = KernRe(r'\s*').sub("", actual, count=1) - - self.entry.struct_actual += actual + " " def create_parameter_list(self, ln, decl_type, args, splitter, declaration_name): @@ -421,7 +421,6 @@ class KernelDoc: param = arg dtype = KernRe(r'([^\(]+\(\*?)\s*' + re.escape(param)).sub(r'\1', arg) - self.save_struct_actual(param) self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name) @@ -438,7 +437,6 @@ class KernelDoc: dtype = KernRe(r'([^\(]+\(\*?)\s*' + re.escape(param)).sub(r'\1', arg) - self.save_struct_actual(param) self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name) @@ -471,7 +469,6 @@ class KernelDoc: param = r.group(1) - self.save_struct_actual(r.group(2)) self.push_parameter(ln, decl_type, r.group(2), f"{dtype} {r.group(1)}", arg, declaration_name) @@ -483,52 +480,27 @@ class KernelDoc: continue if dtype != "": # Skip unnamed bit-fields - self.save_struct_actual(r.group(1)) self.push_parameter(ln, decl_type, r.group(1), f"{dtype}:{r.group(2)}", arg, declaration_name) else: - self.save_struct_actual(param) self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name) - def check_sections(self, ln, decl_name, decl_type, sectcheck, prmscheck): + def check_sections(self, ln, decl_name, decl_type): """ Check for errors inside sections, emitting warnings if not found parameters are described. """ - - sects = sectcheck.split() - prms = prmscheck.split() - err = False - - for sx in range(len(sects)): # pylint: disable=C0200 - err = True - for px in range(len(prms)): # pylint: disable=C0200 - prm_clean = prms[px] - prm_clean = KernRe(r'\[.*\]').sub('', prm_clean) - prm_clean = attribute.sub('', prm_clean) - - # ignore array size in a parameter string; - # however, the original param string may contain - # spaces, e.g.: addr[6 + 2] - # and this appears in @prms as "addr[6" since the - # parameter list is split at spaces; - # hence just ignore "[..." 
for the sections check; - prm_clean = KernRe(r'\[.*').sub('', prm_clean) - - if prm_clean == sects[sx]: - err = False - break - - if err: + for section in self.entry.sections: + if section not in self.entry.parameterlist and \ + not known_sections.search(section): if decl_type == 'function': dname = f"{decl_type} parameter" else: dname = f"{decl_type} member" - self.emit_msg(ln, - f"Excess {dname} '{sects[sx]}' description in '{decl_name}'") + f"Excess {dname} '{section}' description in '{decl_name}'") def check_return_section(self, ln, declaration_name, return_type): """ @@ -783,8 +755,7 @@ class KernelDoc: self.create_parameter_list(ln, decl_type, members, ';', declaration_name) - self.check_sections(ln, declaration_name, decl_type, - self.entry.sectcheck, self.entry.struct_actual) + self.check_sections(ln, declaration_name, decl_type) # Adjust declaration for better display declaration = KernRe(r'([\{;])').sub(r'\1\n', declaration) @@ -820,15 +791,7 @@ class KernelDoc: level += 1 self.output_declaration(decl_type, declaration_name, - struct=declaration_name, definition=declaration, - parameterlist=self.entry.parameterlist, - parameterdescs=self.entry.parameterdescs, - parametertypes=self.entry.parametertypes, - parameterdesc_start_lines=self.entry.parameterdesc_start_lines, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines, purpose=self.entry.declaration_purpose) def dump_enum(self, ln, proto): @@ -846,39 +809,48 @@ class KernelDoc: # Strip #define macros inside enums proto = KernRe(r'#\s*((define|ifdef|if)\s+|endif)[^;]*;', flags=re.S).sub('', proto) - members = None - declaration_name = None - + # + # Parse out the name and members of the enum. Typedef form first. + # r = KernRe(r'typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;') if r.search(proto): declaration_name = r.group(2) members = r.group(1).rstrip() + # + # Failing that, look for a straight enum + # else: r = KernRe(r'enum\s+(\w*)\s*\{(.*)\}') if r.match(proto): declaration_name = r.group(1) members = r.group(2).rstrip() - - if not members: - self.emit_msg(ln, f"{proto}: error: Cannot parse enum!") - return - + # + # OK, this isn't going to work. + # + else: + self.emit_msg(ln, f"{proto}: error: Cannot parse enum!") + return + # + # Make sure we found what we were expecting. + # if self.entry.identifier != declaration_name: if self.entry.identifier == "": self.emit_msg(ln, f"{proto}: wrong kernel-doc identifier on prototype") else: self.emit_msg(ln, - f"expecting prototype for enum {self.entry.identifier}. Prototype was for enum {declaration_name} instead") + f"expecting prototype for enum {self.entry.identifier}. " + f"Prototype was for enum {declaration_name} instead") return if not declaration_name: declaration_name = "(anonymous)" - + # + # Parse out the name of each enum member, and verify that we + # have a description for it. + # member_set = set() - - members = KernRe(r'\([^;]*?[\)]').sub('', members) - + members = KernRe(r'\([^;)]*\)').sub('', members) for arg in members.split(','): if not arg: continue @@ -889,20 +861,15 @@ class KernelDoc: self.emit_msg(ln, f"Enum value '{arg}' not described in enum '{declaration_name}'") member_set.add(arg) - + # + # Ensure that every described member actually exists in the enum. 
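The enum-member parsing above strips parenthesized expressions before splitting on commas; otherwise a comma inside a macro invocation such as MAX(a, b) would be mistaken for a member separator. A standalone sketch of the idea (the enum body and the trailing name extraction are illustrative simplifications, not the parser's exact code):

import re

# Hypothetical body of "enum { ... }" after brace extraction
members = 'FOO = MAX(a, b), BAR, BAZ = 2'

# Drop parenthesized initializer parts first, as dump_enum() does above
members = re.sub(r'\([^;)]*\)', '', members)

names = [re.sub(r'=.*', '', m).strip() for m in members.split(',')]
print(names)    # ['FOO', 'BAR', 'BAZ']
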
+ # for k in self.entry.parameterdescs: if k not in member_set: self.emit_msg(ln, f"Excess enum value '%{k}' description in '{declaration_name}'") self.output_declaration('enum', declaration_name, - enum=declaration_name, - parameterlist=self.entry.parameterlist, - parameterdescs=self.entry.parameterdescs, - parameterdesc_start_lines=self.entry.parameterdesc_start_lines, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines, purpose=self.entry.declaration_purpose) def dump_declaration(self, ln, prototype): @@ -912,18 +879,13 @@ class KernelDoc: if self.entry.decl_type == "enum": self.dump_enum(ln, prototype) - return - - if self.entry.decl_type == "typedef": + elif self.entry.decl_type == "typedef": self.dump_typedef(ln, prototype) - return - - if self.entry.decl_type in ["union", "struct"]: + elif self.entry.decl_type in ["union", "struct"]: self.dump_struct(ln, prototype) - return - - self.output_declaration(self.entry.decl_type, prototype, - entry=self.entry) + else: + # This would be a bug + self.emit_message(ln, f'Unknown declaration type: {self.entry.decl_type}') def dump_function(self, ln, prototype): """ @@ -1057,38 +1019,20 @@ class KernelDoc: f"expecting prototype for {self.entry.identifier}(). Prototype was for {declaration_name}() instead") return - prms = " ".join(self.entry.parameterlist) - self.check_sections(ln, declaration_name, "function", - self.entry.sectcheck, prms) + self.check_sections(ln, declaration_name, "function") self.check_return_section(ln, declaration_name, return_type) if 'typedef' in return_type: self.output_declaration(decl_type, declaration_name, - function=declaration_name, typedef=True, functiontype=return_type, - parameterlist=self.entry.parameterlist, - parameterdescs=self.entry.parameterdescs, - parametertypes=self.entry.parametertypes, - parameterdesc_start_lines=self.entry.parameterdesc_start_lines, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines, purpose=self.entry.declaration_purpose, func_macro=func_macro) else: self.output_declaration(decl_type, declaration_name, - function=declaration_name, typedef=False, functiontype=return_type, - parameterlist=self.entry.parameterlist, - parameterdescs=self.entry.parameterdescs, - parametertypes=self.entry.parametertypes, - parameterdesc_start_lines=self.entry.parameterdesc_start_lines, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines, purpose=self.entry.declaration_purpose, func_macro=func_macro) @@ -1125,16 +1069,8 @@ class KernelDoc: self.create_parameter_list(ln, decl_type, args, ',', declaration_name) self.output_declaration(decl_type, declaration_name, - function=declaration_name, typedef=True, functiontype=return_type, - parameterlist=self.entry.parameterlist, - parameterdescs=self.entry.parameterdescs, - parametertypes=self.entry.parametertypes, - parameterdesc_start_lines=self.entry.parameterdesc_start_lines, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines, purpose=self.entry.declaration_purpose) return @@ -1154,10 +1090,6 @@ class KernelDoc: return self.output_declaration('typedef', declaration_name, - typedef=declaration_name, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines, purpose=self.entry.declaration_purpose) return @@ -1172,17 
+1104,28 @@ class KernelDoc: with a staticmethod decorator. """ + # We support documenting some exported symbols with different + # names. A horrible hack. + suffixes = [ '_noprof' ] + # Note: it accepts only one EXPORT_SYMBOL* per line, as having # multiple export lines would violate Kernel coding style. if export_symbol.search(line): symbol = export_symbol.group(2) - function_set.add(symbol) - return - - if export_symbol_ns.search(line): + elif export_symbol_ns.search(line): symbol = export_symbol_ns.group(2) - function_set.add(symbol) + else: + return False + # + # Found an export, trim out any special suffixes + # + for suffix in suffixes: + # Be backward compatible with Python < 3.9 + if symbol.endswith(suffix): + symbol = symbol[:-len(suffix)] + function_set.add(symbol) + return True def process_normal(self, ln, line): """ @@ -1194,7 +1137,6 @@ class KernelDoc: # start a new entry self.reset_state(ln) - self.entry.in_doc_sect = False # next line is always the function name self.state = state.NAME @@ -1203,80 +1145,60 @@ class KernelDoc: """ STATE_NAME: Looking for the "name - description" line """ - + # + # Check for a DOC: block and handle them specially. + # if doc_block.search(line): - self.entry.new_start_line = ln if not doc_block.group(1): - self.entry.section = self.section_intro + self.entry.begin_section(ln, "Introduction") else: - self.entry.section = doc_block.group(1) + self.entry.begin_section(ln, doc_block.group(1)) self.entry.identifier = self.entry.section self.state = state.DOCBLOCK - return - - if doc_decl.search(line): + # + # Otherwise we're looking for a normal kerneldoc declaration line. + # + elif doc_decl.search(line): self.entry.identifier = doc_decl.group(1) - self.entry.is_kernel_comment = False - - decl_start = str(doc_com) # comment block asterisk - fn_type = r"(?:\w+\s*\*\s*)?" # type (for non-functions) - parenthesis = r"(?:\(\w*\))?" # optional parenthesis on function - decl_end = r"(?:[-:].*)" # end of the name part - - # test for pointer declaration type, foo * bar() - desc - r = KernRe(fr"^{decl_start}([\w\s]+?){parenthesis}?\s*{decl_end}?$") - if r.search(line): - self.entry.identifier = r.group(1) # Test for data declaration - r = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)") - if r.search(line): - self.entry.decl_type = r.group(1) - self.entry.identifier = r.group(2) - self.entry.is_kernel_comment = True + if doc_begin_data.search(line): + self.entry.decl_type = doc_begin_data.group(1) + self.entry.identifier = doc_begin_data.group(2) + # + # Look for a function description + # + elif doc_begin_func.search(line): + self.entry.identifier = doc_begin_func.group(1) + self.entry.decl_type = "function" + # + # We struck out. 
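The suffix list in process_export_file() above handles exported symbols whose EXPORT_SYMBOL() name carries a _noprof variant suffix while the kernel-doc comment documents the plain name. A sketch of the trimming, with a hypothetical captured symbol:

suffixes = ['_noprof']

symbol = 'kmalloc_noprof'    # as captured from an EXPORT_SYMBOL() line
for suffix in suffixes:
    if symbol.endswith(suffix):
        symbol = symbol[:-len(suffix)]
print(symbol)    # kmalloc

str.removesuffix() would express this more directly, but it only exists from Python 3.9 on, hence the manual slice the code above notes as backward compatible.
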
+ # else: - # Look for foo() or static void foo() - description; - # or misspelt identifier - - r1 = KernRe(fr"^{decl_start}{fn_type}(\w+)\s*{parenthesis}\s*{decl_end}?$") - r2 = KernRe(fr"^{decl_start}{fn_type}(\w+[^-:]*){parenthesis}\s*{decl_end}$") - - for r in [r1, r2]: - if r.search(line): - self.entry.identifier = r.group(1) - self.entry.decl_type = "function" - - r = KernRe(r"define\s+") - self.entry.identifier = r.sub("", self.entry.identifier) - self.entry.is_kernel_comment = True - break - - self.entry.identifier = self.entry.identifier.strip(" ") - - self.state = state.BODY - - # if there's no @param blocks need to set up default section here - self.entry.section = SECTION_DEFAULT - self.entry.new_start_line = ln + 1 - - r = KernRe("[-:](.*)") - if r.search(line): - # strip leading/trailing/multiple spaces - self.entry.descr = r.group(1).strip(" ") - - r = KernRe(r"\s+") - self.entry.descr = r.sub(" ", self.entry.descr) - self.entry.declaration_purpose = self.entry.descr - self.state = state.BODY_MAYBE - else: - self.entry.declaration_purpose = "" - - if not self.entry.is_kernel_comment: self.emit_msg(ln, f"This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst\n{line}") self.state = state.NORMAL + return + # + # OK, set up for a new kerneldoc entry. + # + self.state = state.BODY + self.entry.identifier = self.entry.identifier.strip(" ") + # if there's no @param blocks need to set up default section here + self.entry.begin_section(ln + 1) + # + # Find the description portion, which *should* be there but + # isn't always. + # (We should be able to capture this from the previous parsing - someday) + # + r = KernRe("[-:](.*)") + if r.search(line): + self.entry.declaration_purpose = trim_whitespace(r.group(1)) + self.state = state.DECLARATION + else: + self.entry.declaration_purpose = "" if not self.entry.declaration_purpose and self.config.wshort_desc: self.emit_msg(ln, @@ -1291,60 +1213,51 @@ class KernelDoc: self.emit_msg(ln, f"Scanning doc for {self.entry.decl_type} {self.entry.identifier}", warning=False) - - return - + # # Failed to find an identifier. Emit a warning - self.emit_msg(ln, f"Cannot find identifier on line:\n{line}") - - def process_body(self, ln, line): - """ - STATE_BODY and STATE_BODY_MAYBE: the bulk of a kerneldoc comment. - """ - - if self.state == state.BODY_WITH_BLANK_LINE: - r = KernRe(r"\s*\*\s?\S") - if r.match(line): - self.dump_section() - self.entry.section = SECTION_DEFAULT - self.entry.new_start_line = ln - self.entry.contents = "" + # + else: + self.emit_msg(ln, f"Cannot find identifier on line:\n{line}") + # + # Helper function to determine if a new section is being started. + # + def is_new_section(self, ln, line): if doc_sect.search(line): - self.entry.in_doc_sect = True + self.state = state.BODY + # + # Pick out the name of our new section, tweaking it if need be. + # newsection = doc_sect.group(1) - - if newsection.lower() in ["description", "context"]: - newsection = newsection.title() - - # Special case: @return is a section, not a param description - if newsection.lower() in ["@return", "@returns", - "return", "returns"]: + if newsection.lower() == 'description': + newsection = 'Description' + elif newsection.lower() == 'context': + newsection = 'Context' + self.state = state.SPECIAL_SECTION + elif newsection.lower() in ["@return", "@returns", + "return", "returns"]: newsection = "Return" - - # Perl kernel-doc has a check here for contents before sections. 
- # the logic there is always false, as in_doc_sect variable is - # always true. So, just don't implement Wcontents_before_sections - - # .title() + self.state = state.SPECIAL_SECTION + elif newsection[0] == '@': + self.state = state.SPECIAL_SECTION + # + # Initialize the contents, and get the new section going. + # newcontents = doc_sect.group(2) if not newcontents: newcontents = "" - - if self.entry.contents.strip("\n"): - self.dump_section() - - self.entry.new_start_line = ln - self.entry.section = newsection + self.dump_section() + self.entry.begin_section(ln, newsection) self.entry.leading_space = None - self.entry.contents = newcontents.lstrip() - if self.entry.contents: - self.entry.contents += "\n" - - self.state = state.BODY - return + self.entry.add_text(newcontents.lstrip()) + return True + return False + # + # Helper function to detect (and effect) the end of a kerneldoc comment. + # + def is_comment_end(self, ln, line): if doc_end.search(line): self.dump_section() @@ -1357,100 +1270,128 @@ class KernelDoc: self.entry.new_start_line = ln + 1 self.state = state.PROTO + return True + return False + + + def process_decl(self, ln, line): + """ + STATE_DECLARATION: We've seen the beginning of a declaration + """ + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): + return + # + # Look for anything with the " * " line beginning. + # + if doc_content.search(line): + cont = doc_content.group(1) + # + # A blank line means that we have moved out of the declaration + # part of the comment (without any "special section" parameter + # descriptions). + # + if cont == "": + self.state = state.BODY + # + # Otherwise we have more of the declaration section to soak up. + # + else: + self.entry.declaration_purpose = \ + trim_whitespace(self.entry.declaration_purpose + ' ' + cont) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") + + + def process_special(self, ln, line): + """ + STATE_SPECIAL_SECTION: a section ending with a blank line + """ + # + # If we have hit a blank line (only the " * " marker), then this + # section is done. + # + if KernRe(r"\s*\*\s*$").match(line): + self.entry.begin_section(ln, dump = True) + self.state = state.BODY + return + # + # Not a blank line, look for the other ways to end the section. + # + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): + return + # + # OK, we should have a continuation of the text for this section. + # + if doc_content.search(line): + cont = doc_content.group(1) + # + # If the lines of text after the first in a special section have + # leading white space, we need to trim it out or Sphinx will get + # confused. For the second line (the None case), see what we + # find there and remember it. + # + if self.entry.leading_space is None: + r = KernRe(r'^(\s+)') + if r.match(cont): + self.entry.leading_space = len(r.group(1)) + else: + self.entry.leading_space = 0 + # + # Otherwise, before trimming any leading chars, be *sure* + # that they are white space. We should maybe warn if this + # isn't the case. + # + for i in range(0, self.entry.leading_space): + if cont[i] != " ": + self.entry.leading_space = i + break + # + # Add the trimmed result to the section and we're done. + # + self.entry.add_text(cont[self.entry.leading_space:]) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") + + def process_body(self, ln, line): + """ + STATE_BODY: the bulk of a kerneldoc comment. 
+ """ + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): return if doc_content.search(line): cont = doc_content.group(1) + self.entry.add_text(cont) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") - if cont == "": - if self.entry.section == self.section_context: - self.dump_section() + def process_inline_name(self, ln, line): + """STATE_INLINE_NAME: beginning of docbook comments within a prototype.""" - self.entry.new_start_line = ln - self.state = state.BODY - else: - if self.entry.section != SECTION_DEFAULT: - self.state = state.BODY_WITH_BLANK_LINE - else: - self.state = state.BODY + if doc_inline_sect.search(line): + self.entry.begin_section(ln, doc_inline_sect.group(1)) + self.entry.add_text(doc_inline_sect.group(2).lstrip()) + self.state = state.INLINE_TEXT + elif doc_inline_end.search(line): + self.dump_section() + self.state = state.PROTO + elif doc_content.search(line): + self.emit_msg(ln, f"Incorrect use of kernel-doc format: {line}") + self.state = state.PROTO + # else ... ?? - self.entry.contents += "\n" - - elif self.state == state.BODY_MAYBE: - - # Continued declaration purpose - self.entry.declaration_purpose = self.entry.declaration_purpose.rstrip() - self.entry.declaration_purpose += " " + cont - - r = KernRe(r"\s+") - self.entry.declaration_purpose = r.sub(' ', - self.entry.declaration_purpose) - - else: - if self.entry.section.startswith('@') or \ - self.entry.section == self.section_context: - if self.entry.leading_space is None: - r = KernRe(r'^(\s+)') - if r.match(cont): - self.entry.leading_space = len(r.group(1)) - else: - self.entry.leading_space = 0 - - # Double-check if leading space are realy spaces - pos = 0 - for i in range(0, self.entry.leading_space): - if cont[i] != " ": - break - pos += 1 - - cont = cont[pos:] - - # NEW LOGIC: - # In case it is different, update it - if self.entry.leading_space != pos: - self.entry.leading_space = pos - - self.entry.contents += cont + "\n" - return - - # Unknown line, ignore - self.emit_msg(ln, f"bad line: {line}") - - def process_inline(self, ln, line): - """STATE_INLINE: docbook comments within a prototype.""" - - if self.inline_doc_state == state.INLINE_NAME and \ - doc_inline_sect.search(line): - self.entry.section = doc_inline_sect.group(1) - self.entry.new_start_line = ln - - self.entry.contents = doc_inline_sect.group(2).lstrip() - if self.entry.contents != "": - self.entry.contents += "\n" - - self.inline_doc_state = state.INLINE_TEXT - # Documentation block end */ - return + def process_inline_text(self, ln, line): + """STATE_INLINE_TEXT: docbook comments within a prototype.""" if doc_inline_end.search(line): - if self.entry.contents not in ["", "\n"]: - self.dump_section() - + self.dump_section() self.state = state.PROTO - self.inline_doc_state = state.INLINE_NA - return - - if doc_content.search(line): - if self.inline_doc_state == state.INLINE_TEXT: - self.entry.contents += doc_content.group(1) + "\n" - if not self.entry.contents.strip(" ").rstrip("\n"): - self.entry.contents = "" - - elif self.inline_doc_state == state.INLINE_NAME: - self.emit_msg(ln, - f"Incorrect use of kernel-doc format: {line}") - - self.inline_doc_state = state.INLINE_ERROR + elif doc_content.search(line): + self.entry.add_text(doc_content.group(1)) + # else ... ?? 
def syscall_munge(self, ln, proto): # pylint: disable=W0613 """ @@ -1532,105 +1473,94 @@ class KernelDoc: """Ancillary routine to process a function prototype""" # strip C99-style comments to end of line - r = KernRe(r"\/\/.*$", re.S) - line = r.sub('', line) - + line = KernRe(r"\/\/.*$", re.S).sub('', line) + # + # Soak up the line's worth of prototype text, stopping at { or ; if present. + # if KernRe(r'\s*#\s*define').match(line): self.entry.prototype = line - elif line.startswith('#'): - # Strip other macros like #ifdef/#ifndef/#endif/... - pass - else: + elif not line.startswith('#'): # skip other preprocessor stuff r = KernRe(r'([^\{]*)') if r.match(line): self.entry.prototype += r.group(1) + " " - + # + # If we now have the whole prototype, clean it up and declare victory. + # if '{' in line or ';' in line or KernRe(r'\s*#\s*define').match(line): - # strip comments - r = KernRe(r'/\*.*?\*/') - self.entry.prototype = r.sub('', self.entry.prototype) - - # strip newlines/cr's - r = KernRe(r'[\r\n]+') - self.entry.prototype = r.sub(' ', self.entry.prototype) - - # strip leading spaces - r = KernRe(r'^\s+') - self.entry.prototype = r.sub('', self.entry.prototype) - + # strip comments and surrounding spaces + self.entry.prototype = KernRe(r'/\*.*\*/').sub('', self.entry.prototype).strip() + # # Handle self.entry.prototypes for function pointers like: # int (*pcs_config)(struct foo) - + # by turning it into + # int pcs_config(struct foo) + # r = KernRe(r'^(\S+\s+)\(\s*\*(\S+)\)') self.entry.prototype = r.sub(r'\1\2', self.entry.prototype) - + # + # Handle special declaration syntaxes + # if 'SYSCALL_DEFINE' in self.entry.prototype: self.entry.prototype = self.syscall_munge(ln, self.entry.prototype) - - r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT') - if r.search(self.entry.prototype): - self.entry.prototype = self.tracepoint_munge(ln, - self.entry.prototype) - + else: + r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT') + if r.search(self.entry.prototype): + self.entry.prototype = self.tracepoint_munge(ln, + self.entry.prototype) + # + # ... and we're done + # self.dump_function(ln, self.entry.prototype) self.reset_state(ln) def process_proto_type(self, ln, line): """Ancillary routine to process a type""" - # Strip newlines/cr's. - line = KernRe(r'[\r\n]+', re.S).sub(' ', line) - - # Strip leading spaces - line = KernRe(r'^\s+', re.S).sub('', line) - - # Strip trailing spaces - line = KernRe(r'\s+$', re.S).sub('', line) - - # Strip C99-style comments to the end of the line - line = KernRe(r"\/\/.*$", re.S).sub('', line) + # Strip C99-style comments and surrounding whitespace + line = KernRe(r"//.*$", re.S).sub('', line).strip() + if not line: + return # nothing to see here # To distinguish preprocessor directive from regular declaration later. if line.startswith('#'): line += ";" - - r = KernRe(r'([^\{\};]*)([\{\};])(.*)') - while True: - if r.search(line): - if self.entry.prototype: - self.entry.prototype += " " - self.entry.prototype += r.group(1) + r.group(2) - - self.entry.brcount += r.group(2).count('{') - self.entry.brcount -= r.group(2).count('}') - - self.entry.brcount = max(self.entry.brcount, 0) - - if r.group(2) == ';' and self.entry.brcount == 0: + # + # Split the declaration on any of { } or ;, and accumulate pieces + # until we hit a semicolon while not inside {brackets} + # + r = KernRe(r'(.*?)([{};])') + for chunk in r.split(line): + if chunk: # Ignore empty matches + self.entry.prototype += chunk + # + # This cries out for a match statement ... 
someday after we can + # drop Python 3.9 ... + # + if chunk == '{': + self.entry.brcount += 1 + elif chunk == '}': + self.entry.brcount -= 1 + elif chunk == ';' and self.entry.brcount <= 0: self.dump_declaration(ln, self.entry.prototype) self.reset_state(ln) - break - - line = r.group(3) - else: - self.entry.prototype += line - break + return + # + # We hit the end of the line while still in the declaration; put + # in a space to represent the newline. + # + self.entry.prototype += ' ' def process_proto(self, ln, line): """STATE_PROTO: reading a function/whatever prototype.""" if doc_inline_oneline.search(line): - self.entry.section = doc_inline_oneline.group(1) - self.entry.contents = doc_inline_oneline.group(2) - - if self.entry.contents != "": - self.entry.contents += "\n" - self.dump_section(start_new=False) + self.entry.begin_section(ln, doc_inline_oneline.group(1)) + self.entry.add_text(doc_inline_oneline.group(2)) + self.dump_section() elif doc_inline_start.search(line): - self.state = state.INLINE - self.inline_doc_state = state.INLINE_NAME + self.state = state.INLINE_NAME elif self.entry.decl_type == 'function': self.process_proto_function(ln, line) @@ -1643,14 +1573,11 @@ class KernelDoc: if doc_end.search(line): self.dump_section() - self.output_declaration("doc", self.entry.identifier, - sectionlist=self.entry.sectionlist, - sections=self.entry.sections, - section_start_lines=self.entry.section_start_lines) + self.output_declaration("doc", self.entry.identifier) self.reset_state(ln) elif doc_content.search(line): - self.entry.contents += doc_content.group(1) + "\n" + self.entry.add_text(doc_content.group(1)) def parse_export(self): """ @@ -1671,6 +1598,22 @@ class KernelDoc: return export_table + # + # The state/action table telling us which function to invoke in + # each state. + # + state_actions = { + state.NORMAL: process_normal, + state.NAME: process_name, + state.BODY: process_body, + state.DECLARATION: process_decl, + state.SPECIAL_SECTION: process_special, + state.INLINE_NAME: process_inline_name, + state.INLINE_TEXT: process_inline_text, + state.PROTO: process_proto, + state.DOCBLOCK: process_docblock, + } + def parse_kdoc(self): """ Open and process each line of a C source file. @@ -1681,7 +1624,6 @@ class KernelDoc: Besides parsing kernel-doc tags, it also parses export symbols. """ - cont = False prev = "" prev_ln = None export_table = set() @@ -1697,23 +1639,18 @@ class KernelDoc: if self.state == state.PROTO: if line.endswith("\\"): prev += line.rstrip("\\") - cont = True - if not prev_ln: prev_ln = ln - continue - if cont: + if prev: ln = prev_ln line = prev + line prev = "" - cont = False prev_ln = None - self.config.log.debug("%d %s%s: %s", + self.config.log.debug("%d %s: %s", ln, state.name[self.state], - state.inline_name[self.inline_doc_state], line) # This is an optimization over the original script. @@ -1721,25 +1658,11 @@ class KernelDoc: # it was read twice. Here, we use the already-existing # loop to parse exported symbols as well. # - # TODO: It should be noticed that not all states are - # needed here. On a future cleanup, process export only - # at the states that aren't handling comment markups. 
-                self.process_export(export_table, line)
+                if (self.state != state.NORMAL) or \
+                   not self.process_export(export_table, line):
+                    # Hand this line to the appropriate state handler
+                    self.state_actions[self.state](self, ln, line)
 
-                # Hand this line to the appropriate state handler
-                if self.state == state.NORMAL:
-                    self.process_normal(ln, line)
-                elif self.state == state.NAME:
-                    self.process_name(ln, line)
-                elif self.state in [state.BODY, state.BODY_MAYBE,
-                                    state.BODY_WITH_BLANK_LINE]:
-                    self.process_body(ln, line)
-                elif self.state == state.INLINE:  # scanning for inline parameters
-                    self.process_inline(ln, line)
-                elif self.state == state.PROTO:
-                    self.process_proto(ln, line)
-                elif self.state == state.DOCBLOCK:
-                    self.process_docblock(ln, line)
 
         except OSError:
             self.config.log.error(f"Error: Cannot open file {self.fname}")
diff --git a/scripts/lib/kdoc/kdoc_re.py b/scripts/lib/kdoc/kdoc_re.py
index e81695b273bf..612223e1e723 100644
--- a/scripts/lib/kdoc/kdoc_re.py
+++ b/scripts/lib/kdoc/kdoc_re.py
@@ -29,12 +29,9 @@ class KernRe:
         """
         Adds a new regex or re-use it from the cache.
         """
-
-        if string in re_cache:
-            self.regex = re_cache[string]
-        else:
+        self.regex = re_cache.get(string, None)
+        if not self.regex:
             self.regex = re.compile(string, flags=flags)
-
             if self.cache:
                 re_cache[string] = self.regex
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index ad9945ccb0cf..3f8d6925e896 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -245,6 +245,10 @@ sub check_missing_tex($)
 
 sub get_sphinx_fname()
 {
+	if ($ENV{'SPHINXBUILD'}) {
+		return $ENV{'SPHINXBUILD'};
+	}
+
 	my $fname = "sphinx-build";
 
 	return $fname if findprog($fname);
@@ -409,7 +413,7 @@ sub give_redhat_hints()
 	my $old = 0;
 	my $rel;
 	my $noto_sans_redhat = "google-noto-sans-cjk-ttc-fonts";
-	$rel = $1 if ($system_release =~ /release\s+(\d+)/);
+	$rel = $2 if ($system_release =~ /(release|Linux)\s+(\d+)/);
 
 	if (!($system_release =~ /Fedora/)) {
 		$map{"virtualenv"} = "python-virtualenv";
diff --git a/scripts/test_doc_build.py b/scripts/test_doc_build.py
new file mode 100755
index 000000000000..47b4606569f9
--- /dev/null
+++ b/scripts/test_doc_build.py
@@ -0,0 +1,513 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab
+#
+# pylint: disable=R0903,R0912,R0913,R0914,R0917,C0301
+
+"""
+Install minimal supported requirements for different Sphinx versions
+and optionally test the build.
+"""
+
+import argparse
+import asyncio
+import os.path
+import shutil
+import subprocess
+import sys
+import time
+
+# Current Python interpreter, used as a fallback.
+PYTHON = os.path.basename(sys.executable)
+
+# Probe for the oldest Python version the build system still supports.
+min_python_bin = None
+
+for i in range(9, 13):
+    p = f"python3.{i}"
+    if shutil.which(p):
+        min_python_bin = p
+        break
+
+if not min_python_bin:
+    min_python_bin = PYTHON
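
The SPHINX_REQUIREMENTS table that follows is incremental: each entry lists only the pins that change at that Sphinx version, and SphinxVenv.run() folds the entries together while walking versions in ascending order. A sketch of the resolution this produces, using a hypothetical helper that is not part of the script:

    def effective_requirements(table, target):
        """Fold incremental entries up to (and including) target."""
        reqs = {}
        for ver in sorted(table):
            if ver > target:
                break
            reqs.update(table[ver])  # later entries override earlier pins
        return reqs

    # For (5, 3, 0), "docutils" resolves to "0.18.1": the pin moves
    # 0.16 -> 0.17 -> 0.17.1 -> 0.18 -> 0.18.1 as the (3,4,3), (4,1,0),
    # (4,5,0), (5,2,0) and (5,3,0) entries are applied in turn.
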
+# Starting with Sphinx 8.0, Python 3.9 is no longer supported; switch
+# to the default interpreter from that version onward.
+PYTHON_VER_CHANGES = {(8, 0, 0): PYTHON}
+
+DEFAULT_VERSIONS_TO_TEST = [
+    (3, 4, 3),  # Minimal supported version
+    (5, 3, 0),  # CentOS Stream 9 / AlmaLinux 9
+    (6, 1, 1),  # Debian 12
+    (7, 2, 1),  # openSUSE Leap 15.6
+    (7, 2, 6),  # Ubuntu 24.04 LTS
+    (7, 3, 0),  # openSUSE Tumbleweed
+    (7, 4, 7),  # Ubuntu 24.10
+    (8, 1, 3),  # Fedora 42
+    (8, 2, 3),  # Latest version - covers rolling distros
+]
+
+# Sphinx versions to be installed and their incremental requirements
+SPHINX_REQUIREMENTS = {
+    # Oldest versions we support for each package required by Sphinx 3.4.3
+    (3, 4, 3): {
+        "alabaster": "0.7.12",
+        "babel": "2.8.0",
+        "certifi": "2020.6.20",
+        "docutils": "0.16",
+        "idna": "2.10",
+        "imagesize": "1.2.0",
+        "Jinja2": "2.11.2",
+        "MarkupSafe": "1.1.1",
+        "packaging": "20.4",
+        "Pygments": "2.6.1",
+        "PyYAML": "5.1",
+        "requests": "2.24.0",
+        "snowballstemmer": "2.0.0",
+        "sphinxcontrib-applehelp": "1.0.2",
+        "sphinxcontrib-devhelp": "1.0.2",
+        "sphinxcontrib-htmlhelp": "1.0.3",
+        "sphinxcontrib-jsmath": "1.0.1",
+        "sphinxcontrib-qthelp": "1.0.3",
+        "sphinxcontrib-serializinghtml": "1.1.4",
+        "urllib3": "1.25.9",
+    },
+
+    # Update package dependencies to a more modern base. The goal here
+    # is to avoid too many incremental changes for the next entries.
+    (3, 5, 0): {
+        "alabaster": "0.7.13",
+        "babel": "2.17.0",
+        "certifi": "2025.6.15",
+        "idna": "3.10",
+        "imagesize": "1.4.1",
+        "packaging": "25.0",
+        "Pygments": "2.8.1",
+        "requests": "2.32.4",
+        "snowballstemmer": "3.0.1",
+        "sphinxcontrib-applehelp": "1.0.4",
+        "sphinxcontrib-htmlhelp": "2.0.1",
+        "sphinxcontrib-serializinghtml": "1.1.5",
+        "urllib3": "2.0.0",
+    },
+
+    # Starting from here, ensure all docutils versions are covered with
+    # supported Sphinx versions. Other packages are upgraded only when
+    # required by pip.
+    (4, 0, 0): {
+        "PyYAML": "5.1",
+    },
+    (4, 1, 0): {
+        "docutils": "0.17",
+        "Pygments": "2.19.1",
+        "Jinja2": "3.0.3",
+        "MarkupSafe": "2.0",
+    },
+    (4, 3, 0): {},
+    (4, 4, 0): {},
+    (4, 5, 0): {
+        "docutils": "0.17.1",
+    },
+    (5, 0, 0): {},
+    (5, 1, 0): {},
+    (5, 2, 0): {
+        "docutils": "0.18",
+        "Jinja2": "3.1.2",
+        "MarkupSafe": "2.0",
+        "PyYAML": "5.3.1",
+    },
+    (5, 3, 0): {
+        "docutils": "0.18.1",
+    },
+    (6, 0, 0): {},
+    (6, 1, 0): {},
+    (6, 2, 0): {
+        "PyYAML": "5.4.1",
+    },
+    (7, 0, 0): {},
+    (7, 1, 0): {},
+    (7, 2, 0): {
+        "docutils": "0.19",
+        "PyYAML": "6.0.1",
+        "sphinxcontrib-serializinghtml": "1.1.9",
+    },
+    (7, 2, 6): {
+        "docutils": "0.20",
+    },
+    (7, 3, 0): {
+        "alabaster": "0.7.14",
+        "PyYAML": "6.0.1",
+        "tomli": "2.0.1",
+    },
+    (7, 4, 0): {
+        "docutils": "0.20.1",
+        "PyYAML": "6.0.1",
+    },
+    (8, 0, 0): {
+        "docutils": "0.21",
+    },
+    (8, 1, 0): {
+        "docutils": "0.21.1",
+        "PyYAML": "6.0.1",
+        "sphinxcontrib-applehelp": "1.0.7",
+        "sphinxcontrib-devhelp": "1.0.6",
+        "sphinxcontrib-htmlhelp": "2.0.6",
+        "sphinxcontrib-qthelp": "1.0.6",
+    },
+    (8, 2, 0): {
+        "docutils": "0.21.2",
+        "PyYAML": "6.0.1",
+        "sphinxcontrib-serializinghtml": "1.1.9",
+    },
+}
+
+
+class AsyncCommands:
+    """Run commands asynchronously, logging and capturing their output."""
+
+    def __init__(self, fp=None):
+        self.stdout = None
+        self.stderr = None
+        self.fp = fp
+
+    def log(self, out, verbose, is_info=True):
+        out = out.removesuffix('\n')
+
+        if verbose:
+            if is_info:
+                print(out)
+            else:
+                print(out, file=sys.stderr)
+
+        if self.fp:
+            self.fp.write(out + "\n")
+
+    async def _read(self, stream, verbose, is_info):
+        """Ancillary routine to capture output while displaying it"""
+
+        while stream is not None:
+            line = await stream.readline()
+            if line:
+                out = line.decode("utf-8", errors="backslashreplace")
+                self.log(out, verbose, is_info)
+                if is_info:
+                    self.stdout += out
+                else:
+                    self.stderr += out
+            else:
+                break
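
The _read() helper above and the run() method that follows implement real-time capture: both pipes are drained concurrently with asyncio.gather(), so the child can never stall because one pipe's buffer filled while the parent was blocked reading the other. The same pattern, reduced to a standalone sketch (illustrative names, not the script's API):

    import asyncio

    async def run_and_capture(cmd):
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)

        captured = []

        async def drain(stream):
            # Read line by line so output can be echoed as it arrives.
            while True:
                line = await stream.readline()
                if not line:
                    break
                text = line.decode("utf-8", errors="backslashreplace")
                print(text, end="")
                captured.append(text)

        # Drain both pipes at once; draining them one after the other
        # could deadlock if the child fills the second pipe first.
        await asyncio.gather(drain(proc.stdout), drain(proc.stderr))
        return await proc.wait(), "".join(captured)

    # rc, out = asyncio.run(run_and_capture(["make", "htmldocs"]))
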
+    async def run(self, cmd, capture_output=False, check=False,
+                  env=None, verbose=True):
+        """
+        Execute an arbitrary command, handling errors.
+
+        Please note that this class is not thread-safe.
+        """
+
+        self.stdout = ""
+        self.stderr = ""
+
+        self.log("$ " + " ".join(cmd), verbose)
+
+        proc = await asyncio.create_subprocess_exec(cmd[0],
+                                                    *cmd[1:],
+                                                    env=env,
+                                                    stdout=asyncio.subprocess.PIPE,
+                                                    stderr=asyncio.subprocess.PIPE)
+
+        # Handle input and output in realtime
+        await asyncio.gather(
+            self._read(proc.stdout, verbose, True),
+            self._read(proc.stderr, verbose, False),
+        )
+
+        await proc.wait()
+
+        if check and proc.returncode > 0:
+            raise subprocess.CalledProcessError(returncode=proc.returncode,
+                                                cmd=" ".join(cmd),
+                                                output=self.stdout,
+                                                stderr=self.stderr)
+
+        if capture_output:
+            if proc.returncode > 0:
+                self.log(f"Error {proc.returncode}", verbose=True, is_info=False)
+                return ""
+
+            return self.stdout
+
+        ret = subprocess.CompletedProcess(args=cmd,
+                                          returncode=proc.returncode,
+                                          stdout=self.stdout,
+                                          stderr=self.stderr)
+
+        return ret
+
+
+class SphinxVenv:
+    """
+    Installs Sphinx on one virtual env per Sphinx version with a minimal
+    set of dependencies, adjusting them to each specific version.
+    """
+
+    def __init__(self):
+        """Initialize instance variables"""
+
+        self.built_time = {}
+        self.first_run = True
+
+    async def _handle_version(self, args, fp,
+                              cur_ver, cur_requirements, python_bin):
+        """Handle a single Sphinx version"""
+
+        cmd = AsyncCommands(fp)
+
+        ver = ".".join(map(str, cur_ver))
+
+        if not self.first_run and args.wait_input and args.build:
+            ret = input("Press Enter to continue or 'a' to abort: ").strip().lower()
+            if ret == "a":
+                print("Aborted.")
+                sys.exit()
+        else:
+            self.first_run = False
+
+        venv_dir = f"Sphinx_{ver}"
+        req_file = f"requirements_{ver}.txt"
+
+        cmd.log(f"\nSphinx {ver} with {python_bin}", verbose=True)
+
+        # Create venv
+        await cmd.run([python_bin, "-m", "venv", venv_dir],
+                      verbose=args.verbose, check=True)
+        pip = os.path.join(venv_dir, "bin/pip")
+
+        # Create install list
+        reqs = []
+        for pkg, verstr in cur_requirements.items():
+            reqs.append(f"{pkg}=={verstr}")
+
+        reqs.append(f"Sphinx=={ver}")
+
+        await cmd.run([pip, "install"] + reqs, check=True, verbose=args.verbose)
+
+        # Freeze environment
+        result = await cmd.run([pip, "freeze"], verbose=False, check=True)
+
+        # Pip install succeeded. Write requirements file
+        if args.req_file:
+            with open(req_file, "w", encoding="utf-8") as req_fp:
+                req_fp.write(result.stdout)
+
+        if args.build:
+            start_time = time.time()
+
+            # Prepare a venv environment
+            env = os.environ.copy()
+            bin_dir = os.path.join(venv_dir, "bin")
+            env["PATH"] = bin_dir + ":" + env["PATH"]
+            env["VIRTUAL_ENV"] = venv_dir
+            if "PYTHONHOME" in env:
+                del env["PYTHONHOME"]
+
+            # Test doc build
+            await cmd.run(["make", "cleandocs"], env=env, check=True)
+            make = ["make"]
+
+            if args.output:
+                sphinx_build = os.path.realpath(f"{bin_dir}/sphinx-build")
+                make += [f"O={args.output}", f"SPHINXBUILD={sphinx_build}"]
+
+            if args.make_args:
+                make += args.make_args
+
+            make += args.targets
+
+            if args.verbose:
+                cmd.log(f". {bin_dir}/activate", verbose=True)
+            await cmd.run(make, env=env, check=True, verbose=True)
+            if args.verbose:
+                cmd.log("deactivate", verbose=True)
+
+            end_time = time.time()
+            elapsed_time = end_time - start_time
+            hours, minutes = divmod(elapsed_time, 3600)
+            minutes, seconds = divmod(minutes, 60)
+
+            hours = int(hours)
+            minutes = int(minutes)
+            seconds = int(seconds)
+
+            self.built_time[ver] = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
+
+            cmd.log(f"Finished doc build for Sphinx {ver}. Elapsed time: {self.built_time[ver]}", verbose=True)
+
+    async def run(self, args):
+        """
+        Iterate through the requested Sphinx versions, handling each of
+        them in turn.
+        """
+
+        if args.log:
+            fp = open(args.log, "w", encoding="utf-8")
+        else:
+            fp = None
+            # Without a log file, be verbose by default
+            if not args.verbose:
+                args.verbose = True
+
+        cur_requirements = {}
+        python_bin = min_python_bin
+
+        vers = set(SPHINX_REQUIREMENTS.keys()) | set(args.versions)
+
+        for cur_ver in sorted(vers):
+            if cur_ver in SPHINX_REQUIREMENTS:
+                new_reqs = SPHINX_REQUIREMENTS[cur_ver]
+                cur_requirements.update(new_reqs)
+
+            if cur_ver in PYTHON_VER_CHANGES:  # pylint: disable=R1715
+                python_bin = PYTHON_VER_CHANGES[cur_ver]
+
+            if cur_ver not in args.versions:
+                continue
+
+            if args.min_version:
+                if cur_ver < args.min_version:
+                    continue
+
+            if args.max_version:
+                if cur_ver > args.max_version:
+                    break
+
+            await self._handle_version(args, fp, cur_ver, cur_requirements,
+                                       python_bin)
+
+        if args.build:
+            cmd = AsyncCommands(fp)
+            cmd.log("\nSummary:", verbose=True)
+            for ver, elapsed_time in sorted(self.built_time.items()):
+                cmd.log(f"\tSphinx {ver} elapsed time: {elapsed_time}",
+                        verbose=True)
+
+        if fp:
+            fp.close()
+
+
+def parse_version(ver_str):
+    """Convert a version string into a tuple."""
+
+    return tuple(map(int, ver_str.split(".")))
+
+
+DEFAULT_VERS = " - "
+DEFAULT_VERS += "\n - ".join(map(lambda v: f"{v[0]}.{v[1]}.{v[2]}",
+                                 DEFAULT_VERSIONS_TO_TEST))
+
+SCRIPT = os.path.relpath(__file__)
+
+DESCRIPTION = f"""
+This tool creates Python virtual environments for the different
+Sphinx versions that are supported by the Linux Kernel build system.
+
+Besides creating the virtual environment, it can also test building
+the documentation using "make htmldocs" (and/or other doc targets).
+
+If called without the "--versions" argument, it covers the versions
+shipped on major distros, plus the lowest supported version:
+
+{DEFAULT_VERS}
+
+A typical usage is to run:
+
+   {SCRIPT} -b -l sphinx_builds.log
+
+This will create one virtual env for the default version set and run
+"make htmldocs" for each version, creating a log file with the
+executed commands in it.
+
+NOTE: The build time can be very long, especially on old versions. Also,
+there is a known bug with Sphinx version 6.0.x: each subprocess uses a
+lot of memory. That, together with "-jauto", may trigger the OOM killer
+and make the documentation build fail. To minimize the risk, you may use
+the "-a" command line parameter to constrain the built directories
+and/or reduce the number of threads from "-jauto" to, for instance,
+"-j4":
+
+   {SCRIPT} -b -V 6.0.1 -a "SPHINXDIRS=process" "SPHINXOPTS='-j4'"
+
+"""
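
Since parse_version() above turns "7.2.6" into the tuple (7, 2, 6), the --min/--max range checks in SphinxVenv.run() can rely on Python's lexicographic tuple ordering, which, unlike naive string comparison, sorts 3.10 after 3.4. A small self-contained illustration (restating parse_version for completeness):

    def parse_version(ver_str):
        """Convert a version string into a tuple."""
        return tuple(map(int, ver_str.split(".")))

    # Integer tuples get numeric ordering right where strings do not:
    assert parse_version("3.10.0") > parse_version("3.4.3")
    assert "3.10.0" < "3.4.3"  # plain strings compare character-wise

    # The same comparison drives the --min/--max range check:
    versions = [(3, 4, 3), (6, 1, 1), (8, 2, 3)]
    wanted = [v for v in versions if (5, 0, 0) <= v <= (8, 0, 0)]
    assert wanted == [(6, 1, 1)]
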
+
+MAKE_TARGETS = [
+    "htmldocs",
+    "texinfodocs",
+    "infodocs",
+    "latexdocs",
+    "pdfdocs",
+    "epubdocs",
+    "xmldocs",
+]
+
+
+async def main():
+    """Main program"""
+
+    parser = argparse.ArgumentParser(description=DESCRIPTION,
+                                     formatter_class=argparse.RawDescriptionHelpFormatter)
+
+    ver_group = parser.add_argument_group("Version range options")
+
+    ver_group.add_argument('-V', '--versions', nargs="*",
+                           default=DEFAULT_VERSIONS_TO_TEST, type=parse_version,
+                           help='Sphinx versions to test')
+    ver_group.add_argument('--min-version', "--min", type=parse_version,
+                           help='Sphinx minimal version')
+    ver_group.add_argument('--max-version', "--max", type=parse_version,
+                           help='Sphinx maximum version')
+    ver_group.add_argument('-f', '--full', action='store_true',
+                           help='Add all supported Sphinx (major, minor) versions to the version range')
+
+    build_group = parser.add_argument_group("Build options")
+
+    build_group.add_argument('-b', '--build', action='store_true',
+                             help='Build documentation')
+    build_group.add_argument('-a', '--make-args', nargs="*",
+                             help='Extra arguments for make, like SPHINXDIRS=netlink/specs')
+    build_group.add_argument('-t', '--targets', nargs="+", choices=MAKE_TARGETS,
+                             default=[MAKE_TARGETS[0]],
+                             help="make build targets. Default: htmldocs.")
+    build_group.add_argument("-o", '--output',
+                             help="Output directory for make O=OUTPUT")
+
+    other_group = parser.add_argument_group("Other options")
+
+    other_group.add_argument('-r', '--req-file', action='store_true',
+                             help='Write a requirements.txt file')
+    other_group.add_argument('-l', '--log',
+                             help='Log command output to a file')
+    other_group.add_argument('-v', '--verbose', action='store_true',
+                             help='Show all executed commands')
+    other_group.add_argument('-i', '--wait-input', action='store_true',
+                             help='Wait for Enter before moving to the next version')
+
+    args = parser.parse_args()
+
+    if not args.make_args:
+        args.make_args = []
+
+    sphinx_versions = sorted(SPHINX_REQUIREMENTS.keys())
+
+    if args.full:
+        args.versions += sphinx_versions
+
+    venv = SphinxVenv()
+    await venv.run(args)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 1a8ee4ff0e32..d6f2362d3792 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -25,8 +25,6 @@ BEGIN {
 	printversion("Module-init-tools", version("depmod -V"))
 	printversion("E2fsprogs", version("tune2fs"))
 	printversion("Jfsutils", version("fsck.jfs -V"))
-	printversion("Reiserfsprogs", version("reiserfsck -V"))
-	printversion("Reiser4fsprogs", version("fsck.reiser4 -V"))
 	printversion("Xfsprogs", version("xfs_db -V"))
 	printversion("Pcmciautils", version("pccardctl -V"))
 	printversion("Pcmcia-cs", version("cardmgr -V"))