125 files changed, 3808 insertions, 1925 deletions
diff --git a/Documentation/ABI/README b/Documentation/ABI/README index 8bac9cb09a6d..ef0e6d11e919 100644 --- a/Documentation/ABI/README +++ b/Documentation/ABI/README @@ -1,4 +1,5 @@ -This directory attempts to document the ABI between the Linux kernel and +This part of the documentation inside Documentation/ABI directory +attempts to document the ABI between the Linux kernel and userspace, and the relative stability of these interfaces. Due to the everchanging nature of Linux, and the differing maturity levels, these interfaces should be used by userspace programs in different ways. diff --git a/Documentation/ABI/removed/sysfs-class-rfkill b/Documentation/ABI/removed/sysfs-class-rfkill index f25174eafd55..20cb688af173 100644 --- a/Documentation/ABI/removed/sysfs-class-rfkill +++ b/Documentation/ABI/removed/sysfs-class-rfkill @@ -4,7 +4,7 @@ For details to this subsystem look at Documentation/driver-api/rfkill.rst. What: /sys/class/rfkill/rfkill[0-9]+/claim Date: 09-Jul-2007 -KernelVersion v2.6.22 +KernelVersion: v2.6.22 Contact: linux-wireless@vger.kernel.org Description: This file was deprecated because there no longer was a way to claim just control over a single rfkill instance. diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill index 037979f7dc4b..67b605e3dd16 100644 --- a/Documentation/ABI/stable/sysfs-class-rfkill +++ b/Documentation/ABI/stable/sysfs-class-rfkill @@ -16,7 +16,7 @@ Description: The rfkill class subsystem folder. What: /sys/class/rfkill/rfkill[0-9]+/name Date: 09-Jul-2007 -KernelVersion v2.6.22 +KernelVersion: v2.6.22 Contact: linux-wireless@vger.kernel.org Description: Name assigned by driver to this key (interface or driver name). Values: arbitrary string. @@ -24,7 +24,7 @@ Values: arbitrary string. What: /sys/class/rfkill/rfkill[0-9]+/type Date: 09-Jul-2007 -KernelVersion v2.6.22 +KernelVersion: v2.6.22 Contact: linux-wireless@vger.kernel.org Description: Driver type string ("wlan", "bluetooth", etc). Values: See include/linux/rfkill.h. @@ -32,7 +32,7 @@ Values: See include/linux/rfkill.h. What: /sys/class/rfkill/rfkill[0-9]+/persistent Date: 09-Jul-2007 -KernelVersion v2.6.22 +KernelVersion: v2.6.22 Contact: linux-wireless@vger.kernel.org Description: Whether the soft blocked state is initialised from non-volatile storage at startup. @@ -44,7 +44,7 @@ Values: A numeric value: What: /sys/class/rfkill/rfkill[0-9]+/state Date: 09-Jul-2007 -KernelVersion v2.6.22 +KernelVersion: v2.6.22 Contact: linux-wireless@vger.kernel.org Description: Current state of the transmitter. This file was scheduled to be removed in 2014, but due to its @@ -67,7 +67,7 @@ Values: A numeric value. What: /sys/class/rfkill/rfkill[0-9]+/hard Date: 12-March-2010 -KernelVersion v2.6.34 +KernelVersion: v2.6.34 Contact: linux-wireless@vger.kernel.org Description: Current hardblock state. This file is read only. Values: A numeric value. @@ -81,7 +81,7 @@ Values: A numeric value. What: /sys/class/rfkill/rfkill[0-9]+/soft Date: 12-March-2010 -KernelVersion v2.6.34 +KernelVersion: v2.6.34 Contact: linux-wireless@vger.kernel.org Description: Current softblock state. This file is read and write. Values: A numeric value. 
diff --git a/Documentation/ABI/stable/sysfs-devices-system-cpu b/Documentation/ABI/stable/sysfs-devices-system-cpu index 902392d7eddf..cf78bd99f6c8 100644 --- a/Documentation/ABI/stable/sysfs-devices-system-cpu +++ b/Documentation/ABI/stable/sysfs-devices-system-cpu @@ -24,12 +24,6 @@ Description: Default value for the Data Stream Control Register (DSCR) on If set by a process it will be inherited by child processes. Values: 64 bit unsigned integer (bit field) -What: /sys/devices/system/cpu/cpuX/topology/physical_package_id -Description: physical package id of cpuX. Typically corresponds to a physical - socket number, but the actual value is architecture and platform - dependent. -Values: integer - What: /sys/devices/system/cpu/cpuX/topology/die_id Description: the CPU die ID of cpuX. Typically it is the hardware platform's identifier (rather than the kernel's). The actual value is @@ -86,10 +80,6 @@ What: /sys/devices/system/cpu/cpuX/topology/die_cpus Description: internal kernel map of CPUs within the same die. Values: hexadecimal bitmask. -What: /sys/devices/system/cpu/cpuX/topology/ppin -Description: per-socket protected processor inventory number -Values: hexadecimal. - What: /sys/devices/system/cpu/cpuX/topology/die_cpus_list Description: human-readable list of CPUs within the same die. The format is like 0-3, 8-11, 14,17. diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd index f2ec42949a54..4a355e6747ae 100644 --- a/Documentation/ABI/stable/sysfs-driver-dma-idxd +++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd @@ -246,14 +246,14 @@ Description: Controls whether PRS disable is turned on for the workqueue. capability. What: /sys/bus/dsa/devices/wq<m>.<n>/occupancy -Date May 25, 2021 +Date: May 25, 2021 KernelVersion: 5.14.0 Contact: dmaengine@vger.kernel.org Description: Show the current number of entries in this WQ if WQ Occupancy Support bit WQ capabilities is 1. What: /sys/bus/dsa/devices/wq<m>.<n>/enqcmds_retries -Date Oct 29, 2021 +Date: Oct 29, 2021 KernelVersion: 5.17.0 Contact: dmaengine@vger.kernel.org Description: Indicate the number of retires for an enqcmds submission on a sharedwq. diff --git a/Documentation/ABI/testing/configfs-usb-gadget-midi2 b/Documentation/ABI/testing/configfs-usb-gadget-midi2 index 0eac3aaba137..d76a52e2ca7f 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget-midi2 +++ b/Documentation/ABI/testing/configfs-usb-gadget-midi2 @@ -47,7 +47,7 @@ Description: midi1_first_group The first UMP Group number for MIDI 1.0 (0-15) midi1_num_groups The number of groups for MIDI 1.0 (0-16) ui_hint 0: unknown, 1: receiver, 2: sender, 3: both - midi_ci_verison Supported MIDI-CI version number (8 bit) + midi_ci_version Supported MIDI-CI version number (8 bit) is_midi1 Legacy MIDI 1.0 device (0, 1 or 2) sysex8_streams Max number of SysEx8 streams (8 bit) active Active FB flag (0 or 1) diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti b/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti index bf2869c413e7..a97b70f588da 100644 --- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti @@ -1,241 +1,241 @@ What: /sys/bus/coresight/devices/<cti-name>/enable Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Enable/Disable the CTI hardware. 
What: /sys/bus/coresight/devices/<cti-name>/powered Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Indicate if the CTI hardware is powered. What: /sys/bus/coresight/devices/<cti-name>/ctmid Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Display the associated CTM ID What: /sys/bus/coresight/devices/<cti-name>/nr_trigger_cons Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Number of devices connected to triggers on this CTI What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/name Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Name of connected device <N> What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/in_signals Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Input trigger signals from connected device <N> What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/in_types Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Functional types for the input trigger signals from connected device <N> What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/out_signals Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Output trigger signals to connected device <N> What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/out_types Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Functional types for the output trigger signals to connected device <N> What: /sys/bus/coresight/devices/<cti-name>/regs/inout_sel Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Select the index for inen and outen registers. What: /sys/bus/coresight/devices/<cti-name>/regs/inen Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Read or write the CTIINEN register selected by inout_sel. What: /sys/bus/coresight/devices/<cti-name>/regs/outen Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Read or write the CTIOUTEN register selected by inout_sel. What: /sys/bus/coresight/devices/<cti-name>/regs/gate Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Read or write CTIGATE register. What: /sys/bus/coresight/devices/<cti-name>/regs/asicctl Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Read or write ASICCTL register. What: /sys/bus/coresight/devices/<cti-name>/regs/intack Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Write the INTACK register. What: /sys/bus/coresight/devices/<cti-name>/regs/appset Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Set CTIAPPSET register to activate channel. Read back to determine current value of register. 
What: /sys/bus/coresight/devices/<cti-name>/regs/appclear Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Write APPCLEAR register to deactivate channel. What: /sys/bus/coresight/devices/<cti-name>/regs/apppulse Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Write APPPULSE to pulse a channel active for one clock cycle. What: /sys/bus/coresight/devices/<cti-name>/regs/chinstatus Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Read current status of channel inputs. What: /sys/bus/coresight/devices/<cti-name>/regs/choutstatus Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) read current status of channel outputs. What: /sys/bus/coresight/devices/<cti-name>/regs/triginstatus Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) read current status of input trigger signals What: /sys/bus/coresight/devices/<cti-name>/regs/trigoutstatus Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) read current status of output trigger signals. What: /sys/bus/coresight/devices/<cti-name>/channels/trigin_attach Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Attach a CTI input trigger to a CTM channel. What: /sys/bus/coresight/devices/<cti-name>/channels/trigin_detach Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Detach a CTI input trigger from a CTM channel. What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_attach Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Attach a CTI output trigger to a CTM channel. What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_detach Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Detach a CTI output trigger from a CTM channel. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_gate_enable Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Enable CTIGATE for single channel (Write) or list enabled channels through the gate (R). What: /sys/bus/coresight/devices/<cti-name>/channels/chan_gate_disable Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Disable CTIGATE for single channel. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_set Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Activate a single channel. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_clear Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Deactivate a single channel. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_pulse Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Pulse a single channel - activate for a single clock cycle. 
What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_filtered Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) List of output triggers filtered across all connections. What: /sys/bus/coresight/devices/<cti-name>/channels/trig_filter_enable Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Enable or disable trigger output signal filtering. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_inuse Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) show channels with at least one attached trigger signal. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_free Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) show channels with no attached trigger signals. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_sel Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (RW) Write channel number to select a channel to view, read to see selected channel number. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_in Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Read to see input triggers connected to selected view channel. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_out Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Read) Read to see output triggers connected to selected view channel. What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_reset Date: March 2020 -KernelVersion 5.7 +KernelVersion: 5.7 Contact: Mike Leach or Mathieu Poirier Description: (Write) Clear all channel / trigger programming. diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm index bf710ea6e0ef..53cb454b60d0 100644 --- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm @@ -1,6 +1,6 @@ What: /sys/bus/coresight/devices/<tpdm-name>/integration_test Date: January 2023 -KernelVersion 6.2 +KernelVersion: 6.2 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (Write) Run integration test for tpdm. Integration test @@ -14,7 +14,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/reset_dataset Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (Write) Reset the dataset of the tpdm. @@ -24,7 +24,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_type Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the trigger type of the DSB for tpdm. @@ -35,7 +35,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_ts Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the trigger timestamp of the DSB for tpdm. 
@@ -46,7 +46,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_mode Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the programming mode of the DSB for tpdm. @@ -60,7 +60,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_idx Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the index number of the edge detection for the DSB @@ -69,7 +69,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_val Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: Write a data to control the edge detection corresponding to @@ -85,7 +85,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_mask Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: Write a data to mask the edge detection corresponding to the index @@ -97,21 +97,21 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcr[0:15] Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: Read a set of the edge control value of the DSB in TPDM. What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcmr[0:7] Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: Read a set of the edge control mask of the DSB in TPDM. What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpr[0:7] Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the value of the trigger pattern for the DSB @@ -119,7 +119,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpmr[0:7] Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the mask of the trigger pattern for the DSB @@ -127,21 +127,21 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpr[0:7] Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the value of the pattern for the DSB subunit TPDM. What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpmr[0:7] Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the mask of the pattern for the DSB subunit TPDM. What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/enable_ts Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (Write) Set the pattern timestamp of DSB tpdm. 
Read @@ -153,7 +153,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/set_type Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (Write) Set the pattern type of DSB tpdm. Read @@ -165,7 +165,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_msr/msr[0:31] Date: March 2023 -KernelVersion 6.7 +KernelVersion: 6.7 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the MSR(mux select register) for the DSB subunit @@ -173,7 +173,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/cmb_mode Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (Write) Set the data collection mode of CMB tpdm. Continuous change creates CMB data set elements on every CMBCLK edge. @@ -187,7 +187,7 @@ Description: (Write) Set the data collection mode of CMB tpdm. Continuous What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_patt/xpr[0:1] Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the value of the trigger pattern for the CMB @@ -195,7 +195,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_patt/xpmr[0:1] Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the mask of the trigger pattern for the CMB @@ -203,21 +203,21 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpr[0:1] Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the value of the pattern for the CMB subunit TPDM. What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpmr[0:1] Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the mask of the pattern for the CMB subunit TPDM. What: /sys/bus/coresight/devices/<tpdm-name>/cmb_patt/enable_ts Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (Write) Set the pattern timestamp of CMB tpdm. Read @@ -229,7 +229,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_ts Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the trigger timestamp of the CMB for tpdm. @@ -240,7 +240,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/cmb_ts_all Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Read or write the status of timestamp upon all interface. 
@@ -252,7 +252,7 @@ Description: What: /sys/bus/coresight/devices/<tpdm-name>/cmb_msr/msr[0:31] Date: January 2024 -KernelVersion 6.9 +KernelVersion: 6.9 Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> Description: (RW) Set/Get the MSR(mux select register) for the CMB subunit diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 3e1630c70d8a..e44bb614964b 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -347,7 +347,7 @@ Description: Used to control configure extension list: - [c] means add/del cold file extension What: /sys/fs/f2fs/<disk>/unusable -Date April 2019 +Date: April 2019 Contact: "Daniel Rosenberg" <drosen@google.com> Description: If checkpoint=disable, it displays the number of blocks that are unusable. @@ -355,7 +355,7 @@ Description: If checkpoint=disable, it displays the number of blocks that would be unusable if checkpoint=disable were to be set. What: /sys/fs/f2fs/<disk>/encoding -Date July 2019 +Date: July 2019 Contact: "Daniel Rosenberg" <drosen@google.com> Description: Displays name and version of the encoding set for the filesystem. If no encoding is set, displays (none) diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power index a3942b1036e2..2192478e83cf 100644 --- a/Documentation/ABI/testing/sysfs-power +++ b/Documentation/ABI/testing/sysfs-power @@ -131,7 +131,7 @@ Description: CAUTION: Using it will cause your machine's real-time (CMOS) clock to be set to a random invalid time after a resume. -What; /sys/power/pm_trace_dev_match +What: /sys/power/pm_trace_dev_match Date: October 2010 Contact: James Hogan <jhogan@kernel.org> Description: diff --git a/Documentation/Makefile b/Documentation/Makefile index 52c6c5a3efa9..63094646df28 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -12,7 +12,7 @@ endif # Check for broken ABI files ifeq ($(CONFIG_WARN_ABI_ERRORS),y) -$(shell $(srctree)/scripts/get_abi.pl validate --dir $(srctree)/Documentation/ABI) +$(shell $(srctree)/scripts/get_abi.py --dir $(srctree)/Documentation/ABI validate) endif # You can set these variables from the command line. diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst index b557cf1c820d..70b02f30013a 100644 --- a/Documentation/admin-guide/README.rst +++ b/Documentation/admin-guide/README.rst @@ -165,7 +165,7 @@ Configuring the kernel "make xconfig" Qt based configuration tool. - "make gconfig" GTK+ based configuration tool. + "make gconfig" GTK based configuration tool. "make oldconfig" Default all questions based on the contents of your existing ./.config file and asking about diff --git a/Documentation/admin-guide/abi-obsolete-files.rst b/Documentation/admin-guide/abi-obsolete-files.rst new file mode 100644 index 000000000000..3061a916b4b5 --- /dev/null +++ b/Documentation/admin-guide/abi-obsolete-files.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Obsolete ABI Files +================== + +.. kernel-abi:: obsolete + :no-symbols: diff --git a/Documentation/admin-guide/abi-obsolete.rst b/Documentation/admin-guide/abi-obsolete.rst index 594e697aa1b2..640f3903e847 100644 --- a/Documentation/admin-guide/abi-obsolete.rst +++ b/Documentation/admin-guide/abi-obsolete.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: GPL-2.0 + ABI obsolete symbols ==================== @@ -7,5 +9,5 @@ marked to be removed at some later point in time. 
The description of the interface will document the reason why it is obsolete and when it can be expected to be removed. -.. kernel-abi:: ABI/obsolete - :rst: +.. kernel-abi:: obsolete + :no-files: diff --git a/Documentation/admin-guide/abi-removed-files.rst b/Documentation/admin-guide/abi-removed-files.rst new file mode 100644 index 000000000000..f1bdfadd2ec4 --- /dev/null +++ b/Documentation/admin-guide/abi-removed-files.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Removed ABI Files +================= + +.. kernel-abi:: removed + :no-symbols: diff --git a/Documentation/admin-guide/abi-removed.rst b/Documentation/admin-guide/abi-removed.rst index f9e000c81828..88832d3eacd6 100644 --- a/Documentation/admin-guide/abi-removed.rst +++ b/Documentation/admin-guide/abi-removed.rst @@ -1,5 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0 + ABI removed symbols =================== -.. kernel-abi:: ABI/removed - :rst: +.. kernel-abi:: removed + :no-files: diff --git a/Documentation/admin-guide/abi-stable-files.rst b/Documentation/admin-guide/abi-stable-files.rst new file mode 100644 index 000000000000..f867738fc178 --- /dev/null +++ b/Documentation/admin-guide/abi-stable-files.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Stable ABI Files +================ + +.. kernel-abi:: stable + :no-symbols: diff --git a/Documentation/admin-guide/abi-stable.rst b/Documentation/admin-guide/abi-stable.rst index fc3361d847b1..528c68401f4b 100644 --- a/Documentation/admin-guide/abi-stable.rst +++ b/Documentation/admin-guide/abi-stable.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: GPL-2.0 + ABI stable symbols ================== @@ -10,5 +12,5 @@ for at least 2 years. Most interfaces (like syscalls) are expected to never change and always be available. -.. kernel-abi:: ABI/stable - :rst: +.. kernel-abi:: stable + :no-files: diff --git a/Documentation/admin-guide/abi-testing-files.rst b/Documentation/admin-guide/abi-testing-files.rst new file mode 100644 index 000000000000..1da868e42fdb --- /dev/null +++ b/Documentation/admin-guide/abi-testing-files.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Testing ABI Files +================= + +.. kernel-abi:: testing + :no-symbols: diff --git a/Documentation/admin-guide/abi-testing.rst b/Documentation/admin-guide/abi-testing.rst index 19767926b344..6153ebd38e2d 100644 --- a/Documentation/admin-guide/abi-testing.rst +++ b/Documentation/admin-guide/abi-testing.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: GPL-2.0 + ABI testing symbols =================== @@ -16,5 +18,5 @@ Programs that use these interfaces are strongly encouraged to add their name to the description of these interfaces, so that the kernel developers can easily notify them if any changes occur. -.. kernel-abi:: ABI/testing - :rst: +.. kernel-abi:: testing + :no-files: diff --git a/Documentation/admin-guide/abi.rst b/Documentation/admin-guide/abi.rst index bcab3ef2597c..c6039359e585 100644 --- a/Documentation/admin-guide/abi.rst +++ b/Documentation/admin-guide/abi.rst @@ -1,7 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0 + ===================== Linux ABI description ===================== +.. kernel-abi:: README + +ABI symbols +----------- + .. toctree:: :maxdepth: 2 @@ -9,3 +16,14 @@ Linux ABI description abi-testing abi-obsolete abi-removed + +ABI files +--------- + +.. 
toctree:: + :maxdepth: 2 + + abi-stable-files + abi-testing-files + abi-obsolete-files + abi-removed-files diff --git a/Documentation/admin-guide/gpio/gpio-sim.rst b/Documentation/admin-guide/gpio/gpio-sim.rst index 1cc5567a4bbe..35d49ccd49e0 100644 --- a/Documentation/admin-guide/gpio/gpio-sim.rst +++ b/Documentation/admin-guide/gpio/gpio-sim.rst @@ -71,7 +71,7 @@ specific lines. The name of those subdirectories must take the form of: ``'line<offset>'`` (e.g. ``'line0'``, ``'line20'``, etc.) as the name will be used by the module to assign the config to the specific line at given offset. -Once the confiuration is complete, the ``'live'`` attribute must be set to 1 in +Once the configuration is complete, the ``'live'`` attribute must be set to 1 in order to instantiate the chip. It can be set back to 0 to destroy the simulated chip. The module will synchronously wait for the new simulated device to be successfully probed and if this doesn't happen, writing to ``'live'`` will diff --git a/Documentation/admin-guide/gpio/gpio-virtuser.rst b/Documentation/admin-guide/gpio/gpio-virtuser.rst index 2aca70db9f3b..7e7c0df51640 100644 --- a/Documentation/admin-guide/gpio/gpio-virtuser.rst +++ b/Documentation/admin-guide/gpio/gpio-virtuser.rst @@ -92,7 +92,7 @@ struct. The first two take string values as arguments: Activating GPIO consumers ------------------------- -Once the confiuration is complete, the ``'live'`` attribute must be set to 1 in +Once the configuration is complete, the ``'live'`` attribute must be set to 1 in order to instantiate the consumer. It can be set back to 0 to destroy the virtual device. The module will synchronously wait for the new simulated device to be successfully probed and if this doesn't happen, writing to ``'live'`` will diff --git a/Documentation/admin-guide/highuid.rst b/Documentation/admin-guide/highuid.rst deleted file mode 100644 index 6ee70465c0ea..000000000000 --- a/Documentation/admin-guide/highuid.rst +++ /dev/null @@ -1,80 +0,0 @@ -=================================================== -Notes on the change from 16-bit UIDs to 32-bit UIDs -=================================================== - -:Author: Chris Wing <wingc@umich.edu> -:Last updated: January 11, 2000 - -- kernel code MUST take into account __kernel_uid_t and __kernel_uid32_t - when communicating between user and kernel space in an ioctl or data - structure. - -- kernel code should use uid_t and gid_t in kernel-private structures and - code. - -What's left to be done for 32-bit UIDs on all Linux architectures: - -- Disk quotas have an interesting limitation that is not related to the - maximum UID/GID. They are limited by the maximum file size on the - underlying filesystem, because quota records are written at offsets - corresponding to the UID in question. - Further investigation is needed to see if the quota system can cope - properly with huge UIDs. If it can deal with 64-bit file offsets on all - architectures, this should not be a problem. - -- Decide whether or not to keep backwards compatibility with the system - accounting file, or if we should break it as the comments suggest - (currently, the old 16-bit UID and GID are still written to disk, and - part of the former pad space is used to store separate 32-bit UID and - GID) - -- Need to validate that OS emulation calls the 16-bit UID - compatibility syscalls, if the OS being emulated used 16-bit UIDs, or - uses the 32-bit UID system calls properly otherwise. 
- - This affects at least: - - - iBCS on Intel - - - sparc32 emulation on sparc64 - (need to support whatever new 32-bit UID system calls are added to - sparc32) - -- Validate that all filesystems behave properly. - - At present, 32-bit UIDs _should_ work for: - - - ext2 - - ufs - - isofs - - nfs - - coda - - udf - - Ioctl() fixups have been made for: - - - ncpfs - - smbfs - - Filesystems with simple fixups to prevent 16-bit UID wraparound: - - - minix - - sysv - - qnx4 - - Other filesystems have not been checked yet. - -- The ncpfs and smpfs filesystems cannot presently use 32-bit UIDs in - all ioctl()s. Some new ioctl()s have been added with 32-bit UIDs, but - more are needed. (as well as new user<->kernel data structures) - -- The ELF core dump format only supports 16-bit UIDs on arm, i386, m68k, - sh, and sparc32. Fixing this is probably not that important, but would - require adding a new ELF section. - -- The ioctl()s used to control the in-kernel NFS server only support - 16-bit UIDs on arm, i386, m68k, sh, and sparc32. - -- make sure that the UID mapping feature of AX25 networking works properly - (it should be safe because it's always used a 32-bit integer to - communicate between user and kernel) diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst index c8af32a8f800..259d79fbeb94 100644 --- a/Documentation/admin-guide/index.rst +++ b/Documentation/admin-guide/index.rst @@ -187,7 +187,6 @@ A few hard-to-categorize and generally obsolete documents. .. toctree:: :maxdepth: 1 - highuid ldm unicode diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst index 609a3201fd4e..9453196ade51 100644 --- a/Documentation/admin-guide/iostats.rst +++ b/Documentation/admin-guide/iostats.rst @@ -2,62 +2,39 @@ I/O statistics fields ===================== -Since 2.4.20 (and some versions before, with patches), and 2.5.45, -more extensive disk statistics have been introduced to help measure disk -activity. Tools such as ``sar`` and ``iostat`` typically interpret these and do -the work for you, but in case you are interested in creating your own -tools, the fields are explained here. - -In 2.4 now, the information is found as additional fields in -``/proc/partitions``. In 2.6 and upper, the same information is found in two -places: one is in the file ``/proc/diskstats``, and the other is within -the sysfs file system, which must be mounted in order to obtain -the information. Throughout this document we'll assume that sysfs -is mounted on ``/sys``, although of course it may be mounted anywhere. -Both ``/proc/diskstats`` and sysfs use the same source for the information -and so should not differ. - -Here are examples of these different formats:: - - 2.4: - 3 0 39082680 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160 - 3 1 9221278 hda1 35486 0 35496 38030 0 0 0 0 0 38030 38030 - - 2.6+ sysfs: - 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160 - 35486 38030 38030 38030 - - 2.6+ diskstats: - 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160 - 3 1 hda1 35486 38030 38030 38030 - - 4.18+ diskstats: - 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160 0 0 0 0 - -On 2.4 you might execute ``grep 'hda ' /proc/partitions``. On 2.6+, you have -a choice of ``cat /sys/block/hda/stat`` or ``grep 'hda ' /proc/diskstats``. 
- -The advantage of one over the other is that the sysfs choice works well -if you are watching a known, small set of disks. ``/proc/diskstats`` may -be a better choice if you are watching a large number of disks because -you'll avoid the overhead of 50, 100, or 500 or more opens/closes with -each snapshot of your disk statistics. - -In 2.4, the statistics fields are those after the device name. In -the above example, the first field of statistics would be 446216. -By contrast, in 2.6+ if you look at ``/sys/block/hda/stat``, you'll -find just the 15 fields, beginning with 446216. If you look at -``/proc/diskstats``, the 15 fields will be preceded by the major and -minor device numbers, and device name. Each of these formats provides -15 fields of statistics, each meaning exactly the same things. -All fields except field 9 are cumulative since boot. Field 9 should -go to zero as I/Os complete; all others only increase (unless they -overflow and wrap). Wrapping might eventually occur on a very busy -or long-lived system; so applications should be prepared to deal with -it. Regarding wrapping, the types of the fields are either unsigned -int (32 bit) or unsigned long (32-bit or 64-bit, depending on your -machine) as noted per-field below. Unless your observations are very -spread in time, these fields should not wrap twice before you notice it. +The kernel exposes disk statistics via ``/proc/diskstats`` and +``/sys/block/<device>/stat``. These stats are usually accessed via tools +such as ``sar`` and ``iostat``. + +Here are examples using a disk with two partitions:: + + /proc/diskstats: + 259 0 nvme0n1 255999 814 12369153 47919 996852 81 36123024 425995 0 301795 580470 0 0 0 0 60602 106555 + 259 1 nvme0n1p1 492 813 17572 96 848 81 108288 210 0 76 307 0 0 0 0 0 0 + 259 2 nvme0n1p2 255401 1 12343477 47799 996004 0 36014736 425784 0 344336 473584 0 0 0 0 0 0 + + /sys/block/nvme0n1/stat: + 255999 814 12369153 47919 996858 81 36123056 426009 0 301809 580491 0 0 0 0 60605 106562 + + /sys/block/nvme0n1/nvme0n1p1/stat: + 492 813 17572 96 848 81 108288 210 0 76 307 0 0 0 0 0 0 + +Both files contain the same 17 statistics. ``/sys/block/<device>/stat`` +contains the fields for ``<device>``. In ``/proc/diskstats`` the fields +are prefixed with the major and minor device numbers and the device +name. In the example above, the first stat value for ``nvme0n1`` is +255999 in both files. + +The sysfs ``stat`` file is efficient for monitoring a small, known set +of disks. If you're tracking a large number of devices, +``/proc/diskstats`` is often the better choice since it avoids the +overhead of opening and closing multiple files for each snapshot. + +All fields are cumulative, monotonic counters, except for field 9, which +resets to zero as I/Os complete. The remaining fields reset at boot, on +device reattachment or reinitialization, or when the underlying counter +overflows. Applications reading these counters should detect and handle +resets when comparing stat snapshots. Each set of stats only applies to the indicated device; if you want system-wide stats you'll have to find all the devices and sum them all up. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 41d4cf206ec1..7042fbe26a60 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6084,7 +6084,7 @@ is assumed to be I/O ports; otherwise it is memory. 
reserve_mem= [RAM] - Format: nn[KNG]:<align>:<label> + Format: nn[KMG]:<align>:<label> Reserve physical memory and label it with a name that other subsystems can use to access it. This is typically used for systems that do not wipe the RAM, and this command diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst index 2ed79f41a411..d0502691dfa1 100644 --- a/Documentation/admin-guide/thunderbolt.rst +++ b/Documentation/admin-guide/thunderbolt.rst @@ -28,7 +28,7 @@ should be a userspace tool that handles all the low-level details, keeps a database of the authorized devices and prompts users for new connections. More details about the sysfs interface for Thunderbolt devices can be -found in ``Documentation/ABI/testing/sysfs-bus-thunderbolt``. +found in Documentation/ABI/testing/sysfs-bus-thunderbolt. Those users who just want to connect any device without any sort of manual work can add following line to diff --git a/Documentation/admin-guide/workload-tracing.rst b/Documentation/admin-guide/workload-tracing.rst index 6be38c1b9c5b..d6313890ee41 100644 --- a/Documentation/admin-guide/workload-tracing.rst +++ b/Documentation/admin-guide/workload-tracing.rst @@ -82,7 +82,7 @@ Install tools to build Linux kernel and tools in kernel repository. scripts/ver_linux is a good way to check if your system already has the necessary tools:: - sudo apt-get build-essentials flex bison yacc + sudo apt-get install build-essential flex bison yacc sudo apt install libelf-dev systemtap-sdt-dev libslang2-dev libperl-dev libdw-dev cscope is a good tool to browse kernel sources. Let's install it now:: diff --git a/Documentation/arch/arm64/amu.rst b/Documentation/arch/arm64/amu.rst index 01f2de2b0450..ac1b3f0e211d 100644 --- a/Documentation/arch/arm64/amu.rst +++ b/Documentation/arch/arm64/amu.rst @@ -80,7 +80,7 @@ bypass the setting of AMUSERENR_EL0 to trap accesses from EL0 (userspace) to EL1 (kernel). Therefore, firmware should still ensure accesses to AMU registers are not trapped in EL2/EL3. -The fixed counters of AMUv1 are accessible though the following system +The fixed counters of AMUv1 are accessible through the following system register definitions: - SYS_AMEVCNTR0_CORE_EL0 diff --git a/Documentation/arch/arm64/asymmetric-32bit.rst b/Documentation/arch/arm64/asymmetric-32bit.rst index 1ca2b359a907..57b8d7476f71 100644 --- a/Documentation/arch/arm64/asymmetric-32bit.rst +++ b/Documentation/arch/arm64/asymmetric-32bit.rst @@ -55,7 +55,7 @@ sysfs The subset of CPUs capable of running 32-bit tasks is described in ``/sys/devices/system/cpu/aarch32_el0`` and is documented further in -``Documentation/ABI/testing/sysfs-devices-system-cpu``. +Documentation/ABI/testing/sysfs-devices-system-cpu. **Note:** CPUs are advertised by this file as they are detected and so late-onlining of 32-bit-capable CPUs can result in the file contents diff --git a/Documentation/conf.py b/Documentation/conf.py index 0c2205d536b3..3dad1f90b098 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -47,7 +47,7 @@ from load_config import loadConfig # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '2.4.4' +needs_sphinx = '3.4.3' # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom diff --git a/Documentation/core-api/min_heap.rst b/Documentation/core-api/min_heap.rst index 683bc6d09f00..9f57766581df 100644 --- a/Documentation/core-api/min_heap.rst +++ b/Documentation/core-api/min_heap.rst @@ -47,8 +47,8 @@ Example: #define MIN_HEAP_PREALLOCATED(_type, _name, _nr) struct _name { - int nr; /* Number of elements in the heap */ - int size; /* Maximum number of elements that can be held */ + size_t nr; /* Number of elements in the heap */ + size_t size; /* Maximum number of elements that can be held */ _type *data; /* Pointer to the heap data */ _type preallocated[_nr]; /* Static preallocated array */ } diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index d81c42d1063e..8575178aa87f 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -203,7 +203,7 @@ they happen concurrently in different threads, and at least one of them is a least one is a write. For a more thorough discussion and definition, see `"Plain Accesses and Data Races" in the LKMM`_. -.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt#n1922 +.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt?id=8f6629c004b193d23612641c3607e785819e97ab#n2164 Relationship with the Linux-Kernel Memory Consistency Model (LKMM) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml index 2a98b26630cb..c155c9c6db39 100644 --- a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml +++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml @@ -40,7 +40,7 @@ properties: microchip,rx-int-gpios: description: - GPIO phandle of GPIO connected to to INT1 pin of the MCP251XFD, which + GPIO phandle of GPIO connected to INT1 pin of the MCP251XFD, which signals a pending RX interrupt. maxItems: 1 diff --git a/Documentation/driver-api/firmware/firmware-usage-guidelines.rst b/Documentation/driver-api/firmware/firmware-usage-guidelines.rst index fdcfce42c6d2..336e912281c3 100644 --- a/Documentation/driver-api/firmware/firmware-usage-guidelines.rst +++ b/Documentation/driver-api/firmware/firmware-usage-guidelines.rst @@ -42,3 +42,8 @@ then of course these rules will not apply strictly.) deprecating old major versions, then this should only be done as a last option, and be stated clearly in all communications. +* Firmware files that affect the User API (UAPI) shall not introduce + changes that break existing userspace programs. Updates to such firmware + must ensure backward compatibility with existing userspace applications. + This includes maintaining consistent interfaces and behaviors that + userspace programs rely on. diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst index 71ccc30e586b..e826f16ea43d 100644 --- a/Documentation/driver-api/generic-counter.rst +++ b/Documentation/driver-api/generic-counter.rst @@ -467,7 +467,7 @@ Counter sysfs Translates counter data to the standard Counter sysfs interface format and vice versa. 
-Please refer to the ``Documentation/ABI/testing/sysfs-bus-counter`` file +Please refer to the Documentation/ABI/testing/sysfs-bus-counter file for a detailed breakdown of the available Generic Counter interface sysfs attributes. @@ -483,7 +483,7 @@ Sysfs Interface Several sysfs attributes are generated by the Generic Counter interface, and reside under the ``/sys/bus/counter/devices/counterX`` directory, where ``X`` is to the respective counter device id. Please see -``Documentation/ABI/testing/sysfs-bus-counter`` for detailed information +Documentation/ABI/testing/sysfs-bus-counter for detailed information on each Generic Counter interface sysfs attribute. Through these sysfs attributes, programs and scripts may interact with diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst index dfe438dc91a7..42b580fb2989 100644 --- a/Documentation/driver-api/iio/core.rst +++ b/Documentation/driver-api/iio/core.rst @@ -60,7 +60,7 @@ directory. Common attributes are: * :file:`sampling_frequency_available`, available discrete set of sampling frequency values for device. * Available standard attributes for IIO devices are described in the - :file:`Documentation/ABI/testing/sysfs-bus-iio` file in the Linux kernel + :file:Documentation/ABI/testing/sysfs-bus-iio file in the Linux kernel sources. IIO device channels diff --git a/Documentation/driver-api/infiniband.rst b/Documentation/driver-api/infiniband.rst index 30e142ccbee9..10d8be9e74fe 100644 --- a/Documentation/driver-api/infiniband.rst +++ b/Documentation/driver-api/infiniband.rst @@ -77,14 +77,14 @@ iSCSI Extensions for RDMA (iSER) :internal: .. kernel-doc:: drivers/infiniband/ulp/iser/iscsi_iser.c - :functions: iscsi_iser_pdu_alloc iser_initialize_task_headers \ - iscsi_iser_task_init iscsi_iser_mtask_xmit iscsi_iser_task_xmit \ - iscsi_iser_cleanup_task iscsi_iser_check_protection \ - iscsi_iser_conn_create iscsi_iser_conn_bind \ - iscsi_iser_conn_start iscsi_iser_conn_stop \ - iscsi_iser_session_destroy iscsi_iser_session_create \ - iscsi_iser_set_param iscsi_iser_ep_connect iscsi_iser_ep_poll \ - iscsi_iser_ep_disconnect + :functions: iscsi_iser_pdu_alloc iser_initialize_task_headers + iscsi_iser_task_init iscsi_iser_mtask_xmit iscsi_iser_task_xmit + iscsi_iser_cleanup_task iscsi_iser_check_protection + iscsi_iser_conn_create iscsi_iser_conn_bind + iscsi_iser_conn_start iscsi_iser_conn_stop + iscsi_iser_session_destroy iscsi_iser_session_create + iscsi_iser_set_param iscsi_iser_ep_connect iscsi_iser_ep_poll + iscsi_iser_ep_disconnect .. kernel-doc:: drivers/infiniband/ulp/iser/iser_initiator.c :internal: diff --git a/Documentation/driver-api/media/drivers/zoran.rst b/Documentation/driver-api/media/drivers/zoran.rst index b205e10c3154..3e05b7f0442a 100644 --- a/Documentation/driver-api/media/drivers/zoran.rst +++ b/Documentation/driver-api/media/drivers/zoran.rst @@ -222,7 +222,7 @@ The CCIR - I uses the PAL colorsystem, and is used in Great Britain, Hong Kong, Ireland, Nigeria, South Africa. The CCIR - N uses the PAL colorsystem and PAL frame size but the NTSC framerate, -and is used in Argentinia, Uruguay, an a few others +and is used in Argentina, Uruguay, an a few others We do not talk about how the audio is broadcast ! 
diff --git a/Documentation/driver-api/media/maintainer-entry-profile.rst b/Documentation/driver-api/media/maintainer-entry-profile.rst index ffc712a5f632..ad96a89ee916 100644 --- a/Documentation/driver-api/media/maintainer-entry-profile.rst +++ b/Documentation/driver-api/media/maintainer-entry-profile.rst @@ -116,7 +116,7 @@ CEC drivers ``cec-compliance`` .. [3] The ``v4l2-compliance`` also covers the media controller usage inside V4L2 drivers. -Other compilance tools are under development to check other parts of the +Other compliance tools are under development to check other parts of the subsystem. Those tests need to pass before the patches go upstream. diff --git a/Documentation/driver-api/nvdimm/nvdimm.rst b/Documentation/driver-api/nvdimm/nvdimm.rst index ca16b5acbf30..c205efa4d45b 100644 --- a/Documentation/driver-api/nvdimm/nvdimm.rst +++ b/Documentation/driver-api/nvdimm/nvdimm.rst @@ -535,12 +535,12 @@ internally with a static identifier:: char devname[50]; snprintf(devname, sizeof(devname), "namespace%d.%d", - ndctl_region_get_id(region), paramaters->id); + ndctl_region_get_id(region), parameters->id); ndctl_namespace_set_alt_name(ndns, devname); /* 'uuid' must be set prior to setting size! */ - ndctl_namespace_set_uuid(ndns, paramaters->uuid); - ndctl_namespace_set_size(ndns, paramaters->size); + ndctl_namespace_set_uuid(ndns, parameters->uuid); + ndctl_namespace_set_size(ndns, parameters->size); /* unlike pmem namespaces, blk namespaces have a sector size */ if (parameters->lbasize) ndctl_namespace_set_sector_size(ndns, parameters->lbasize); diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst index d448cb57df86..8d86d5da4023 100644 --- a/Documentation/driver-api/pm/devices.rst +++ b/Documentation/driver-api/pm/devices.rst @@ -358,7 +358,7 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``. is probed against the device in question by passing them to the :c:func:`dev_pm_set_driver_flags` helper function.] If the first of these flags is set, the PM core will not apply the direct-complete - procedure described above to the given device and, consequenty, to any + procedure described above to the given device and, consequently, to any of its ancestors. The second flag, when set, informs the middle layer code (bus types, device types, PM domains, classes) that it should take the return value of the ``->prepare`` callback provided by the driver diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt index 02febc883588..d937b7a03575 100644 --- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt +++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt @@ -20,7 +20,7 @@ | openrisc: | TODO | | parisc: | ok | | powerpc: | ok | - | riscv: | ok | + | riscv: | TODO | | s390: | ok | | sh: | TODO | | sparc: | TODO | diff --git a/Documentation/features/list-arch.sh b/Documentation/features/list-arch.sh index e73aa35848f0..ac8ff7f6f859 100755 --- a/Documentation/features/list-arch.sh +++ b/Documentation/features/list-arch.sh @@ -6,6 +6,6 @@ # (If no arguments are given then it will print the host architecture's status.) 
# -ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/')} +ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/')} $(dirname $0)/../../scripts/get_feat.pl list --arch $ARCH diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst index 2bbf68b56b0d..3078f3c9256a 100644 --- a/Documentation/filesystems/9p.rst +++ b/Documentation/filesystems/9p.rst @@ -90,7 +90,7 @@ Just start the 9pfs capable network server like diod/nfs-ganesha e.g.:: $ diod -f -n -d 0 -S -l 0.0.0.0:9999 -e $PWD -Optionaly scan your bus if there are more then one usbg gadgets to find their path:: +Optionally scan your bus if there are more then one usbg gadgets to find their path:: $ python $kernel_dir/tools/usb/p9_fwd.py list diff --git a/Documentation/filesystems/bcachefs/SubmittingPatches.rst b/Documentation/filesystems/bcachefs/SubmittingPatches.rst index 026b12ae0d6a..4b79ca58faf2 100644 --- a/Documentation/filesystems/bcachefs/SubmittingPatches.rst +++ b/Documentation/filesystems/bcachefs/SubmittingPatches.rst @@ -30,7 +30,7 @@ CI: === Instead of running your tests locally, when running the full test suite it's -prefereable to let a server farm do it in parallel, and then have the results +preferable to let a server farm do it in parallel, and then have the results in a nice test dashboard (which can tell you which failures are new, and presents results in a git log view, avoiding the need for most bisecting). @@ -68,7 +68,7 @@ Other things to think about: land - use them. Use them judiciously, and not as a replacement for proper error handling, but use them. -- Does it need to be performance tested? Should we add new peformance counters? +- Does it need to be performance tested? Should we add new performance counters? bcachefs has a set of persistent runtime counters which can be viewed with the 'bcachefs fs top' command; this should give users a basic idea of what diff --git a/Documentation/filesystems/coda.rst b/Documentation/filesystems/coda.rst index bdde7e4e010b..0db3c83a50e5 100644 --- a/Documentation/filesystems/coda.rst +++ b/Documentation/filesystems/coda.rst @@ -141,7 +141,7 @@ kernel support. a process P which accessing a Coda file. It makes a system call which traps to the OS kernel. Examples of such calls trapping to the kernel are ``read``, ``write``, ``open``, ``close``, ``create``, ``mkdir``, - ``rmdir``, ``chmod`` in a Unix ontext. Similar calls exist in the Win32 + ``rmdir``, ``chmod`` in a Unix context. Similar calls exist in the Win32 environment, and are named ``CreateFile``. Generally the operating system handles the request in a virtual diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst index f7f977ffbf8d..610f718ef8b5 100644 --- a/Documentation/filesystems/debugfs.rst +++ b/Documentation/filesystems/debugfs.rst @@ -220,7 +220,7 @@ There are a couple of other directory-oriented helper functions:: A call to debugfs_change_name() will give a new name to an existing debugfs file, always in the same directory. The new_name must not exist prior -to the call; the return value is 0 on success and -E... on failuer. +to the call; the return value is 0 on success and -E... on failure. Symbolic links can be created with debugfs_create_symlink(). 
There is one important thing that all debugfs users must take into account: diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 73f0bfd7e903..3886c14f89f4 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -515,7 +515,7 @@ The methods defined in the table are: the cache to expand a request in either direction. This allows the cache to size the request appropriately for the cache granularity. - The function is passed poiners to the start and length in its parameters, + The function is passed pointers to the start and length in its parameters, plus the size of the file for reference, and adjusts the start and length appropriately. It should return one of: diff --git a/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst b/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst index 6402ab8e370c..2a2705e975e8 100644 --- a/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst +++ b/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst @@ -219,7 +219,7 @@ The log is circular, so the positions in the log are defined by the combination of a cycle number - the number of times the log has been overwritten - and the offset into the log. A LSN carries the cycle in the upper 32 bits and the offset in the lower 32 bits. The offset is in units of "basic blocks" (512 -bytes). Hence we can do realtively simple LSN based math to keep track of +bytes). Hence we can do relatively simple LSN based math to keep track of available space in the log. Log space accounting is done via a pair of constructs called "grant heads". The diff --git a/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst b/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst index 32b6ac4ca9d6..ce4584fb3103 100644 --- a/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst +++ b/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst @@ -93,7 +93,7 @@ others on a regular basis about burnout. sponsoring work on any part of XFS. - **LTS Maintainer**: Someone who backports and tests bug fixes from - uptream to the LTS kernels. + upstream to the LTS kernels. There tend to be six separate LTS trees at any given time. The maintainer for a given LTS release should identify themselves with an diff --git a/Documentation/filesystems/xfs/xfs-online-fsck-design.rst b/Documentation/filesystems/xfs/xfs-online-fsck-design.rst index 12aa63840830..e231d127cd40 100644 --- a/Documentation/filesystems/xfs/xfs-online-fsck-design.rst +++ b/Documentation/filesystems/xfs/xfs-online-fsck-design.rst @@ -4521,8 +4521,8 @@ Both online and offline repair can use this strategy. | For this second effort, the ondisk parent pointer format as originally | | proposed was ``(parent_inum, parent_gen, dirent_pos) → (dirent_name)``. | | The format was changed during development to eliminate the requirement | -| of repair tools needing to to ensure that the ``dirent_pos`` field | -| always matched when reconstructing a directory. | +| of repair tools needing to ensure that the ``dirent_pos`` field always | +| matched when reconstructing a directory. 
| | | | There were a few other ways to have solved that problem: | | | diff --git a/Documentation/iio/iio_devbuf.rst b/Documentation/iio/iio_devbuf.rst index 9919e4792d0e..dca1f0200b0d 100644 --- a/Documentation/iio/iio_devbuf.rst +++ b/Documentation/iio/iio_devbuf.rst @@ -148,5 +148,5 @@ applied), however there are corner cases in which the buffered data may be found in a processed form. Please note that these corner cases are not addressed by this documentation. -Please see ``Documentation/ABI/testing/sysfs-bus-iio`` for a complete +Please see Documentation/ABI/testing/sysfs-bus-iio for a complete description of the attributes. diff --git a/Documentation/input/devices/elantech.rst b/Documentation/input/devices/elantech.rst index c3374a7ce7af..98163a258b83 100644 --- a/Documentation/input/devices/elantech.rst +++ b/Documentation/input/devices/elantech.rst @@ -556,7 +556,7 @@ Note on debounce: In case the box has unstable power supply or other electricity issues, or when number of finger changes, F/W would send "debounce packet" to inform driver that the hardware is in debounce status. -The debouce packet has the following signature:: +The debounce packet has the following signature:: byte 0: 0xc4 byte 1: 0xff diff --git a/Documentation/input/input-programming.rst b/Documentation/input/input-programming.rst index c9264814c7aa..2b3e6a34e34b 100644 --- a/Documentation/input/input-programming.rst +++ b/Documentation/input/input-programming.rst @@ -346,3 +346,22 @@ driver can handle these events, it has to set the respective bits in evbit, This callback routine can be called from an interrupt or a BH (although that isn't a rule), and thus must not sleep, and must not take too long to finish. + +Polled input devices +~~~~~~~~~~~~~~~~~~~~ + +Input polling is set up by passing an input device struct and a callback to +the function:: + + int input_setup_polling(struct input_dev *dev, + void (*poll_fn)(struct input_dev *dev)) + +Within the callback, devices should use the regular input_report_* functions +and input_sync as is used by other devices. + +There is also the function:: + + void input_set_poll_interval(struct input_dev *dev, unsigned int interval) + +which is used to configure the interval, in milliseconds, that the device will +be polled at. diff --git a/Documentation/mm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst index 8e1ceb0a6619..cc3cd46abd1b 100644 --- a/Documentation/mm/split_page_table_lock.rst +++ b/Documentation/mm/split_page_table_lock.rst @@ -4,7 +4,7 @@ Split page table lock Originally, mm->page_table_lock spinlock protected all page tables of the mm_struct. But this approach leads to poor page fault scalability of -multi-threaded applications due high contention on the lock. To improve +multi-threaded applications due to high contention on the lock. To improve scalability, split page table lock was introduced. With split page table lock we have separate per-table lock to serialize diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst index 75e017dfa825..518284e287b0 100644 --- a/Documentation/networking/statistics.rst +++ b/Documentation/networking/statistics.rst @@ -143,7 +143,7 @@ reading multiple stats as it internally performs a full dump of and reports only the stat corresponding to the accessed file. Sysfs files are documented in -`Documentation/ABI/testing/sysfs-class-net-statistics`. +Documentation/ABI/testing/sysfs-class-net-statistics. 
netlink diff --git a/Documentation/nvme/nvme-pci-endpoint-target.rst b/Documentation/nvme/nvme-pci-endpoint-target.rst index 66e7b7d869b4..b699595d1762 100644 --- a/Documentation/nvme/nvme-pci-endpoint-target.rst +++ b/Documentation/nvme/nvme-pci-endpoint-target.rst @@ -86,7 +86,7 @@ configurable through configfs before starting the controller. To avoid issues with excessive local memory usage for executing commands, MDTS defaults to 512 KB and is limited to a maximum of 2 MB (arbitrary limit). -Mimimum number of PCI Address Mapping Windows Required +Minimum number of PCI Address Mapping Windows Required ------------------------------------------------------ Most PCI endpoint controllers provide a limited number of mapping windows for diff --git a/Documentation/process/5.Posting.rst b/Documentation/process/5.Posting.rst index dbb763a8de90..22fa925353cf 100644 --- a/Documentation/process/5.Posting.rst +++ b/Documentation/process/5.Posting.rst @@ -268,10 +268,15 @@ The tags in common use are: - Cc: the named person received a copy of the patch and had the opportunity to comment on it. -Be careful in the addition of tags to your patches, as only Cc: is appropriate -for addition without the explicit permission of the person named; using -Reported-by: is fine most of the time as well, but ask for permission if -the bug was reported in private. +Be careful in the addition of the aforementioned tags to your patches, as all +except for Cc:, Reported-by:, and Suggested-by: need explicit permission of the +person named. For those three implicit permission is sufficient if the person +contributed to the Linux kernel using that name and email address according +to the lore archives or the commit history -- and in case of Reported-by: +and Suggested-by: did the reporting or suggestion in public. Note, +bugzilla.kernel.org is a public place in this sense, but email addresses +used there are private; so do not expose them in tags, unless the person +used them in earlier contributions. Sending the patch diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index a0beca805362..d564362773b5 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -58,11 +58,11 @@ mcelog 0.6 mcelog --version iptables 1.4.2 iptables -V openssl & libcrypto 1.0.0 openssl version bc 1.06.95 bc --version -Sphinx\ [#f1]_ 2.4.4 sphinx-build --version +Sphinx\ [#f1]_ 3.4.3 sphinx-build --version GNU tar 1.28 tar --version gtags (optional) 6.6.5 gtags --version mkimage (optional) 2017.01 mkimage --version -Python (optional) 3.5.x python3 --version +Python (optional) 3.9.x python3 --version GNU AWK (optional) 5.1.0 gawk --version ====================== =============== ======================================== diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst index 1d1150954be3..4cdef8360698 100644 --- a/Documentation/process/code-of-conduct-interpretation.rst +++ b/Documentation/process/code-of-conduct-interpretation.rst @@ -145,13 +145,16 @@ kernel community. Any decisions regarding enforcement recommendations will be brought to the TAB for implementation of enforcement with the relevant maintainers -if needed. A decision by the Code of Conduct Committee can be overturned -by the TAB by a two-thirds vote. +if needed. 
Once the TAB approves one or more of the measures outlined +in the scope of the ban by two-thirds of the members voting for the +measures, the Code of Conduct Committee will enforce the TAB approved +measures. Any Code of Conduct Committee members serving on the TAB will +not vote on the measures. At quarterly intervals, the Code of Conduct Committee and TAB will provide a report summarizing the anonymised reports that the Code of Conduct committee has received and their status, as well details of any -overridden decisions including complete and identifiable voting details. +TAB approved decisions including complete and identifiable voting details. Because how we interpret and enforce the Code of Conduct will evolve over time, this document will be updated when necessary to reflect any @@ -227,9 +230,11 @@ The scope of the ban for a period of time could include: such as mailing lists and social media sites Once the TAB approves one or more of the measures outlined in the scope of -the ban by a two-thirds vote, the Code of Conduct Committee will enforce -the TAB approved measure(s) in collaboration with the community, maintainers, -sub-maintainers, and kernel.org administrators. +the ban by two-thirds of the members voting for the measures, the Code of +Conduct Committee will enforce the TAB approved measure(s) in collaboration +with the community, maintainers, sub-maintainers, and kernel.org +administrators. Any Code of Conduct Committee members serving on the TAB +will not vote on the measures. The Code of Conduct Committee is mindful of the negative impact of seeking public apology and instituting ban could have on individuals. It is also diff --git a/Documentation/process/kernel-docs.rst b/Documentation/process/kernel-docs.rst index 3b5b5983fea8..c67ac12cf789 100644 --- a/Documentation/process/kernel-docs.rst +++ b/Documentation/process/kernel-docs.rst @@ -75,6 +75,17 @@ On-line docs Published books --------------- + * Title: **The Linux Memory Manager** + + :Author: Lorenzo Stoakes + :Publisher: No Starch Press + :Date: February 2025 + :Pages: 1300 + :ISBN: 978-1718504462 + :Notes: Memory management. Full draft available as early access for + pre-order, full release scheduled for Fall 2025. See + https://nostarch.com/linux-memory-manager for further info. + * Title: **Practical Linux System Administration: A Guide to Installation, Configuration, and Management, 1st Edition** :Author: Kenneth Hess diff --git a/Documentation/process/submit-checklist.rst b/Documentation/process/submit-checklist.rst index e531dd504b6c..beb7f94279fd 100644 --- a/Documentation/process/submit-checklist.rst +++ b/Documentation/process/submit-checklist.rst @@ -52,7 +52,8 @@ Provide documentation 4) All new module parameters are documented with ``MODULE_PARM_DESC()`` 5) All new userspace interfaces are documented in ``Documentation/ABI/``. - See ``Documentation/ABI/README`` for more information. + See Documentation/admin-guide/abi.rst (or ``Documentation/ABI/README``) + for more information. Patches that change userspace interfaces should be CCed to linux-api@vger.kernel.org. @@ -91,9 +92,12 @@ Build your code fix any issues. 2) Builds on multiple CPU architectures by using local cross-compile tools - or some other build farm. Note that ppc64 is a good architecture for - cross-compilation checking because it tends to use ``unsigned long`` for - 64-bit quantities. + or some other build farm. 
+ Note that testing against architectures of different word sizes + (32- and 64-bit) and different endianness (big- and little-) is effective + in catching various portability issues due to false assumptions on + representable quantity range, data alignment, or endianness, among + others. 3) Newly-added code has been compiled with ``gcc -W`` (use ``make KCFLAGS=-W``). This will generate lots of noise, but is good diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index 8fdc0ef3e604..cede4e7b29af 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -495,10 +495,10 @@ list archives. A "# Suffix" may also be used in this case to clarify. If a person has had the opportunity to comment on a patch, but has not provided such comments, you may optionally add a ``Cc:`` tag to the patch. -This is the only tag which might be added without an explicit action by the -person it names - but it should indicate that this person was copied on the -patch. This tag documents that potentially interested parties -have been included in the discussion. +This tag documents that potentially interested parties have been included in +the discussion. Note, this is one of only three tags you might be able to use +without explicit permission of the person named (see 'Tagging people requires +permission' below for details). Co-developed-by: states that the patch was co-created by multiple developers; it is used to give attribution to co-authors (in addition to the author @@ -544,9 +544,9 @@ hopefully inspires them to help us again in the future. The tag is intended for bugs; please do not use it to credit feature requests. The tag should be followed by a Closes: tag pointing to the report, unless the report is not available on the web. The Link: tag can be used instead of Closes: if the patch -fixes a part of the issue(s) being reported. Please note that if the bug was -reported in private, then ask for permission first before using the Reported-by -tag. +fixes a part of the issue(s) being reported. Note, the Reported-by tag is one +of only three tags you might be able to use without explicit permission of the +person named (see 'Tagging people requires permission' below for details). A Tested-by: tag indicates that the patch has been successfully tested (in some environment) by the person named. This tag informs maintainers that @@ -596,11 +596,11 @@ Usually removal of someone's Tested-by or Reviewed-by tags should be mentioned in the patch changelog (after the '---' separator). A Suggested-by: tag indicates that the patch idea is suggested by the person -named and ensures credit to the person for the idea. Please note that this -tag should not be added without the reporter's permission, especially if the -idea was not posted in a public forum. That said, if we diligently credit our -idea reporters, they will, hopefully, be inspired to help us again in the -future. +named and ensures credit to the person for the idea: if we diligently credit +our idea reporters, they will, hopefully, be inspired to help us again in the +future. Note, this is one of only three tags you might be able to use without +explicit permission of the person named (see 'Tagging people requires +permission' below for details). A Fixes: tag indicates that the patch fixes an issue in a previous commit. 
It is used to make it easy to determine where a bug originated, which can help @@ -618,6 +618,21 @@ Finally, while providing tags is welcome and typically very appreciated, please note that signers (i.e. submitters and maintainers) may use their discretion in applying offered tags. +.. _tagging_people: + +Tagging people requires permission +---------------------------------- + +Be careful in the addition of the aforementioned tags to your patches, as all +except for Cc:, Reported-by:, and Suggested-by: need explicit permission of the +person named. For those three implicit permission is sufficient if the person +contributed to the Linux kernel using that name and email address according +to the lore archives or the commit history -- and in case of Reported-by: +and Suggested-by: did the reporting or suggestion in public. Note, +bugzilla.kernel.org is a public place in this sense, but email addresses +used there are private; so do not expose them in tags, unless the person +used them in earlier contributions. + .. _the_canonical_patch_format: The canonical patch format @@ -717,6 +732,12 @@ patch in the permanent changelog. If the ``from`` line is missing, then the ``From:`` line from the email header will be used to determine the patch author in the changelog. +The author may indicate their affiliation or the sponsor of the work +by adding the name of an organization to the ``from`` and ``SoB`` lines, +e.g.: + + From: Patch Author (Company) <author@example.com> + Explanation Body ^^^^^^^^^^^^^^^^ diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst index 41ed2ceafc92..e881a945c188 100644 --- a/Documentation/scheduler/sched-bwc.rst +++ b/Documentation/scheduler/sched-bwc.rst @@ -59,7 +59,7 @@ At the same time, we can say that the worst case deadline miss, will be \Sum e_i; that is, there is a bounded tardiness (under the assumption that x+e is indeed WCET). -The interferenece when using burst is valued by the possibilities for +The interference when using burst is valued by the possibilities for missing the deadline and the average WCET. Test results showed that when there many cgroups or CPU is under utilized, the interference is limited. More details are shown in: diff --git a/Documentation/sound/soc/machine.rst b/Documentation/sound/soc/machine.rst index 9db132bc0070..1828f5edca3e 100644 --- a/Documentation/sound/soc/machine.rst +++ b/Documentation/sound/soc/machine.rst @@ -75,7 +75,7 @@ In the above struct, dai’s are registered using names but you can pass either dai name or device tree node but not both. Also, names used here for cpu/codec/platform dais should be globally unique. -Additionaly below example macro can be used to register cpu, codec and +Additionally below example macro can be used to register cpu, codec and platform dai:: SND_SOC_DAILINK_DEFS(wm2200_cpu_dsp, diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py index a413f8dd5115..ecf54d22e9dc 100644 --- a/Documentation/sphinx/automarkup.py +++ b/Documentation/sphinx/automarkup.py @@ -11,13 +11,7 @@ from sphinx.errors import NoUri import re from itertools import chain -# -# Python 2 lacks re.ASCII... -# -try: - ascii_p3 = re.ASCII -except AttributeError: - ascii_p3 = 0 +from kernel_abi import get_kernel_abi # # Regex nastiness. Of course. @@ -26,28 +20,30 @@ except AttributeError: # :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last # bit tries to restrict matches to things that won't create trouble. 
# -RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3) +RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII) # # Sphinx 2 uses the same :c:type role for struct, union, enum and typedef # RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)', - flags=ascii_p3) + flags=re.ASCII) # # Sphinx 3 uses a different C role for each one of struct, union, enum and # typedef # -RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3) -RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3) -RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3) -RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3) +RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=re.ASCII) +RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=re.ASCII) +RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=re.ASCII) +RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=re.ASCII) # # Detects a reference to a documentation page of the form Documentation/... with # an optional extension # RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)') +RE_abi_file = re.compile(r'(\bDocumentation/ABI/[\w\-/]+)') +RE_abi_symbol = re.compile(r'(\b/(sys|config|proc)/[\w\-/]+)') RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$') @@ -83,11 +79,10 @@ def markup_refs(docname, app, node): # # Associate each regex with the function that will markup its matches # - markup_func_sphinx2 = {RE_doc: markup_doc_ref, - RE_function: markup_c_ref, - RE_generic_type: markup_c_ref} - markup_func_sphinx3 = {RE_doc: markup_doc_ref, + markup_func = {RE_doc: markup_doc_ref, + RE_abi_file: markup_abi_file_ref, + RE_abi_symbol: markup_abi_ref, RE_function: markup_func_ref_sphinx3, RE_struct: markup_c_ref, RE_union: markup_c_ref, @@ -95,11 +90,6 @@ def markup_refs(docname, app, node): RE_typedef: markup_c_ref, RE_git: markup_git} - if sphinx.version_info[0] >= 3: - markup_func = markup_func_sphinx3 - else: - markup_func = markup_func_sphinx2 - match_iterators = [regex.finditer(t) for regex in markup_func] # # Sort all references by the starting position in text @@ -270,6 +260,54 @@ def markup_doc_ref(docname, app, match): else: return nodes.Text(match.group(0)) +# +# Try to replace a documentation reference for ABI symbols and files +# with a cross reference to that page +# +def markup_abi_ref(docname, app, match, warning=False): + stddom = app.env.domains['std'] + # + # Go through the dance of getting an xref out of the std domain + # + kernel_abi = get_kernel_abi() + + fname = match.group(1) + target = kernel_abi.xref(fname) + + # Kernel ABI doesn't describe such file or symbol + if not target: + if warning: + kernel_abi.log.warning("%s not found", fname) + return nodes.Text(match.group(0)) + + pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'ref', + reftarget = target, modname = None, + classname = None, refexplicit = False) + + # + # XXX The Latex builder will throw NoUri exceptions here, + # work around that by ignoring them. + # + try: + xref = stddom.resolve_xref(app.env, docname, app.builder, 'ref', + target, pxref, None) + except NoUri: + xref = None + # + # Return the xref if we got it; otherwise just return the plain text. 
+ # + if xref: + return xref + else: + return nodes.Text(match.group(0)) + +# +# Variant of markup_abi_ref() that warns whan a reference is not found +# +def markup_abi_file_ref(docname, app, match): + return markup_abi_ref(docname, app, match, warning=True) + + def get_c_namespace(app, docname): source = app.env.doc2path(docname) with open(source) as f: diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py index e6959af25402..e8ea80d4324c 100644 --- a/Documentation/sphinx/cdomain.py +++ b/Documentation/sphinx/cdomain.py @@ -1,6 +1,6 @@ # -*- coding: utf-8; mode: python -*- # pylint: disable=W0141,C0113,C0103,C0325 -u""" +""" cdomain ~~~~~~~ @@ -45,9 +45,6 @@ import re __version__ = '1.1' -# Get Sphinx version -major, minor, patch = sphinx.version_info[:3] - # Namespace to be prepended to the full name namespace = None @@ -145,7 +142,7 @@ class CObject(Base_CObject): } def handle_func_like_macro(self, sig, signode): - u"""Handles signatures of function-like macros. + """Handles signatures of function-like macros. If the objtype is 'function' and the signature ``sig`` is a function-like macro, the name of the macro is returned. Otherwise diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py index 5911bd0d7965..db6f0380de94 100644 --- a/Documentation/sphinx/kernel_abi.py +++ b/Documentation/sphinx/kernel_abi.py @@ -2,7 +2,7 @@ # coding=utf-8 # SPDX-License-Identifier: GPL-2.0 # -u""" +""" kernel-abi ~~~~~~~~~~ @@ -14,7 +14,7 @@ u""" :license: GPL Version 2, June 1991 see Linux/COPYING for details. The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the - scripts/get_abi.pl script to parse the Kernel ABI files. + scripts/get_abi.py script to parse the Kernel ABI files. Overview of directive's argument and options. @@ -32,107 +32,137 @@ u""" """ -import codecs import os -import subprocess -import sys import re -import kernellog +import sys from docutils import nodes, statemachine from docutils.statemachine import ViewList from docutils.parsers.rst import directives, Directive -from docutils.utils.error_reporting import ErrorString from sphinx.util.docutils import switch_source_input +from sphinx.util import logging + +srctree = os.path.abspath(os.environ["srctree"]) +sys.path.insert(0, os.path.join(srctree, "scripts/lib/abi")) + +from abi_parser import AbiParser + +__version__ = "1.0" + +logger = logging.getLogger('kernel_abi') +path = os.path.join(srctree, "Documentation/ABI") -__version__ = '1.0' +_kernel_abi = None + +def get_kernel_abi(): + """ + Initialize kernel_abi global var, if not initialized yet. + + This is needed to avoid warnings during Sphinx module initialization. 
+ """ + global _kernel_abi + + if not _kernel_abi: + # Parse ABI symbols only once + _kernel_abi = AbiParser(path, logger=logger) + _kernel_abi.parse_abi() + _kernel_abi.check_issues() + + return _kernel_abi def setup(app): app.add_directive("kernel-abi", KernelCmd) - return dict( - version = __version__ - , parallel_read_safe = True - , parallel_write_safe = True - ) + return { + "version": __version__, + "parallel_read_safe": True, + "parallel_write_safe": True + } -class KernelCmd(Directive): - u"""KernelABI (``kernel-abi``) directive""" +class KernelCmd(Directive): + """KernelABI (``kernel-abi``) directive""" required_arguments = 1 - optional_arguments = 2 + optional_arguments = 3 has_content = False final_argument_whitespace = True + parser = None option_spec = { - "debug" : directives.flag, - "rst" : directives.unchanged + "debug": directives.flag, + "no-symbols": directives.flag, + "no-files": directives.flag, } def run(self): + kernel_abi = get_kernel_abi() + doc = self.state.document if not doc.settings.file_insertion_enabled: raise self.warning("docutils: file insertion disabled") - srctree = os.path.abspath(os.environ["srctree"]) - - args = [ - os.path.join(srctree, 'scripts/get_abi.pl'), - 'rest', - '--enable-lineno', - '--dir', os.path.join(srctree, 'Documentation', self.arguments[0]), - ] - - if 'rst' in self.options: - args.append('--rst-source') - - lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8') - nodeList = self.nestedParse(lines, self.arguments[0]) - return nodeList - - def nestedParse(self, lines, fname): env = self.state.document.settings.env content = ViewList() node = nodes.section() - if "debug" in self.options: - code_block = "\n\n.. code-block:: rst\n :linenos:\n" - for l in lines.split("\n"): - code_block += "\n " + l - lines = code_block + "\n\n" + abi_type = self.arguments[0] - line_regex = re.compile(r"^\.\. LINENO (\S+)\#([0-9]+)$") - ln = 0 - n = 0 - f = fname + if "no-symbols" in self.options: + show_symbols = False + else: + show_symbols = True - for line in lines.split("\n"): - n = n + 1 - match = line_regex.search(line) - if match: - new_f = match.group(1) + if "no-files" in self.options: + show_file = False + else: + show_file = True - # Sphinx parser is lazy: it stops parsing contents in the - # middle, if it is too big. So, handle it per input file - if new_f != f and content: - self.do_parse(content, node) - content = ViewList() + tab_width = self.options.get('tab-width', + self.state.document.settings.tab_width) - # Add the file to Sphinx build dependencies - env.note_dependency(os.path.abspath(f)) - - f = new_f - - # sphinx counts lines from 0 - ln = int(match.group(2)) - 1 + old_f = None + n = 0 + n_sym = 0 + for msg, f, ln in kernel_abi.doc(show_file=show_file, + show_symbols=show_symbols, + filter_path=abi_type): + n_sym += 1 + msg_list = statemachine.string2lines(msg, tab_width, + convert_whitespace=True) + if "debug" in self.options: + lines = [ + "", "", ".. code-block:: rst", + " :linenos:", "" + ] + for m in msg_list: + lines.append(" " + m) else: - content.append(line, f, ln) - - kernellog.info(self.state.document.settings.env.app, "%s: parsed %i lines" % (fname, n)) + lines = msg_list - if content: - self.do_parse(content, node) + for line in lines: + # sphinx counts lines from 0 + content.append(line, f, ln - 1) + n += 1 + + if f != old_f: + # Add the file to Sphinx build dependencies + env.note_dependency(os.path.abspath(f)) + + old_f = f + + # Sphinx doesn't like to parse big messages. 
So, let's + # add content symbol by symbol + if content: + self.do_parse(content, node) + content = ViewList() + + if show_symbols and not show_file: + logger.verbose("%s ABI: %i symbols (%i ReST lines)" % (abi_type, n_sym, n)) + elif not show_symbols and show_file: + logger.verbose("%s ABI: %i files (%i ReST lines)" % (abi_type, n_sym, n)) + else: + logger.verbose("%s ABI: %i data (%i ReST lines)" % (abi_type, n_sym, n)) return node.children diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py index 03ace5f01b5c..e3a51867f27b 100644 --- a/Documentation/sphinx/kernel_feat.py +++ b/Documentation/sphinx/kernel_feat.py @@ -1,7 +1,7 @@ # coding=utf-8 # SPDX-License-Identifier: GPL-2.0 # -u""" +""" kernel-feat ~~~~~~~~~~~ @@ -56,7 +56,7 @@ def setup(app): class KernelFeat(Directive): - u"""KernelFeat (``kernel-feat``) directive""" + """KernelFeat (``kernel-feat``) directive""" required_arguments = 1 optional_arguments = 2 diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py index 638762442336..8db176045bc5 100755 --- a/Documentation/sphinx/kernel_include.py +++ b/Documentation/sphinx/kernel_include.py @@ -2,7 +2,7 @@ # -*- coding: utf-8; mode: python -*- # pylint: disable=R0903, C0330, R0914, R0912, E0401 -u""" +""" kernel-include ~~~~~~~~~~~~~~ @@ -56,7 +56,7 @@ def setup(app): class KernelInclude(Include): # ============================================================================== - u"""KernelInclude (``kernel-include``) directive""" + """KernelInclude (``kernel-include``) directive""" def run(self): env = self.state.document.settings.env diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py index ec1ddfff1863..39ddae6ae7dd 100644 --- a/Documentation/sphinx/kerneldoc.py +++ b/Documentation/sphinx/kerneldoc.py @@ -39,7 +39,7 @@ from docutils.statemachine import ViewList from docutils.parsers.rst import directives, Directive import sphinx from sphinx.util.docutils import switch_source_input -import kernellog +from sphinx.util import logging __version__ = '1.0' @@ -56,16 +56,12 @@ class KernelDocDirective(Directive): 'functions': directives.unchanged, } has_content = False + logger = logging.getLogger('kerneldoc') def run(self): env = self.state.document.settings.env cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno'] - # Pass the version string to kernel-doc, as it needs to use a different - # dialect, depending what the C domain supports for each specific - # Sphinx versions - cmd += ['-sphinx-version', sphinx.__version__] - filename = env.config.kerneldoc_srctree + '/' + self.arguments[0] export_file_patterns = [] @@ -109,8 +105,7 @@ class KernelDocDirective(Directive): cmd += [filename] try: - kernellog.verbose(env.app, - 'calling kernel-doc \'%s\'' % (" ".join(cmd))) + self.logger.verbose("calling kernel-doc '%s'" % (" ".join(cmd))) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() @@ -120,8 +115,8 @@ class KernelDocDirective(Directive): if p.returncode != 0: sys.stderr.write(err) - kernellog.warn(env.app, - 'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode)) + self.logger.warning("kernel-doc '%s' failed with return code %d" + % (" ".join(cmd), p.returncode)) return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] elif env.config.kerneldoc_verbosity > 0: sys.stderr.write(err) @@ -148,8 +143,8 @@ class KernelDocDirective(Directive): return node.children except Exception as e: # 
pylint: disable=W0703 - kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' % - (" ".join(cmd), str(e))) + self.logger.warning("kernel-doc '%s' processing failed with: %s" % + (" ".join(cmd), str(e))) return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] def do_parse(self, result, node): diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py deleted file mode 100644 index 0bc00c138cad..000000000000 --- a/Documentation/sphinx/kernellog.py +++ /dev/null @@ -1,22 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Sphinx has deprecated its older logging interface, but the replacement -# only goes back to 1.6. So here's a wrapper layer to keep around for -# as long as we support 1.4. -# -# We don't support 1.4 anymore, but we'll keep the wrappers around until -# we change all the code to not use them anymore :) -# -import sphinx -from sphinx.util import logging - -logger = logging.getLogger('kerneldoc') - -def warn(app, message): - logger.warning(message) - -def verbose(app, message): - logger.verbose(message) - -def info(app, message): - logger.info(message) diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py index 97166333b727..f1a7f13c9c60 100644 --- a/Documentation/sphinx/kfigure.py +++ b/Documentation/sphinx/kfigure.py @@ -1,6 +1,6 @@ # -*- coding: utf-8; mode: python -*- # pylint: disable=C0103, R0903, R0912, R0915 -u""" +""" scalable figure and image handling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -59,12 +59,14 @@ from docutils.parsers.rst import directives from docutils.parsers.rst.directives import images import sphinx from sphinx.util.nodes import clean_astext -import kernellog +from sphinx.util import logging Figure = images.Figure __version__ = '1.0.0' +logger = logging.getLogger('kfigure') + # simple helper # ------------- @@ -163,14 +165,14 @@ def setup(app): def setupTools(app): - u""" + """ Check available build tools and log some *verbose* messages. This function is called once, when the builder is initiated. 
""" global dot_cmd, dot_Tpdf, convert_cmd, rsvg_convert_cmd # pylint: disable=W0603 global inkscape_cmd, inkscape_ver_one # pylint: disable=W0603 - kernellog.verbose(app, "kfigure: check installed tools ...") + logger.verbose("kfigure: check installed tools ...") dot_cmd = which('dot') convert_cmd = which('convert') @@ -178,7 +180,7 @@ def setupTools(app): inkscape_cmd = which('inkscape') if dot_cmd: - kernellog.verbose(app, "use dot(1) from: " + dot_cmd) + logger.verbose("use dot(1) from: " + dot_cmd) try: dot_Thelp_list = subprocess.check_output([dot_cmd, '-Thelp'], @@ -190,10 +192,11 @@ def setupTools(app): dot_Tpdf_ptn = b'pdf' dot_Tpdf = re.search(dot_Tpdf_ptn, dot_Thelp_list) else: - kernellog.warn(app, "dot(1) not found, for better output quality install " - "graphviz from https://www.graphviz.org") + logger.warning( + "dot(1) not found, for better output quality install graphviz from https://www.graphviz.org" + ) if inkscape_cmd: - kernellog.verbose(app, "use inkscape(1) from: " + inkscape_cmd) + logger.verbose("use inkscape(1) from: " + inkscape_cmd) inkscape_ver = subprocess.check_output([inkscape_cmd, '--version'], stderr=subprocess.DEVNULL) ver_one_ptn = b'Inkscape 1' @@ -204,26 +207,27 @@ def setupTools(app): else: if convert_cmd: - kernellog.verbose(app, "use convert(1) from: " + convert_cmd) + logger.verbose("use convert(1) from: " + convert_cmd) else: - kernellog.verbose(app, + logger.verbose( "Neither inkscape(1) nor convert(1) found.\n" - "For SVG to PDF conversion, " - "install either Inkscape (https://inkscape.org/) (preferred) or\n" - "ImageMagick (https://www.imagemagick.org)") + "For SVG to PDF conversion, install either Inkscape (https://inkscape.org/) (preferred) or\n" + "ImageMagick (https://www.imagemagick.org)" + ) if rsvg_convert_cmd: - kernellog.verbose(app, "use rsvg-convert(1) from: " + rsvg_convert_cmd) - kernellog.verbose(app, "use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion") + logger.verbose("use rsvg-convert(1) from: " + rsvg_convert_cmd) + logger.verbose("use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion") dot_Tpdf = False else: - kernellog.verbose(app, + logger.verbose( "rsvg-convert(1) not found.\n" - " SVG rendering of convert(1) is done by ImageMagick-native renderer.") + " SVG rendering of convert(1) is done by ImageMagick-native renderer." + ) if dot_Tpdf: - kernellog.verbose(app, "use 'dot -Tpdf' for DOT -> PDF conversion") + logger.verbose("use 'dot -Tpdf' for DOT -> PDF conversion") else: - kernellog.verbose(app, "use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion") + logger.verbose("use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion") # integrate conversion tools @@ -257,13 +261,12 @@ def convert_image(img_node, translator, src_fname=None): # in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages - kernellog.verbose(app, 'assert best format for: ' + img_node['uri']) + logger.verbose('assert best format for: ' + img_node['uri']) if in_ext == '.dot': if not dot_cmd: - kernellog.verbose(app, - "dot from graphviz not available / include DOT raw.") + logger.verbose("dot from graphviz not available / include DOT raw.") img_node.replace_self(file2literal(src_fname)) elif translator.builder.format == 'latex': @@ -290,10 +293,11 @@ def convert_image(img_node, translator, src_fname=None): if translator.builder.format == 'latex': if not inkscape_cmd and convert_cmd is None: - kernellog.warn(app, - "no SVG to PDF conversion available / include SVG raw." 
- "\nIncluding large raw SVGs can cause xelatex error." - "\nInstall Inkscape (preferred) or ImageMagick.") + logger.warning( + "no SVG to PDF conversion available / include SVG raw.\n" + "Including large raw SVGs can cause xelatex error.\n" + "Install Inkscape (preferred) or ImageMagick." + ) img_node.replace_self(file2literal(src_fname)) else: dst_fname = path.join(translator.builder.outdir, fname + '.pdf') @@ -306,15 +310,14 @@ def convert_image(img_node, translator, src_fname=None): _name = dst_fname[len(str(translator.builder.outdir)) + 1:] if isNewer(dst_fname, src_fname): - kernellog.verbose(app, - "convert: {out}/%s already exists and is newer" % _name) + logger.verbose("convert: {out}/%s already exists and is newer" % _name) else: ok = False mkdir(path.dirname(dst_fname)) if in_ext == '.dot': - kernellog.verbose(app, 'convert DOT to: {out}/' + _name) + logger.verbose('convert DOT to: {out}/' + _name) if translator.builder.format == 'latex' and not dot_Tpdf: svg_fname = path.join(translator.builder.outdir, fname + '.svg') ok1 = dot2format(app, src_fname, svg_fname) @@ -325,7 +328,7 @@ def convert_image(img_node, translator, src_fname=None): ok = dot2format(app, src_fname, dst_fname) elif in_ext == '.svg': - kernellog.verbose(app, 'convert SVG to: {out}/' + _name) + logger.verbose('convert SVG to: {out}/' + _name) ok = svg2pdf(app, src_fname, dst_fname) if not ok: @@ -354,7 +357,7 @@ def dot2format(app, dot_fname, out_fname): with open(out_fname, "w") as out: exit_code = subprocess.call(cmd, stdout = out) if exit_code != 0: - kernellog.warn(app, + logger.warning( "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) return bool(exit_code == 0) @@ -388,13 +391,14 @@ def svg2pdf(app, svg_fname, pdf_fname): pass if exit_code != 0: - kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) + logger.warning("Error #%d when calling: %s" % + (exit_code, " ".join(cmd))) if warning_msg: - kernellog.warn(app, "Warning msg from %s: %s" - % (cmd_name, str(warning_msg, 'utf-8'))) + logger.warning( "Warning msg from %s: %s" % + (cmd_name, str(warning_msg, 'utf-8'))) elif warning_msg: - kernellog.verbose(app, "Warning msg from %s (likely harmless):\n%s" - % (cmd_name, str(warning_msg, 'utf-8'))) + logger.verbose("Warning msg from %s (likely harmless):\n%s" % + (cmd_name, str(warning_msg, 'utf-8'))) return bool(exit_code == 0) @@ -418,7 +422,8 @@ def svg2pdf_by_rsvg(app, svg_fname, pdf_fname): # use stdout and stderr from parent exit_code = subprocess.call(cmd) if exit_code != 0: - kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) + logger.warning("Error #%d when calling: %s" % + (exit_code, " ".join(cmd))) ok = bool(exit_code == 0) return ok @@ -440,7 +445,7 @@ class kernel_image(nodes.image): pass class KernelImage(images.Image): - u"""KernelImage directive + """KernelImage directive Earns everything from ``.. image::`` directive, except *remote URI* and *glob* pattern. The KernelImage wraps a image node into a @@ -476,7 +481,7 @@ class kernel_figure(nodes.figure): """Node for ``kernel-figure`` directive.""" class KernelFigure(Figure): - u"""KernelImage directive + """KernelImage directive Earns everything from ``.. figure::`` directive, except *remote URI* and *glob* pattern. 
The KernelFigure wraps a figure node into a kernel_figure @@ -513,15 +518,15 @@ def visit_kernel_render(self, node): app = self.builder.app srclang = node.get('srclang') - kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang)) + logger.verbose('visit kernel-render node lang: "%s"' % srclang) tmp_ext = RENDER_MARKUP_EXT.get(srclang, None) if tmp_ext is None: - kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang)) + logger.warning( 'kernel-render: "%s" unknown / include raw.' % srclang) return if not dot_cmd and tmp_ext == '.dot': - kernellog.verbose(app, "dot from graphviz not available / include raw.") + logger.verbose("dot from graphviz not available / include raw.") return literal_block = node[0] @@ -552,7 +557,7 @@ class kernel_render(nodes.General, nodes.Inline, nodes.Element): pass class KernelRender(Figure): - u"""KernelRender directive + """KernelRender directive Render content by external tool. Has all the options known from the *figure* directive, plus option ``caption``. If ``caption`` has a diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py index 8b416bfd75ac..ec50e1ee5223 100644 --- a/Documentation/sphinx/load_config.py +++ b/Documentation/sphinx/load_config.py @@ -9,7 +9,7 @@ from sphinx.util.osutil import fs_encoding def loadConfig(namespace): # ------------------------------------------------------------------------------ - u"""Load an additional configuration file into *namespace*. + """Load an additional configuration file into *namespace*. The name of the configuration file is taken from the environment ``SPHINX_CONF``. The external configuration file extends (or overwrites) the diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py index dcad0fff4723..d31cff867436 100755 --- a/Documentation/sphinx/maintainers_include.py +++ b/Documentation/sphinx/maintainers_include.py @@ -3,7 +3,7 @@ # -*- coding: utf-8; mode: python -*- # pylint: disable=R0903, C0330, R0914, R0912, E0401 -u""" +""" maintainers-include ~~~~~~~~~~~~~~~~~~~ @@ -37,7 +37,7 @@ def setup(app): ) class MaintainersInclude(Include): - u"""MaintainersInclude (``maintainers-include``) directive""" + """MaintainersInclude (``maintainers-include``) directive""" required_arguments = 0 def parse_maintainers(self, path): diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py index 16bea0632555..180fbb50c337 100755 --- a/Documentation/sphinx/rstFlatTable.py +++ b/Documentation/sphinx/rstFlatTable.py @@ -2,7 +2,7 @@ # -*- coding: utf-8; mode: python -*- # pylint: disable=C0330, R0903, R0912 -u""" +""" flat-table ~~~~~~~~~~ @@ -99,7 +99,7 @@ class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321 class FlatTable(Table): # ============================================================================== - u"""FlatTable (``flat-table``) directive""" + """FlatTable (``flat-table``) directive""" option_spec = { 'name': directives.unchanged @@ -135,7 +135,7 @@ class FlatTable(Table): class ListTableBuilder(object): # ============================================================================== - u"""Builds a table from a double-stage list""" + """Builds a table from a double-stage list""" def __init__(self, directive): self.directive = directive @@ -212,7 +212,7 @@ class ListTableBuilder(object): raise SystemMessagePropagation(error) def parseFlatTableNode(self, node): - u"""parses the node from a :py:class:`FlatTable` directive's body""" + 
"""parses the node from a :py:class:`FlatTable` directive's body""" if len(node) != 1 or not isinstance(node[0], nodes.bullet_list): self.raiseError( @@ -225,7 +225,7 @@ class ListTableBuilder(object): self.roundOffTableDefinition() def roundOffTableDefinition(self): - u"""Round off the table definition. + """Round off the table definition. This method rounds off the table definition in :py:member:`rows`. diff --git a/Documentation/trace/postprocess/decode_msr.py b/Documentation/trace/postprocess/decode_msr.py index aa9cc7abd5c2..f5609b16f589 100644 --- a/Documentation/trace/postprocess/decode_msr.py +++ b/Documentation/trace/postprocess/decode_msr.py @@ -32,6 +32,6 @@ for j in sys.stdin: break if r: j = j.replace(" " + m.group(2), " " + r + "(" + m.group(2) + ")") - print j, + print(j) diff --git a/Documentation/translations/it_IT/process/submit-checklist.rst b/Documentation/translations/it_IT/process/submit-checklist.rst index 692be4af9c9b..5bf1b4adebc1 100644 --- a/Documentation/translations/it_IT/process/submit-checklist.rst +++ b/Documentation/translations/it_IT/process/submit-checklist.rst @@ -58,9 +58,10 @@ Fornite documentazione 4) Tutti i nuovi parametri dei moduli sono documentati con ``MODULE_PARM_DESC()``. 5) Tutte le nuove interfacce verso lo spazio utente sono documentate in - ``Documentation/ABI/``. Leggete ``Documentation/ABI/README`` per maggiori - informazioni. Le patch che modificano le interfacce utente dovrebbero - essere inviate in copia anche a linux-api@vger.kernel.org. + ``Documentation/ABI/``. Leggete Documentation/admin-guide/abi.rst + (o ``Documentation/ABI/README``) per maggiori informazioni. + Le patch che modificano le interfacce utente dovrebbero essere inviate + in copia anche a linux-api@vger.kernel.org. 6) Se la patch aggiunge nuove chiamate ioctl, allora aggiornate ``Documentation/userspace-api/ioctl/ioctl-number.rst``. diff --git a/Documentation/translations/ja_JP/SubmitChecklist b/Documentation/translations/ja_JP/SubmitChecklist deleted file mode 100644 index 1759c6b452d6..000000000000 --- a/Documentation/translations/ja_JP/SubmitChecklist +++ /dev/null @@ -1,105 +0,0 @@ -NOTE: -This is a version of Documentation/process/submit-checklist.rst into Japanese. -This document is maintained by Takenori Nagano <t-nagano@ah.jp.nec.com> -and the JF Project team <http://www.linux.or.jp/JF/>. -If you find any difference between this document and the original file -or a problem with the translation, -please contact the maintainer of this file or JF project. - -Please also note that the purpose of this file is to be easier to read -for non English (read: Japanese) speakers and is not intended as a -fork. So if you have any comments or updates of this file, please try -to update the original English file first. 
- -Last Updated: 2008/07/14 -================================== -これは、 -linux-2.6.26/Documentation/process/submit-checklist.rst の和訳です。 - -翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ > -翻訳日: 2008/07/14 -翻訳者: Takenori Nagano <t-nagano at ah dot jp dot nec dot com> -校正者: Masanori Kobayashi さん <zap03216 at nifty dot ne dot jp> -================================== - - -Linux カーネルパッチ投稿者向けチェックリスト -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -本書では、パッチをより素早く取り込んでもらいたい開発者が実践すべき基本的な事柄 -をいくつか紹介します。ここにある全ての事柄は、Documentation/process/submitting-patches.rst -などのLinuxカーネルパッチ投稿に際しての心得を補足するものです。 - - 1: 妥当なCONFIGオプションや変更されたCONFIGオプション、つまり =y, =m, =n - 全てで正しくビルドできることを確認してください。その際、gcc及びリンカが - warningやerrorを出していないことも確認してください。 - - 2: allnoconfig, allmodconfig オプションを用いて正しくビルドできることを - 確認してください。 - - 3: 手許のクロスコンパイルツールやOSDLのPLMのようなものを用いて、複数の - アーキテクチャにおいても正しくビルドできることを確認してください。 - - 4: 64bit長の'unsigned long'を使用しているppc64は、クロスコンパイルでの - チェックに適当なアーキテクチャです。 - - 5: カーネルコーディングスタイルに準拠しているかどうか確認してください(!) - - 6: CONFIGオプションの追加・変更をした場合には、CONFIGメニューが壊れていない - ことを確認してください。 - - 7: 新しくKconfigのオプションを追加する際には、必ずそのhelpも記述してください。 - - 8: 適切なKconfigの依存関係を考えながら慎重にチェックしてください。 - ただし、この作業はマシンを使ったテストできちんと行うのがとても困難です。 - うまくやるには、自分の頭で考えることです。 - - 9: sparseを利用してちゃんとしたコードチェックをしてください。 - -10: 'make checkstack' を利用し、問題が発見されたら修正してください。 - 'make checkstack' は明示的に問題を示しませんが、どれか - 1つの関数が512バイトより大きいスタックを使っていれば、修正すべき候補と - なります。 - -11: グローバルなkernel API を説明する kernel-doc をソースの中に含めてください。 - ( staticな関数においては必須ではありませんが、含めてもらっても結構です ) - そして、'make htmldocs' もしくは 'make mandocs' を利用して追記した - ドキュメントのチェックを行い、問題が見つかった場合には修正を行ってください。 - -12: CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT, CONFIG_DEBUG_SLAB, - CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES, CONFIG_DEBUG_SPINLOCK, - CONFIG_DEBUG_ATOMIC_SLEEP これら全てを同時に有効にして動作確認を - 行ってください。 - -13: CONFIG_SMP, CONFIG_PREEMPT を有効にした場合と無効にした場合の両方で - ビルドした上、動作確認を行ってください。 - -14: lockdepの機能を全て有効にした上で、全てのコードパスを評価してください。 - -15: /proc に新しいエントリを追加した場合には、Documentation/ 配下に - 必ずドキュメントを追加してください。 - -16: 新しいブートパラメータを追加した場合には、 - 必ずDocumentation/admin-guide/kernel-parameters.rst に説明を追加してください。 - -17: 新しくmoduleにパラメータを追加した場合には、MODULE_PARM_DESC()を - 利用して必ずその説明を記述してください。 - -18: 新しいuserspaceインタフェースを作成した場合には、Documentation/ABI/ に - Documentation/ABI/README を参考にして必ずドキュメントを追加してください。 - -19: 少なくともslabアロケーションとpageアロケーションに失敗した場合の - 挙動について、fault-injectionを利用して確認してください。 - Documentation/fault-injection/ を参照してください。 - - 追加したコードがかなりの量であったならば、サブシステム特有の - fault-injectionを追加したほうが良いかもしれません。 - -20: 新たに追加したコードは、`gcc -W'でコンパイルしてください。 - このオプションは大量の不要なメッセージを出力しますが、 - "warning: comparison between signed and unsigned" のようなメッセージは、 - バグを見つけるのに役に立ちます。 - -21: 投稿したパッチが -mm パッチセットにマージされた後、全ての既存のパッチや - VM, VFS およびその他のサブシステムに関する様々な変更と、現時点でも共存 - できることを確認するテストを行ってください。 diff --git a/Documentation/translations/ja_JP/disclaimer-ja_JP.rst b/Documentation/translations/ja_JP/disclaimer-ja_JP.rst new file mode 100644 index 000000000000..46a026000407 --- /dev/null +++ b/Documentation/translations/ja_JP/disclaimer-ja_JP.rst @@ -0,0 +1,24 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. _translations_ja_JP_disclaimer: + +========================== +免責条項 (Disclaimer) 抄訳 +========================== + +.. 
note:: 【訳註】 + この文書は、 + :ref:`Disclaimer (英語版) <translations_disclaimer>` + の一部を翻訳したものです。全文は英語版を参照願います。 + +Documentation/translations/ja_JP/ 以下のファイルは、対応する +Documentation/ 以下のファイル (原文) の日本語訳です。 +翻訳と原文との違いや翻訳上の問題を見つけたら、 +MAINTAINERS に記載の維持管理者に知らせてください。 +翻訳が原文の更新に追いついていない場合は、それを日本語版に反映するパッチの +投稿も歓迎です。 + +なお、この翻訳の目的は非英語 (ここでは日本語) 話者への便宜提供であり、 +フォークを意図したものではない事を念頭においてください。したがって、 +このファイルの内容に対するコメントや更新すべきことがあれば、先に原文の +更新を検討してください。 diff --git a/Documentation/translations/ja_JP/index.rst b/Documentation/translations/ja_JP/index.rst index 0b476b429e3b..4159b417bfdd 100644 --- a/Documentation/translations/ja_JP/index.rst +++ b/Documentation/translations/ja_JP/index.rst @@ -11,7 +11,9 @@ .. toctree:: :maxdepth: 1 + disclaimer-ja_JP process/howto + process/submit-checklist .. raw:: latex diff --git a/Documentation/translations/ja_JP/process/howto.rst b/Documentation/translations/ja_JP/process/howto.rst index d9ba40588e46..5e307f90982c 100644 --- a/Documentation/translations/ja_JP/process/howto.rst +++ b/Documentation/translations/ja_JP/process/howto.rst @@ -1,35 +1,18 @@ -.. raw:: latex +.. SPDX-License-Identifier: GPL-2.0 - \kerneldocCJKoff - -NOTE: -This is a version of Documentation/process/howto.rst translated into Japanese. -This document is maintained by Tsugikazu Shibata <tshibata@ab.jp.nec.com> -If you find any difference between this document and the original file or -a problem with the translation, please contact the maintainer of this file. - -Please also note that the purpose of this file is to be easier to -read for non English (read: Japanese) speakers and is not intended as -a fork. So if you have any comments or updates for this file, please -try to update the original English file first. - ----------------------------------- - -.. raw:: latex - - \kerneldocCJKon - -この文書は、 -Documentation/process/howto.rst -の和訳です。 - -翻訳者: Tsugikazu Shibata <tshibata@ab.jp.nec.com> - ----------------------------------- +.. Originally contributed by Tsugikazu Shibata Linux カーネル開発のやり方 ========================== +.. note:: 【訳註】 + この文書は、 + Documentation/process/howto.rst + の翻訳です。 + 免責条項については、 + :ref:`免責条項の抄訳 <translations_ja_JP_disclaimer>` および、 + :ref:`Disclaimer (英語版) <translations_disclaimer>` を参照してください。 + これは上のトピック( Linux カーネル開発のやり方)の重要な事柄を網羅した ドキュメントです。ここには Linux カーネル開発者になるための方法とLinux カーネル開発コミュニティと共に活動するやり方を学ぶ方法が含まれています。 diff --git a/Documentation/translations/ja_JP/process/submit-checklist.rst b/Documentation/translations/ja_JP/process/submit-checklist.rst new file mode 100644 index 000000000000..fb3b9e3bd8ee --- /dev/null +++ b/Documentation/translations/ja_JP/process/submit-checklist.rst @@ -0,0 +1,163 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. Translated by Akira Yokosawa <akiyks@gmail.com> + +.. An old translation of this document of a different origin was at + Documentation/translations/ja_JP/SubmitChecklist, which can be found + in the pre-v6.14 tree if you are interested. + Please note that this translation is independent of the previous one. + +====================================== +Linux カーネルパッチ投稿チェックリスト +====================================== + +.. note:: 【訳註】 + この文書は、 + Documentation/process/submit-checklist.rst + の翻訳です。 + 免責条項については、 + :ref:`免責条項の抄訳 <translations_ja_JP_disclaimer>` および、 + :ref:`Disclaimer (英語版) <translations_disclaimer>` を参照してください。 + +以下は、カーネルパッチの投稿時に、そのスムーズな受け入れのために心がける +べき基本的な事項です。 + +これは、 Documentation/process/submitting-patches.rst およびその他の +Linux カーネルパッチ投稿に関する文書を踏まえ、それを補足するものです。 + +.. 
note:: 【訳註】 + 可能な項目については、パッチもしくはパッチ内の更新を暗黙の主語として、 + その望ましい状態を表す文体とします。その他、原義を損なわない範囲で + 係り結びを調整するなど、簡潔で把握しやすい箇条書きを目指します。 + + +コードのレビュー +================ + +1) 利用する機能について、その機能を定義・宣言しているファイルを + ``#include`` している。 + 他のヘッダーファイル経由での取り込みに依存しない。 + +2) Documentation/process/coding-style.rst に詳述されている一般的なスタイル + についてチェック済み。 + +3) メモリバリアー (例, ``barrier()``, ``rmb()``, ``wmb()``) について、 + そのすべてに、作用と目的、及び必要理由についての説明がソースコード内の + コメントとして記述されている。 + + +Kconfig 変更のレビュー +====================== + +1) 新規の、もしくは変更された ``CONFIG`` オプションについて、それが関係する + コンフィグメニューへの悪影響がない。また、 + Documentation/kbuild/kconfig-language.rst の + "Menu attibutes: default value" に記載の例外条件を満たす場合を除き、 + そのデフォルトが無効になっている。 + +2) 新規の ``Kconfig`` オプションにヘルプテキストがある。 + +3) 妥当な ``Kconfig`` の組み合わせについて注意深くレビューされている。 + これをテストでやり切るのは困難で、知力が決め手となる。 + +ドキュメンテーションの作成 +========================== + +1) グローバルなカーネル API が :ref:`kernel-doc <kernel_doc>` の形式で + ドキュメント化されている (静的関数には求められないが、付けてもよい)。 + +2) 新規 ``/proc`` エントリーが、すべて ``Documentation/`` 以下に記載されて + いる。 + +3) 新規カーネル・ブート・パラメータが、すべて + ``Documentation/admin-guide/kernel-parameters.rst`` に記載されている。 + +4) 新規モジュール・パラメータが、すべて ``MODULE_PARM_DESC()`` によって記述 + されている。 + +5) 新規ユーザースペース・インターフェースが、すべて ``Documentaion/ABI/`` + 以下に記載されている。詳しくは、 Documentation/admin-guide/abi.rst + (もしくは ``Documentation/ABI/README``) を参照。 + ユーザースペース・インターフェースを変更するパッチは、 + linux-api@vger.kernel.org にも CC すべし。 + +6) なんらかの ioctl を追加するパッチは、 + ``Documentation/userspace-api/ioctl/ioctl-number.rst`` + の更新を伴う。 + +ツールによるコードのチェック +============================ + +1) スタイル・チェッカー (``scripts/checkpatch.pl``) によって、犯しがちな + パッチ・スタイルの違反がないことを確認済み。 + 指摘される違反を残す場合は、それを正当化できること。 + +2) sparse により入念にチェック済み。 + +3) ``make checkstack`` で指摘される問題があれば、それが修正済み。 + ``checkstack`` は問題点を明示的には指摘しないが、 スタック消費が + 512 バイトを越える関数は見直しの候補。 + +コードのビルド +============== + +1) 以下の条件でクリーンにビルドできる。 + + a) 適用可能な、および ``=y``, ``=m``, ``=n`` を変更した ``CONFIG`` + オプションでのビルド。 + ``gcc`` およびリンカーからの警告・エラーがないこと。 + + b) ``allnoconfig`` と ``allmodconfig`` がパス + + c) ``O=builddir`` を指定してのビルド + + d) Documentation/ 以下の変更に関して、ドキュメントのビルドで新たな警告や + エラーが出ない。 + ``make htmldocs`` または ``make pdfdocs`` でビルドし、問題があれば修正。 + +2) ローカルのクロス・コンパイル・ツール、その他のビルド環境 (訳註: build farm) + を使って、複数の CPU アーキテクチャ向けにビルドできる。 + 特に、ワードサイズ (32 ビットと 64 ビット) やエンディアン (ビッグとリトル) + の異なるアーキテクチャを対象とするテストは、表現可能数値範囲・データ整列・ + エンディアンなどについての誤った仮定に起因する様々な移植上の問題を捕える + のに効果的。 + +3) 新規に追加されたコードについて (``make KCFLAGS=-W`` を使って) + ``gcc -W`` でコンパイル。 + これは多くのノイズを伴うが、 + ``warning: comparison between signed and unsigned`` + の類いのバグをあぶり出すのに効果的。 + +4) 変更されるソースコードが、下記の ``Kconfig`` シンボルに関連するカーネル + API や機能に依存 (もしくは利用) する場合、それらの ``Kconfig`` シンボルが、 + 無効、および (可能なら) ``=m`` の場合を組み合わせた複数のビルドを + (全部まとめてではなく、いろいろなランダムの組み合わせで) テスト済み。 + + ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``, + ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``, + ``CONFIG_NET``, ``CONFIG_INET=n`` (ただし、後者は ``CONFIG_NET=y`` + との組み合わせ)。 + +コードのテスト +============== + +1) ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``, + ``CONFIG_SLUB_DEBUG``, ``CONFIG_DEBUG_PAGEALLOC``, ``CONFIG_DEBUG_MUTEXES``, + ``CONFIG_DEBUG_SPINLOCK``, ``CONFIG_DEBUG_ATOMIC_SLEEP``, + ``CONFIG_PROVE_RCU`` および ``CONFIG_DEBUG_OBJECTS_RCU_HEAD`` をすべて + 同時に有効にしてのテスト済み。 + +2) ``CONFIG_SMP`` と ``CONFIG_PREEMPT`` が有効と無効の場合について、ビルドと + ランタイムのテスト済み。 + +3) lockdep の機能をすべて有効にしての実行で、すべてのコード経路が確認済み。 + +4) 最低限、slab と ページ・アロケーションの失敗に関する誤り注入 + (訳註: fault injection) によるチェック済み。 + 詳しくは、 Documentation/fault-injection/index.rst を参照。 + 新規のコードが多い場合は、サブシステム対象の誤り注入を追加するのが望ましい + 可能性あり。 + +5) linux-next の最新タグに対するテストにより、他でキューイングされている + 
パッチや、VM、VFS、その他のサブシステム内のすべての変更と組み合わせての + 動作を確認済み。 diff --git a/Documentation/translations/sp_SP/process/submit-checklist.rst b/Documentation/translations/sp_SP/process/submit-checklist.rst index 0d6651f9d871..e7107cc97001 100644 --- a/Documentation/translations/sp_SP/process/submit-checklist.rst +++ b/Documentation/translations/sp_SP/process/submit-checklist.rst @@ -97,9 +97,10 @@ y en otros lugares con respecto al envío de parches del kernel de Linux. ``MODULE_PARM_DESC()``. 18) Todas las nuevas interfaces de espacio de usuario están documentadas - en ``Documentation/ABI/``. Consulte ``Documentation/ABI/README`` para - obtener más información. Los parches que cambian las interfaces del - espacio de usuario deben ser CCed a linux-api@vger.kernel.org. + en ``Documentation/ABI/``. Consulte Documentation/admin-guide/abi.rst + (o ``Documentation/ABI/README``) para obtener más información. + Los parches que cambian las interfaces del espacio de usuario deben + ser CCed a linux-api@vger.kernel.org. 19) Se ha comprobado con la inyección de al menos errores de asignación de slab y página. Consulte ``Documentation/fault-injection/``. diff --git a/Documentation/translations/zh_CN/admin-guide/README.rst b/Documentation/translations/zh_CN/admin-guide/README.rst index e679cbc3c89d..1bdafdc4c8e2 100644 --- a/Documentation/translations/zh_CN/admin-guide/README.rst +++ b/Documentation/translations/zh_CN/admin-guide/README.rst @@ -146,7 +146,7 @@ Linux内核6.x版本 <http://kernel.org/> "make xconfig" 基于Qt的配置工具。 - "make gconfig" 基于GTK+的配置工具。 + "make gconfig" 基于GTK的配置工具。 "make oldconfig" 基于现有的 ./.config 文件选择所有选项,并询问 新配置选项。 diff --git a/Documentation/translations/zh_CN/dev-tools/ubsan.rst b/Documentation/translations/zh_CN/dev-tools/ubsan.rst index 2487696b3772..81ef6f77caeb 100644 --- a/Documentation/translations/zh_CN/dev-tools/ubsan.rst +++ b/Documentation/translations/zh_CN/dev-tools/ubsan.rst @@ -3,7 +3,14 @@ .. include:: ../disclaimer-zh_CN.rst :Original: Documentation/dev-tools/ubsan.rst -:Translator: Dongliang Mu <dzm91@hust.edu.cn> + +:翻译: + + 慕冬亮 Dongliang Mu <dzm91@hust.edu.cn> + +:校译: + + 王昱力 WangYuli <wangyuli@uniontech.com> 未定义行为消毒剂 - UBSAN ==================================== @@ -55,30 +62,20 @@ GCC自4.9.x [1_] (详见 ``-fsanitize=undefined`` 选项及其子选项)版 使用如下内核配置启用UBSAN:: - CONFIG_UBSAN=y - -使用如下内核配置检查整个内核:: - - CONFIG_UBSAN_SANITIZE_ALL=y - -为了在特定文件或目录启动代码插桩,需要在相应的内核Makefile中添加一行类似内容: + CONFIG_UBSAN=y -- 单文件(如main.o):: - - UBSAN_SANITIZE_main.o := y - -- 一个目录中的所有文件:: - - UBSAN_SANITIZE := y - -即使设置了``CONFIG_UBSAN_SANITIZE_ALL=y``,为了避免文件被插桩,可使用:: +排除要被检测的文件:: UBSAN_SANITIZE_main.o := n -与:: +排除一个目录中的所有文件:: UBSAN_SANITIZE := n +当全部文件都被禁用,可通过如下方式为特定文件启用:: + + UBSAN_SANITIZE_main.o := y + 未对齐的内存访问检测可通过开启独立选项 - CONFIG_UBSAN_ALIGNMENT 检测。 该选项在支持未对齐访问的架构上(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y) 默认为关闭。该选项仍可通过内核配置启用,但它将产生大量的UBSAN报告。 diff --git a/Documentation/translations/zh_CN/disclaimer-zh_CN.rst b/Documentation/translations/zh_CN/disclaimer-zh_CN.rst index 3c6db094a63c..37681c0b2a01 100644 --- a/Documentation/translations/zh_CN/disclaimer-zh_CN.rst +++ b/Documentation/translations/zh_CN/disclaimer-zh_CN.rst @@ -1,9 +1,7 @@ :orphan: -.. warning:: +.. note:: 此文件的目的是为让中文读者更容易阅读和理解,而不是作为一个分支。 因此, 如果您对此文件有任何意见或更新,请先尝试更新原始英文文件。 - -.. 
note:: - 如果您发现本文档与原始文件有任何不同或者有翻译问题,请联系该文件的译者, - 或者请求时奎亮的帮助:<alexs@kernel.org>。 + 如果您发现本文档与原始文件有任何不同或者有翻译问题,请发建议或者补丁给 + 该文件的译者,或者请求中文文档维护者和审阅者的帮助。 diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst index 7574e1673180..cc512ca54172 100644 --- a/Documentation/translations/zh_CN/index.rst +++ b/Documentation/translations/zh_CN/index.rst @@ -26,7 +26,13 @@ 顺便说下,中文文档也需要遵守内核编码风格,风格中中文和英文的主要不同就是中文 的字符标点占用两个英文字符宽度,所以,当英文要求不要超过每行100个字符时, 中文就不要超过50个字符。另外,也要注意'-','='等符号与相关标题的对齐。在将 -补丁提交到社区之前,一定要进行必要的 ``checkpatch.pl`` 检查和编译测试。 +补丁提交到社区之前,一定要进行必要的 ``checkpatch.pl`` 检查和编译测试,确保 +在 ``make htmldocs/pdfdocs`` 中不增加新的告警,最后,安装检查你生成的 +html/pdf 文件,确认它们看起来是正常的。 + +提交之前请确认你的补丁可以正常提交到中文文档维护库: +https://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git/ +如果你的补丁依赖于其他人的补丁, 可以与其他人商量后由某一个人合并提交。 与Linux 内核社区一起工作 ------------------------ diff --git a/Documentation/translations/zh_CN/mm/balance.rst b/Documentation/translations/zh_CN/mm/balance.rst index 6fd79209c307..f877c0cfa39a 100644 --- a/Documentation/translations/zh_CN/mm/balance.rst +++ b/Documentation/translations/zh_CN/mm/balance.rst @@ -64,7 +64,7 @@ kswapd并不真正需要平衡高内存区,因为中断上下文并不请求 如果从进程内存和shm中偷取页面可以减轻该页面节点中任何区的内存压力,而该区的内存压力 已经低于其水位,则会进行偷取。 -watemark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: +watermark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: 这些是每个区的字段,用于确定一个区何时需要平衡。当页面数低于水位[WMARK_MIN]时, hysteric 的字段low_on_memory被设置。这个字段会一直被设置,直到空闲页数变成水位 [WMARK_HIGH]。当low_on_memory被设置时,页面分配请求将尝试释放该区域的一些页面(如果 diff --git a/Documentation/translations/zh_CN/process/submit-checklist.rst b/Documentation/translations/zh_CN/process/submit-checklist.rst index 10536b74aeec..0e524f1c1af5 100644 --- a/Documentation/translations/zh_CN/process/submit-checklist.rst +++ b/Documentation/translations/zh_CN/process/submit-checklist.rst @@ -82,8 +82,8 @@ Linux内核补丁提交检查单 17) 所有新的模块参数都记录在 ``MODULE_PARM_DESC()`` 18) 所有新的用户空间接口都记录在 ``Documentation/ABI/`` 中。有关详细信息, - 请参阅 ``Documentation/ABI/README`` 。更改用户空间接口的补丁应该抄送 - linux-api@vger.kernel.org。 + 请参阅 Documentation/admin-guide/abi.rst (或 ``Documentation/ABI/README``)。 + 更改用户空间接口的补丁应该抄送 linux-api@vger.kernel.org\ 。 19) 已通过至少注入slab和page分配失败进行检查。请参阅 ``Documentation/fault-injection/`` 。 如果新代码是实质性的,那么添加子系统特定的故障注入可能是合适的。 diff --git a/Documentation/translations/zh_CN/security/credentials.rst b/Documentation/translations/zh_CN/security/credentials.rst new file mode 100644 index 000000000000..91c353dfb622 --- /dev/null +++ b/Documentation/translations/zh_CN/security/credentials.rst @@ -0,0 +1,479 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/security/credentials.rst + +:翻译: + 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn> + +============= +Linux中的凭据 +============= + +作者: David Howells <dhowells@redhat.com> + +.. contents:: :local: + +概述 +==== + +当一个对象对另一个对象进行操作时,Linux执行的安全检查包含几个部分: + + 1. 对象 + + 对象是可以直接由用户空间程序操作的系统中的实体。Linux具有多种可操作 + 的对象,包括: + + - 任务 + - 文件/索引节点 + - 套接字 + - 消息队列 + - 共享内存段 + - 信号量 + - 密钥 + + 所有这些对象的描述的一部分是一组凭据。集合中的内容取决于对象的类型。 + + 2. 对象所有权 + + 大多数对象的凭据中会有一个子集用来表示该对象的所有权。 + 这用于资源核算和限制(如磁盘配额和任务资源限制)。 + + 例如,在标准的UNIX文件系统中,这将由标记在索引节点上的UID定义。 + + 3. 对象上下文 + + 此外在这些对象的凭据中,将有一个子集表示对象的“对象上下文”。 + 这可能与(2)中相同,也可能不同 —— 例如,在标准的UNIX文件中, + 这是由标记在索引节点上的UID和GID定义的。 + + 对象上下文是进行安全计算的一部分,当对象被操作时会用到。 + + 4. 
主体 + + 主体是正在对其他对象执行操作的对象。 + + 系统中的大多数对象是不活动的:他们不会对系统中的其他对象起作用。 + 进程/任务是明显的例外:它们可以访问和操纵其他对象。 + + 任务之外的其他对象在某些情况下也可以是主体。例如,打开的文件可以使用 + 名为 ``fcntl(F_SETOWN)`` 的任务给它的UID和EUID向一个任务发送SIGIO + 信号。在这种情况下,文件结构也会有一个主体上下文。 + + 5. 主体上下文 + + 主体对其凭据有一个额外的解释。其凭据的一个子集形成了“主体上下文”。主体 + 上下文在主体执行操作时作为安全计算的一部分使用。 + + 例如,Linux任务在操作文件时会有FSUID、FSGID和附加组列表 —— 这些凭据 + 与通常构成任务的对象上下文的真实UID和GID是相互独立的。 + + 6. 操作 + + Linux提供许多操作,主体可以对对象执行这些操作。可用的操作集取决于主体 + 和对象的性质。 + + + 操作包括读取、写入、创建和删除文件,以及派生(forking)或发送 + 信号(signalling)和跟踪(tracing)任务等。 + + 7. 规则,访问控制列表和安全计算 + + 当主体对对象进行操作时,会进行安全计算。这涉及到使用主体上下文、对象 + 上下文和操作,并搜索一个或多个规则集,以确定在给定这些上下文的情况下, + 主体是否被授予或拒绝以所需方式对对象进行操作的权限。 + + 主要有两个规则来源: + + a. 自主访问控制(DAC): + + 有时,对象的描述中会包含一组规则。这就是所谓的“访问控制列表”或‘ACL’。 + 一个Linux文件可以提供多个ACL。 + + 例如,传统的UNIX文件包括一个权限掩码,它是一个简化的ACL,具有三个固定的 + 主体类别(“用户”、“组”和“其他”),每一个都可以被授予一定的特权(如“读取”、 + “写入”和“执行” —— 无论这些映射对于对象意味着什么)。然而,UNIX文件权限不 + 允许任意指定主体,因此用途有限。 + + Linux文件还可以支持POSIX ACL。这是一个规则列表,为任意主体授予各种权限。 + + b. 强制访问控制(MAC): + + 整个系统可能有一个或多个规则集,适用于所有主体和对象,不考虑它们的来源。 + SELinux和Smack就是这种情况的例子。 + + 在SELinux和Smack的情况下,每个对象在其凭据中都被赋予一个标签。当请求执 + 行操作时,它们使用主体标签、对象标签和操作,寻找一个规则,该规则表示此操 + 作是授予还是拒绝的。 + + +凭据类型 +======== + +Linux内核支持以下类型的凭据: + + 1. 传统的UNIX凭据。 + + - 真实用户ID + - 真实组ID + + UID和GID由大多数(如果不是全部)Linux对象携带,即使有时它们需要被虚构出 + 来(例如FAT或CIFS文件,这些文件来源于Windows)。这些(通常)定义了该对象 + 的对象上下文,但任务在某些情况下略有不同。 + + - 有效用户ID,保存用户ID和FS用户ID + - 有效组ID,保存组ID和FS组ID + - 补充组 + + 这些是仅由任务使用的额外凭据。通常,一个EUID/EGID/GROUPS 被用作主体上下文, + 而真实UID/GID 被用作对象上下文。对于任务,这并不总是正确的。 + + 2. 能力 + + - 允许的能力集合 + - 可继承的能力集合 + - 有效的能力集合 + - 能力边界集合 + + 这些仅由任务携带,表示授予任务的超出普通任务权限的能力。这些可以通过传统 + UNIX凭据的更改进行隐式操作,但也可以通过 ``capset()`` 系统调用直接操作。 + + 允许的能力是指进程可以通过 ``capset()`` 将其添加到其有效或允许集合中的 + 那些能力。这个可继承的集合也可能受到这样的限制。 + + 有效能力是任务本身实际可以使用的能力。 + + 可继承能力是那些可以通过 ``execve()`` 传递的能力。 + + 边界集限制了通过 ``execve()`` 继承的能力,特别是在以UID 0执行二进制文件时。 + + 3. 安全管理标记(securebits) + + 它们用于控制上述凭据在特定操作如execve()中的操作和继承方式。它们并不直接 + 用作对象或主体凭据使用。 + + 4. 密钥和密钥环 + + 这些仅由任务携带。它们用于携带和缓存不适合放入其他标准UNIX凭据中的安全令牌。 + 它们用诸如使网络文件系统密钥在进程执行的文件访问时可用,而无需让普通程序了解 + 涉及的安全细节。 + + 密钥环是一种特殊类型的密钥。它们携带一组其他密钥,并可以搜索来查找所需的密钥。 + 每个进程可以订阅多个密钥环: + + 每线程密钥 + 每进程密钥环 + 每会话密钥环 + + 当进程访问一个密钥时,若尚不存在,则通常会将其缓存在一个密钥环中,以便将来的 + 访问时找到该密钥。 + + 有关密钥的更多信息,请参见 ``Documentation/translations/zh_CN/security/keys/*`` 。 + + 5. LSM + + Linux安全模块允许在任务执行操作时施加额外的控制。目前,Linux支持几种LSM选项。 + + 一些工作通过标记系统中的对象,并应用一组规则(策略)说明某个标签的任务可以对 + 另一标签的对象执行哪些操作。 + + 6. AF_KEY + + 这是一种基于套接字网络协议栈中的凭据管理[RFC 2367]。本文档中没有讨论它,因为不 + 直接与任务和文件凭据进行交互,而是保留了系统级的凭据。 + + +当打开一个文件时,打开任务的主体上下文的一部分会记录在创建的文件结构中。 +这使得使用该文件结构的操作可以使用这些凭据,而不是发出操作的任务的主体上下文。 +一个例子是在网络文件系统上打开的文件,打开文件的凭据应该被呈现给服务器,而不管 +实际进行读取或写入操作的是谁。 + + +文件标记 +======== + +存储在磁盘上或通过网络获取的文件可能具有注释,构成该文件的对象安全上下文。 +根据文件系统的类型,这些注释可能包括以下一项或多项: + + * UNIX UID, GID, mode; + * Windows user ID; + * Access control list; + * LSM security label; + * UNIX exec privilege escalation bits (SUID/SGID); + * File capabilities exec privilege escalation bits. + +将这些与任务的主体安全上下文进行比较,并根据比较结果允许或禁止执行某些操作。 +在execve()的情况下,特权提升位起作用,并且可能允许由可执行文件的注释决定的 +进程获得额外的特权。 + + +任务凭据 +======== + +在Linux中,一个任务的所有凭据都保存在一个引用计数结构体‘struct cred’中, +通过(uid, gid)或(groups, keys, LSM security)进行访问。每个任务在其 +task_struct中通过一个名为‘cred’的指针指向其凭据。 + +一旦一组凭据已经准备好并提交,除非以下几种情况,否则不能更改: + + 1. 其引用计数可以更改; + + 2. 它所指向的 group_info 结构体的引用计数可以更改; + + 3. 它所指向的安全数据的引用计数可以更改; + + 4. 它所指向的任何密钥环的引用计数可以更改; + + 5. 它所指向的任何密钥环可以被撤销、过期或其安全属性可以更改; + + 6. 
它所指向的任何密钥环的内容可以更改(密钥环的整个目的就是作为一组共享凭据, + 可由具有适当访问权限的任何人修改)。 + +要更改cred结构体中的任何内容,必须遵循复制和替换的原则。首先进行复制,然后修 +改副本,最后使用RCU(读-复制-更新)将任务指针更改为指向新的副本。有一些封装可 +用于帮助执行这个过程(见下文)。 + +一个任务只能修改自己的凭据;不再允许一个任务修改另一个任务的凭据。 +这意味着 ``capset()`` 系统调用不再允许使用除当前进程之外的任何PID。 +此外, ``keyctl_instantiate()`` 和 ``keyctl_negate()`` 函数也不再 +允许在请求进程中附加到特定于进程的密钥环,因为实例化进程可能需要创建它们。 + + +不可变凭据 +---------- + +一旦一组凭据已经被公开(例如通过调用 ``commit_creds()`` ),必须将其视为 +不可变的,除了两个例外情况: + + 1. 引用计数可以被修改。 + + 2. 虽然无法更改一组凭据的密钥环订阅,但订阅的密钥环的内容可以被更改。 + +为了在编译时捕获意外的凭据修改,struct task_struct具有_const_指针指向其凭据集, +struct file也是如此。此外,某些函数如 ``get_cred()`` 和 ``put_cred()`` 在 +const指针上操作,因此不需要进行类型转换,但需要临时放弃const限定,以便能够修改 +引用计数。 + + +访问任务凭据 +------------ + +任务只能修改自己的凭据,允许当前进程可以读取或替换自己的凭据,无需任何形式锁定的 +情况下 —— 这极大简化了事情。它可以调用:: + + const struct cred *current_cred() + +获取指向其凭据结构的指针,并且之后不必释放它。 + +有一些方便的封装用于检索任务凭据的特定方面(在每种情况下都只返回值):: + + uid_t current_uid(void) Current's real UID + gid_t current_gid(void) Current's real GID + uid_t current_euid(void) Current's effective UID + gid_t current_egid(void) Current's effective GID + uid_t current_fsuid(void) Current's file access UID + gid_t current_fsgid(void) Current's file access GID + kernel_cap_t current_cap(void) Current's effective capabilities + struct user_struct *current_user(void) Current's user account + +还有一些方便的封装,用于检索任务凭据的特定关联对:: + + void current_uid_gid(uid_t *, gid_t *); + void current_euid_egid(uid_t *, gid_t *); + void current_fsuid_fsgid(uid_t *, gid_t *); + +在从当前任务的凭据中检索后,通过其参数返回这些值对。 + + +此外,还有一个函数用于获取当前进程的当前凭据集的引用:: + + const struct cred *get_current_cred(void); + +以及用于获取对一个实际上不存在于struct cred中的凭据的引用的函数:: + + struct user_struct *get_current_user(void); + struct group_info *get_current_groups(void); + +分别获得对当前进程的 user accounting structure 和补充组列表的引用。 + +一旦获得引用,就必须使用 ``put_cred()``, ``free_uid()`` 或 +``put_group_info()`` 来适当释放它。 + + +访问其他任务的凭据 +------------------ + +虽然一个任务可以在不需要锁定的情况下访问自己的凭据,但想要访问另一个任务 +的凭据的任务并非如此。它必须使用RCU读锁和 ``rcu_dereference()``。 + +``rcu_dereference()`` 是由:: + + const struct cred *__task_cred(struct task_struct *task); + +这应该在RCU读锁中使用,如下例所示:: + + void foo(struct task_struct *t, struct foo_data *f) + { + const struct cred *tcred; + ... + rcu_read_lock(); + tcred = __task_cred(t); + f->uid = tcred->uid; + f->gid = tcred->gid; + f->groups = get_group_info(tcred->groups); + rcu_read_unlock(); + ... + } + +如果需要长时间持有另一个任务的凭据,并且可能在此过程中休眠,则调用方 +应该使用以下函数来获取对这些凭据的引用:: + + const struct cred *get_task_cred(struct task_struct *task); + +这个函数内部完成了所有的RCU操作。当使用完这些凭据时,调用方必须调用put_cred() +函数释放它们。 + +.. 
note:: + ``__task_cred()`` 的结果不应直接传递给 ``get_cred()`` , + 因为这可能与 ``commit_cred()`` 发生竞争条件。 + +还有一些方便的函数可以访问另一个任务凭据的特定部分,将RCU操作对调用方隐藏起来:: + + uid_t task_uid(task) Task's real UID + uid_t task_euid(task) Task's effective UID + +如果调用方在此时已经持有RCU读锁,则应使用:: + + __task_cred(task)->uid + __task_cred(task)->euid + +类似地,如果需要访问任务凭据的多个方面,应使用RCU读锁,调用 ``__task_cred()`` +函数,将结果存储在临时指针中,然后从临时指针中调用凭据的各个方面,最后释放锁。 +这样可以防止多次调用昂贵的RCU操作。 + +如果需要访问另一个任务凭据的其他单个方面,可以使用:: + + task_cred_xxx(task, member) + +这里的‘member’是cred结构体的非指针成员。例如:: + + uid_t task_cred_xxx(task, suid); + +将从任务中检索‘struct cred::suid’,并执行适当的RCU操作。对于指针成员, +不能使用这种形式,因为它们指向的内容可能在释放RCU读锁的瞬间消失。 + + +修改凭据 +-------- + +如先前提到的,一个任务只能修改自己的凭据,不能修改其他任务的凭据。这意味 +着它不需要使用任何锁来修改自己的凭据。 + +要修改当前进程的凭据,函数应首先调用:: + + struct cred *prepare_creds(void); + +这将锁定current->cred_replace_mutex,然后分配并构建当前进程凭据的副本。 +如果成功,函数返回时仍然保持互斥锁。如果不成功(内存不足),则返回NULL。 + +互斥锁防止 ``ptrace()`` 在进行凭据构建和更改的安全检查时更改进程的ptrace +状态,因为ptrace状态可能会改变结果,特别是在 ``execve()`` 的情况下。 + +新的凭据集应适当地进行修改,并进行任何安全检查和挂钩。在此时,当前和建议的 +凭据集都可用,因为current_cred()将返回当前的凭据集。 + +在替换组列表时,必须在将其添加到凭据之前对新列表进行排序,因为使用二分查找 +测试成员资格。实际上,这意味着在set_groups()或set_current_groups()之 +前应调用groups_sort()。groups_sort()不能在共享的 ``struct group_list`` +上调用,因为即使数组已经排序,它也可能作为排序过程的一部分对元素进行排列。 + +当凭据集准备好时,应通过调用以下函数将其提交给当前进程:: + + int commit_creds(struct cred *new); + +这将修改凭据和进程的各个方面,给LSM提供机会做同样的修改,然后使用 +``rcu_assign_pointer()`` 将新的凭据实际提交给 ``current->cred`` , +释放 ``current->cred_replace_mutex`` 以允许 ``ptrace()`` 进行操 +作,并通知调度程序和其他组件有关更改的情况。 + +该函数保证返回0,以便可以在诸如 ``sys_setresuid()`` 函数的末尾进行尾调用。 + +请注意,该函数会消耗调用者对新凭据的引用。调用者在此之后不应调用 +``put_cred()`` 释放新凭据。 + +此外,一旦新的凭据上调用了该函数,就不能进一步更改这些凭据。 + + +如果在调用 ``prepare_creds()`` 之后安全检查失败或发生其他错误, +则应调用以下函数:: + + void abort_creds(struct cred *new); + +这将释放 ``prepare_creds()`` 获取的 ``current->cred_replace_mutex`` 的锁, +并释放新的凭据。 + +一个典型的凭据修改函数看起来像这样:: + + int alter_suid(uid_t suid) + { + struct cred *new; + int ret; + + new = prepare_creds(); + if (!new) + return -ENOMEM; + + new->suid = suid; + ret = security_alter_suid(new); + if (ret < 0) { + abort_creds(new); + return ret; + } + + return commit_creds(new); + } + + +管理凭据 +-------- + +有一些函数用来辅助凭据管理: + + - ``void put_cred(const struct cred *cred);`` + + 这将释放对给定凭据集的引用。如果引用计数为零,凭据集将由 + RCU系统安排进行销毁。 + + - ``const struct cred *get_cred(const struct cred *cred);`` + + 这将获取对活动凭据集的引用。返回指向凭据集的指针。 + + - ``struct cred *get_new_cred(struct cred *cred);`` + + 这将获取对当前正在构建且可变的凭据集的引用。返回指向凭据集的指针。 + +打开文件凭据 +============ + +当打开新文件时,会获取对打开任务凭据的引用,并将其附加到文件结构体的 +``f_cred`` 字段中,替代原来的 ``f_uid`` 和 ``f_gid`` 。原来访问 +``file->f_uid`` 和 ``file->f_gid`` 的代码现在应访问 ``file->f_cred->fsuid`` +和 ``file->f_cred->fsgid`` 。 + +安全访问 ``f_cred`` 的情况下可以不使用RCU或加锁,因为指向凭据的指针 +以及指向的凭据结构的内容在文件结构的整个生命周期中保持不变,除非是 +上述列出的例外情况(参阅任务凭据部分)。 + +为了避免“混淆代理”权限提升攻击,在打开的文件后续操作时,访问控制检查 +应该使用这些凭据,而不是使用“当前”的凭据,因为该文件可能已经被传递给 +一个更具特权的进程。 + +覆盖VFS对凭据的使用 +=================== + +在某些情况下,需要覆盖VFS使用的凭据,可以通过使用不同的凭据集调用 +如 ``vfs_mkdir()`` 来实现。以下是一些进行此操作的位置: + + * ``sys_faccessat()``. + * ``do_coredump()``. + * nfs4recover.c. diff --git a/Documentation/translations/zh_CN/security/index.rst b/Documentation/translations/zh_CN/security/index.rst index d8aacd1930d9..78d9d4b36dca 100644 --- a/Documentation/translations/zh_CN/security/index.rst +++ b/Documentation/translations/zh_CN/security/index.rst @@ -15,20 +15,20 @@ .. 
toctree:: :maxdepth: 1 + credentials + snp-tdx-threat-model lsm sak + self-protection siphash + tpm/index digsig landlock TODOLIST: -* credentials -* snp-tdx-threat-model * IMA-templates * keys/index * lsm-development * SCTP -* self-protection -* tpm/index * secrets/index * ipe diff --git a/Documentation/translations/zh_CN/security/keys/index.rst b/Documentation/translations/zh_CN/security/keys/index.rst new file mode 100644 index 000000000000..7c28d003fb0a --- /dev/null +++ b/Documentation/translations/zh_CN/security/keys/index.rst @@ -0,0 +1,22 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/keys/index.rst + +:翻译: + + +======== +内核密钥 +======== + +.. toctree:: + :maxdepth: 1 + + +TODOLIST: +* core +* ecryptfs +* request-key +* trusted-encrypted diff --git a/Documentation/translations/zh_CN/security/secrets/index.rst b/Documentation/translations/zh_CN/security/secrets/index.rst new file mode 100644 index 000000000000..5ea78713f10e --- /dev/null +++ b/Documentation/translations/zh_CN/security/secrets/index.rst @@ -0,0 +1,17 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/secrets/index.rst + +:翻译: + +===================== +密钥文档 +===================== + +.. toctree:: + + +TODOLIST: + +* coco diff --git a/Documentation/translations/zh_CN/security/self-protection.rst b/Documentation/translations/zh_CN/security/self-protection.rst new file mode 100644 index 000000000000..3c8a68b1e1be --- /dev/null +++ b/Documentation/translations/zh_CN/security/self-protection.rst @@ -0,0 +1,271 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../disclaimer-zh_CN.rst +:Original: Documentation/security/self-protection.rst + +:翻译: + + 张巍 zhangwei <zhangwei@cqsoftware.com.cn> + +============ +内核自我保护 +============ + +内核自我保护是指在Linux内核中设计与实现的各种系统与结构 +以防止内核本身的安全漏洞问题。它涵盖了广泛问题,包括去除 +整个类的漏洞,阻止安全漏洞利用方法,以及主动检测攻击尝 +试。并非所有的话题都在本文中涉及,但它应该为了解内核自我 +保护提供一个合理的起点,并解答常见的问题。(当然,欢迎提 +交补丁!) 
+ +在最坏的情况下,我们假设一个非特权的本地攻击者对内核内存 +有任意读写访问权限。虽然在许多情况下,漏洞被利用时并不会 +提供此级别的访问权限,但如果我们能防御最坏情况,也能应对 +权限较低的攻击。一个更高的标准,且需要牢记的是保护内核免 +受具有特权的本地攻击者的攻击,因为root用户可以有更多权限。 +(尤其是当他们能够加载任意内核模块时) + +成功的自我保护的目标是:有效、默认开启、不需要开发者主动 +选择、没有性能影响、不妨碍内核调试、并且没有测试。虽然很 +难满足所有的这些目标,但明确提到这些目标非常重要,因为这 +些方面需要被探索、解决或接受。 + +========== +攻击面缩减 +========== + +防止安全漏洞最基本的防御方式是减少可以被用来重定向执行的 +内核区域。这包括限制用户公开使用的API、使内核API更难被错 +误使用、最小化可写内核内存区域等。 + +严格的内核内存权限 +------------------- + +当所有内核内存都是可写的,攻击者可以轻松地重定向执行流。 +为了减少这种攻击目标的可用性,内核需要更严格的权限集来 +保护其内存。 + +可执行代码和只读数据必须不可写 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +任何具有可执行内存的区域必须不可写,显然这也包括内核文本 +本身。我们还必须考虑其他地方:内核模块、JIT内存等,(在 +某些情况下,为了支持像指令替代、断点、kprobes等功能,这些 +区域会暂时被设置为可写。如果这些功能必须存在于内核中,它 +们的实现方式是:在更新期间将内存临时设置可写,然后再恢复 +为原始权限。) + +为了支持这一点,CONFIG_STRICT_KERNEL_RWX 和 +CONFIG_STRICT_MODULE_RWX 的设计旨在确保代码不可写,数据不 +可执行,以及只读数据既不可写也不可执行。 + +大多数架构默认支持这些选项,且用户无法选择。对于一些像arm +这种希望能够选择这些选项的架构,可以在架构Kconfig中选择 +ARCH_OPTIONAL_KERNEL_RWX以启用Kconfig提示。 +CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT决定在启用 +ARCH_OPTIONAL_KERNEL_RWX时的默认设置。 + +函数指针和敏感变量必须不可写 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +内核内存中有大量的函数指针,这些指针被内核查找并用于继续执行 +(例如,描述符/向量表、文件/网络等操作结构等)。这些变量的数 +量必须减少到最低限度 + +许多像这样的变量可以通过设置为"const"来实现只读,从而使它们 +存放在内核的.rodata段而非.data段,从而获得内核严格内存权限的 +保护。 + +对于在_init是仅初始化一次的变量,可以使用_ro_after_init属性 +进行标记。 + +剩下的变量通常是那些更新频率较低的(例如GDT)。这些变量需要另 +一个机制(类似于上述提到的对内核代码所做的临时例外),以便在 +其余生命周期内保持只读状态。(例如,在进行更新时,只有执行 +更新的CPU线程会被授予对内存的不可中断写入访问权限。) + +将内核内存与用户空间内存分隔开 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +内核绝对不可以执行用户空间内存,同时,内核也不得在没有明确预 +期的情况下访问用户内存空间。这些规则可以通过一些硬件限制来支 +持(如x86的SMEP/SMAP,ARM的PXN/PAN)或通过仿真(如ARM的内存 +域)来强制执行。通过这种方式阻止用户空间内存的访问,攻击者就 +无法将执行和数据解析转移到易于控制的用户空间内存,从而迫使攻 +击完全在内核中进行。 + +减少对系统调用的访问 +-------------------- + +对于64位系统,一种消除许多系统调用最简单的方法是构建时不启用 +CONFIG_CONPAT。然而,这种情况通常不可行。 + +“seccomp”系统为用户空间提供了一种可选功能,提供了一种减少可供 +运行中进程使用内核入口点数量的方法。这限制了可以访问内核代码 +的范围,可能降低了某个特定漏洞被攻击者利用的可能性。 + +一个改进的方向是创建有效的方法,仅允许受信任的进程访问例如兼 +容模式、用户命名空间、BPF创建和性能分析等功能。这将把内核入口 +点范围限制在通常可以被非特权用户空间进程访问的较常见集合中 + +限制对内核模块的访问 +-------------------- + +内核绝不应允许非特权用户加载特定的内核模块,因为这可能为攻击者 +提供一个意外扩展的可用攻击面的方法。(通过已预定义子系统按需加 +载模块,如MODULE_ALIAS_*,被认为是“预期的”,但即便如此,也应对 +这些情况给予更多的关注。)例如,通过非特权的套接字API加载文件 +系统模块是没有意义的:只有root用户或物理本地用户应该触发文件系 +统模块的加载。(在某些情况下,这甚至可能存在争议。) + +为了防止特权用户的攻击,系统可能需要完全禁止模块加载(例如,通 +过单体内核构建或modules_disabled sysctl),或者使用签名模块(例 +如,CONFIG_MODULE_SIG_FORCE或通过LoadPin保护的dm-crypt),以防 +止root用户通过模块加载器加载任意内核代码。 + +内存完整性 +---------- + +内核中有许多内存结构在攻击过程中被定期泛滥用以获取执行控制,迄今 +为止,最常见的是堆栈缓冲区溢出,在这种攻击中,堆栈上存储的返回地 +址被覆盖。除此之外,还有许多其他类型的攻击,防护措施也应运而生。 + +堆栈缓冲区溢出 +-------------- + +经典的堆栈缓冲区溢出攻击是指超出栈上分配的变量预期大小,从而将一 +个受控值写入栈帧的返回地址。最常见的防御措施是堆栈保护 +(CONFIG_STACKPROTECTOR),它在函数返回前会验证栈上的“stack canary”。 +其他防御措施还包括影子堆栈等。 + +堆栈深度溢出 +------------ + +一个不太容易被理解的攻击方式是利用bug触发内核通过深度函数调用或 +大的堆栈分配来消耗堆栈内存。通过这种攻击,攻击者可以将数据写入内 +核预分配堆栈空间之外的敏感结构。为了更好的防护这种攻击,必须进行 +两项重要的更改:将敏感的线程信息结构转移到其他地方,并在堆栈底部 +添加一个故障内存洞,以捕获这些溢出 + +栈内存完整性 +------------ + +用于跟踪堆空闲列表的结构可以在分配和释放时进行完整性检查,以确保它 +们不会被用来操作其它内存区域。 + +计算器完整性 +------------ + +内核中的许多地方使用原子计数器来跟踪对象引用或执行类似的生命周期管 +理。当这些计数器可能发生溢出时(无论是上溢还是下溢),这通常会暴露 +出使用后释放(use-after-free)漏洞。通过捕捉原子计数器溢出,这类漏 +洞就可以消失。 + +大小计算溢出检测 +---------------- + +与计算器溢出类似,整数溢出(通常是大小计算)需要在运行时进行检测, +以防止这类在传统上会导致能够写入内核缓冲区末尾之外的漏洞。 + +概率性防御 +---------- + +尽管许多防御措施可以被认定是确定的(例如,只读内存不能写入),但 +有些确保措施仅提供统计防御,即攻击者必须收集足够的关于运行系统的 +信息才能突破防御。尽管这些防御并不完美,但它们确实提供了有意义的 +保护。 + +栈保护、迷惑技术和其他秘密 +-------------------------- + +值得注意的是,像之前讨论的栈保护这样的技术,从技术上来说是统计性防 +御,因为它们依赖于一个秘密值,而这样的值可能会通过信息泄露漏洞而被 +发现。 + +对于想JIT(及时翻译器)这样的情况,其中可执行内容可能部分由用户空间 +控制,也需要类似的秘密之来迷惑。 + +至关重要的是,所使用的秘密值必须是独立的(例如,每个栈使用不同的栈 +保护值),并且具有高熵(例如,随机数生成器(RNG)是否正常工作?), +以最大限度地提高其成功率。 + +内核地址空间布局随机化(KASLR) 
+------------------------------- + +由于内核内存的位置几乎总是攻击成功的关键因素,因此使内核内存位置变 +得非确定性会增加攻击的难度。(请注意,这反过来提高了信息泄露的价 +值,因为泄露的信息可以用来发现目标内存位置。) + +文本和模块基址 +-------------- + +通过在启动时重新设定内核的物理基地址和虚拟基地址 +(CONFIG_RANDOMIZE_BASE),那些需要利用内核代码的攻击将会受阻。此外 +通过偏移模块加载基地址,意味着即使系统每次启动时按相同顺序加载同一 +组模块,这些模块也不会与内核文本的其余部分公用一个基地址。 + +堆栈基地址 +---------- + +如果进程之间内核堆栈的基地址不相同,甚至在不同系统调用之间也不相同, +那么栈上或超出栈的目标位置就会变得更加难以确定。 + +动态内存基址 +------------ + +很多内核的动态内存(例如kmalloc,vmalloc等)由于早期启动初始化的顺 +序,最终布局是相对确定的。如果这些区域的基地址在启动之间不相同,攻 +击者就无法轻易定位它们,必须依赖于针对该区域的信息泄露才能成功。 + +结构布局 +-------- + +通过在每次构建时对敏感结构的布局进行随机化处理,攻击这必须将攻击调 +节到已知的内核版本,或者泄露足够的内核内存来确定结构布局,然后才能 +对其进行操作。 + +防止信息泄露 +------------ + +由于敏感结构的位置是攻击的主要目标,因此防止内核内存地址和内核内存 +内容泄露非常重要(因为它们可能包含内核地址或者其他敏感数据,例如 +栈保护值)。 + +内核地址 +-------- + +将内核地址打印到用户空间会泄露有关内核内存布局的敏感信息。在使用任 +何打印符号打印原始地址时,目前%px,%p[ad](和在某些情况下的%p[sSb]) +时。使用这些格式符写入的文件需要限制为只有特权进程可读。 + +在4.14及以前的内核版本中,使用%p格式符打印的是原始地址。从4.15-rcl +版本开始,使用%p格式符打印的地址会在打印前进行哈希处理。 + +[*]如果启用KALLSYMS并且符号查找失败,则打印原始地址;如果没有启用 +KALLSYSM,则会直接打印原始地址。 + +唯一标识符 +---------- + +内核内存地址绝不可能用作向用户空间公开的标识符。相反,应该使用原子 +计数器,IDR(ID映射表)或类似的唯一标识符。 + +内存初始化 +---------- + +复制到用户空间的内存必须始终被完全初始化,如果没有显式地使用memset() +函数进行初始化,那就需要修改编译器,确保清除结构中的空洞。 + +内存清除 +-------- + +在释放内存时,最好对内存内容进行清除处理,以防止攻击者重用内存中以前 +的内容。例如,在系统调用返回时清除堆栈(CONFIG_GCC_PLUGIN_STACKLEAK), +在释放堆内容是清除其内容。这有助于防止许多未初始化变量攻击、堆栈内容 +泄露、堆内容泄露以及使用后释放攻击(user-after-free)。 + +目标追踪 +-------- + +为了帮助消除导致内核地址被写入用户空间的各种错误,需要跟踪写入的目标。 +如果缓冲区的目标是用户空间(例如,基于seq_file的/proc文件),则应该自 +动审查敏感值。 diff --git a/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst b/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst new file mode 100644 index 000000000000..b51eeaebab67 --- /dev/null +++ b/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst @@ -0,0 +1,209 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/security/snp-tdx-threat-model.rst + +:翻译: + + 毛玉贤 Yuxian Mao <maoyuxian@cqsoftware.com.cn> + +========================== +Linux中x86虚拟化的机密计算 +========================== + +.. contents:: :local: + +By: Elena Reshetova <elena.reshetova@intel.com> and Carlos Bilbao <carlos.bilbao.osdev@gmail.com> + +动机 +==== + +在x86虚拟环境中从事机密计算工作的内核开发人员,是基于一组与传统Linux内核 +威胁模型有所不同的假设条件下开展工作的。传统意义上,Linux威胁模型承认攻 +击者可以存在于用户空间,以及一小部分能够通过各种网络接口或有限的硬件特定 +暴露接口(如USB、Thunderbolt)与内核交互的外部攻击者。本文档的目的是解释 +在机密计算领域中出现的额外攻击向量,并讨论为 Linux 内核提出的保护机制。 + +概述与术语 +========== + +机密计算(Confidential Computing,简称CoCo)是一个广泛的术语,涵盖了多种 +旨在保护数据在使用过程中(与静态数据或传输数据相比)的机密性和完整性的安 +全技术。从本质上讲,机密计算(CoCo)解决方案提供了一个受信任执行环境(TEE), +在该环境中可以进行安全的数据处理,因此,它们通常根据预期在TEE中运行的软件 +来进一步划分为不同的子类型。本文档专注于一类针对虚拟化环境的机密计算技术 +(Confidential Computing, CoCo),这些技术允许在可信执行环境 +(Trusted Execution Environment, TEE)中运行虚拟机(VM)。从现在起,本文档 +将把这一类机密计算(CoCo)技术称为“虚拟化环境(VE)中的机密计算(CoCo)”。 + +在虚拟化环境中,机密计算(CoCo)指的是一组硬件和/或软件技术,这些技术能够 +为在CoCo虚拟机(VM)内运行的软件提供更强的安全保障。具体来说,机密计算允许 +其用户确认所有软件组件的可信度,从而将其包含在精简的受信任计算基(TCB)中, +这是基于机密计算具备验证这些受信组件状态的能力。 + +虽然不同技术之间的具体实现细节有所不同,但所有现有机制都旨在为虚拟机的客户 +内存和执行状态(vCPU寄存器)提供更高的机密性和完整性,更严格地控制客户中断 +注入,并提供一些额外机制来控制客户与宿主机之间的页映射。有关x86特定解决方案 +的更多细节,可以参考 +:doc:`Intel Trust Domain Extensions (TDX) </arch/x86/tdx>` 和 +`AMD Memory Encryption <https://www.amd.com/system/files/techdocs/sev-snp-strengthening-vm-isolation-with-integrity-protection-and-more.pdf>`_. 
+ +基本的机密计算(CoCo)客户布局包括宿主机、客户机、用于客户机与宿主机之间通信 +的接口、能够支持CoCo虚拟机(VM)的平台,以及一个在客户VM和底层平台之间充当安 +全管理员的可信中介。宿主机侧的虚拟机监视器(VMM)通常由传统VMM功能的一个子集 +组成,并仍然负责客户机生命周期的管理,即创建或销毁CoCo虚拟机、管理其对系统资 +源的访问等。然而,由于它通常不在CoCo VM的可信计算基(TCB)内,其访问权限受到 +限制,以确保实现安全目标。 + +在下图中,"<--->" 线表示机密计算(CoCo)安全管理员与其余组件之间的双向通信通 +道或接口,这些组件包括客户机、宿主机和硬件(数据流):: + + +-------------------+ +-----------------------+ + | CoCo guest VM |<---->| | + +-------------------+ | | + | Interfaces | | CoCo security manager | + +-------------------+ | | + | Host VMM |<---->| | + +-------------------+ | | + | | + +--------------------+ | | + | CoCo platform |<--->| | + +--------------------+ +-----------------------+ + +机密计算(CoCo)安全管理器的具体细节在在不同技术之间存在显著差异。例如,在某 +些情况下,它可能通过硬件(HW)实现,而在其他情况下,它可能是纯软件(SW)实现。 + +现有的Linux内核威胁模型 +======================= + +当前Linux内核威胁模型的总体组件包括:: + + +-----------------------+ +-------------------+ + | |<---->| Userspace | + | | +-------------------+ + | External attack | | Interfaces | + | vectors | +-------------------+ + | |<---->| Linux Kernel | + | | +-------------------+ + +-----------------------+ +-------------------+ + | Bootloader/BIOS | + +-------------------+ + +-------------------+ + | HW platform | + +-------------------+ + +在启动过程中,引导加载程序(bootloader)和内核之间也存在通信,但本图并未明确 +表示这一点。“接口”框表示允许内核与用户空间之间通信的各种接口。 这包括系统调用、 +内核 API、设备驱动程序等。 + +现有的 Linux 内核威胁模型通常假设其在一个受信任的硬件平台上执行,并且所有固件 +和启动加载程序都包含在该平台的受信任计算基(TCB)中。主要攻击者驻留在用户空间 +中,来自用户空间的所有数据通常被认为是不可信的,除非用户空间具有足够的特权来 +执行受信任的操作。此外,通常还会考虑外部攻击者,包括那些能够访问启用的外部网络 +(例如以太网、无线网络、蓝牙)、暴露的硬件接口(例如 USB、Thunderbolt),以及 +能够离线修改磁盘内容的攻击者。 + +关于外部攻击途径,值得注意的是,在大多数情况下,外部攻击者会首先尝试利用用户空 +间的漏洞,但攻击者也可能直接针对内核,特别是在宿主机具有物理访问权限的情况下。直 +接攻击内核的例子包括漏洞 CVE-2019-19524、CVE-2022-0435 和 CVE-2020-24490。 + +机密计算威胁模型及其安全目标 +============================ + +机密计算在上述攻击者列表中增加了一种新的攻击者类型:可能存在行为不当的宿主机 +(这可能包括传统虚拟机监视器VMM的部分组件或全部),由于其较大的软件攻击面, +通常被置于CoCo VM TCB之外。需要注意的是,这并不意味着宿主机或VMM是故意恶意的, +而是强调拥有一个较小的CoCo VM TCB具有安全价值。这种新型的攻击者可以被视为一种 +更强大的外部攻击者,因为它位于同一物理机器上(与远程网络攻击者不同),并且对 +客户机内核与大部分硬件的通信具有控制权:: + + +------------------------+ + | CoCo guest VM | + +-----------------------+ | +-------------------+ | + | |<--->| | Userspace | | + | | | +-------------------+ | + | External attack | | | Interfaces | | + | vectors | | +-------------------+ | + | |<--->| | Linux Kernel | | + | | | +-------------------+ | + +-----------------------+ | +-------------------+ | + | | Bootloader/BIOS | | + +-----------------------+ | +-------------------+ | + | |<--->+------------------------+ + | | | Interfaces | + | | +------------------------+ + | CoCo security |<--->| Host/Host-side VMM | + | manager | +------------------------+ + | | +------------------------+ + | |<--->| CoCo platform | + +-----------------------+ +------------------------+ + +传统上,宿主机对客户机数据拥有无限访问权限,并可以利用这种访问权限来攻击客户虚 +拟机。然而,机密计算(CoCo)系统通过添加诸如客户数据保密性和完整性保护等安全 +特性来缓解此类攻击。该威胁模型假设这些安全特性是可用且完好的。 + +这个 **Linux内核机密计算虚拟机(CoCo VM)的安全目标** 可以总结如下: + +1. 保护CoCo客户机私有内存和寄存器的机密性和完整性。 + +2. 防止宿主机特权升级到CoCo客户机Linux内核。虽然宿主机(及主机端虚拟机管理程序) + 确实需要一定的特权来创建、销毁或暂停访客,但防止特权升级的部分目标是确保这些 + 操作不会为攻击者提供获取客户机内核访问权限的途径。 + +上述安全目标导致了两个主要的**Linux内核机密计算虚拟机(CoCo VM)资产**: + +1. 客户机内核执行上下文。 +2. 
客户机内核私有内存。 + +宿主机对CoCo客户机资源具有完全控制权,并可以随时拒绝访问这些资源。资源的示例包 +括CPU时间、客户机可以消耗的内存、网络带宽等。因此,宿主机对CoCo客户机的拒绝服务 +(DoS)攻击超出了此威胁模型的范围。 + +Linux CoCo虚拟机攻击面是指从CoCo客户机Linux内核暴露到不受信任的主机的任何接口, +这些接口未被CoCo技术的软硬件保护所覆盖。这包括所有可能的侧信道攻击以及瞬态执 +行侧信道攻击。显式(非旁道)接口的示例包括访问端口I/O、内存映射I/O(MMIO)和 +直接内存访问(DMA)接口、访问PCI配置空间、特定于虚拟机管理程序(VMM)的超调用 +(指向主机端VMM)、访问共享内存页、主机允许注入到访客内核的中断,以及特定于 +CoCo技术的超调用(如果存在)。此外,在CoCo系统中,宿主机通常控制创建CoCo客户机 +的过程:它有方法将固件和引导程序镜像、内核镜像以及内核命令行加载到客户机中。所有 +这些数据在通过证明机制确认其完整性和真实性之前,都应视为不可信的。 + +下表显示了针对CoCo客户机Linux内核的威胁矩阵,但并未讨论潜在的缓解策略。该矩阵涉 +及的是CoCo特定版本的客户机、宿主机和平台。 + +.. list-table:: CoCo Linux客户机内核威胁矩阵 + :widths: auto + :align: center + :header-rows: 1 + + * - 威胁名称 + - 威胁描述 + + * - 客户机恶意配置 + - 一个行为不当的主机修改了以下其中一个客户机的配置: + + 1. 客户机固件或引导加载程序 + + 2. 客户机内核或模块二进制文件 + + 3. 客户机命令行参数 + + 这使得宿主机能够破坏在CoCo客户虚拟机内部运行代码的完整性,从而违反了机密计算 + (CoCo)的安全目标。 + + * - CoCo客户机数据攻击 + - 一个行为不当的宿主机对CoCo客户虚拟机与宿主机管理的物理或虚拟设备之间传输的数 + 据拥有完全控制权。这使得宿主机可以对这类数据的保密性、完整性和新鲜性进行任何攻击。 + + * - 格式错误的运行时输入 + - 一个行为不当的宿主机通过客户机内核代码使用的任意通信接口注入格式错误的输入。 + 如果代码没有正确处理这些输入,这可能导致从宿主机到客户机内核的特权提升。这包 + 括传统的侧信道攻击和/或瞬态执行攻击路径。 + + * - 恶意运行时输入 + - 一个行为不当的宿主机通过客户机内核代码使用的任意通信接口注入特定的输入值。与之前 + 的攻击向量(格式错误的运行时输入)不同,这个输入并非格式错误,而是其值被精心设 + 计以影响客户机内核的安全性。这类输入的例子包括向客户机提供恶意的时间或向客户机 + 的随机数生成器提供熵值。此外,如果它导致客户机内核执行特定操作(例如处理主机注 + 入的中断),此类事件的时序本身也可能成为一种攻击路径。这种攻击是对提供的宿主机输 + 入具有抵抗性的一种方式。 diff --git a/Documentation/translations/zh_CN/security/tpm/index.rst b/Documentation/translations/zh_CN/security/tpm/index.rst new file mode 100644 index 000000000000..707646590647 --- /dev/null +++ b/Documentation/translations/zh_CN/security/tpm/index.rst @@ -0,0 +1,20 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/tpm/index.rst + +:翻译: + 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn> + +================ +可信平台模块文档 +================ + +.. toctree:: + + tpm_event_log + tpm-security + tpm_tis + tpm_vtpm_proxy + xen-tpmfront + tpm_ftpm_tee diff --git a/Documentation/translations/zh_CN/security/tpm/tpm-security.rst b/Documentation/translations/zh_CN/security/tpm/tpm-security.rst new file mode 100644 index 000000000000..26818d28c98f --- /dev/null +++ b/Documentation/translations/zh_CN/security/tpm/tpm-security.rst @@ -0,0 +1,151 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/tpm/tpm-security.rst + +:翻译: + 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn> + +TPM安全 +======= + +本文档的目的是描述我们如何使内核使用TPM在面对外部窥探和数据包篡改 +攻击(文献中称为被动和主动中间人攻击)时保持合理的稳健性。当前的 +安全文档适用于TPM2.0。 + +介绍 +---- + +TPM通常是一个通过某种低带宽总线连接到PC的独立芯片。虽然有一些 +例外,例如Intel PTT,它是运行在靠近CPU的软件环境中的软件TPM, +容易受到不同类型的攻击,但目前大多数强化的安全环境要求使用独立 +硬件TPM,这是本文讨论的使用场景。 + +总线上的窥探和篡改攻击 +---------------------- + +当前的技术状态允许使用 `TPM Genie`_ 硬件中间人,这是一种简单的外部设备,可以在 +任何系统或笔记本电脑上几秒钟内安装。最近成功演示了针对 `Windows Bitlocker TPM`_ +系统的攻击。最近同样的攻击针对 `基于TPM的Linux磁盘加密`_ 方案也遭到了同样的攻击。 +下一阶段的研究似乎是入侵总线上现有的设备以充当中间人,因此攻击者需要物理访问几 +秒钟的要求可能不再存在。然而,本文档的目标是尽可能在这种环境下保护TPM的机密性和 +完整性,并尝试确保即使我们不能防止攻击,至少可以检测到它。 + +不幸的是,大多数TPM功能,包括硬件重置功能,都能被能够访问总线的攻击 +者控制,因此下面我们将讨论一些可能出现的干扰情况。 + +测量(PCR)完整性 +----------------- + +由于攻击者可以向TPM发送自己的命令,他们可以发送任意的PCR扩展,从而破 +坏测量系统,这将是一种烦人的拒绝服务攻击。然而,针对密封到信任测量中 +的实体,有两类更严重的攻击。 + +1. 攻击者可以拦截来自系统的所有PCR扩展,并完全替换为自己的值,产生 + 一个未篡改状态的重现,这会使PCR测量证明状态是可信的,并释放密钥。 + +2. 
攻击者可能会在某个时刻重置TPM,清除PCR,然后发送自己的测量,从而 + 有效地覆盖TPM已经完成的启动时间测量。 + +第一种攻击可以通过始终对PCR扩展和读取命令进行HMAC保护来防止,这意味着 +如果没有在响应中产生可检测的HMAC失败,则测量值无法被替换。然而第二种 +攻击只能通过依赖某种机制来检测,这种机制会在TPM重置后发生变化。 + +秘密保护 +-------- + +某些进出TPM的信息,如密钥密封、私钥导入和随机数生成容易被拦截,而仅仅 +使用HMAC保护无法防止这种情况。因此,对于这些类型的命令,我们必须使用 +请求和响应加密来防止秘密信息的泄露。 + +与TPM建立初始信任 +----------------- + +为了从一开始就提供安全性,必须建立一个初始的共享或非对称秘密,并且该 +秘钥必须对攻击者不可知。最明显的途径是使用背书和存储种子,这些可以用 +来派生非对称密钥。然而,使用这些密钥很困难,因为将它们传递给内核的唯 +一方法是通过命令行,这需要在启动系统中进行广泛的支持,而且无法保证任 +何一个层次不会有任何形式的授权。 + +Linux内核选择的机制是从空种子使用标准的存储种子参数派生出主椭圆曲线 +密钥。空种子有两个优势:首先该层次物理上无法具有授权,因此我们始终可 +以使用它;其次空种子在TPM重置时会发生变化,这意味着如果我们在当天开始 +时基于空种子建立信任,如果TPM重置且种子变化,则所有派生的密钥进行加盐 +处理的会话都将失败。 + +显然,在没有任何其他共享秘密的情况下使用空种子,我们必须创建并读取初始 +公钥,这当然可能会被总线中间人拦截并替换。然而,TPM有一个密钥认证机制 +(使用EK背书证书,创建认证身份密钥,并用该密钥认证空种子主密钥),但由 +于它过于复杂,无法在内核中运行,因此我们保留空主密钥名称的副本,通过 +sysfs导出,以便用户空间在启动时进行完整的认证。这里的明确保证是,如果空 +主密钥认证成功,那么从当天开始的所有TPM交易都是安全的;如果认证失败,则 +说明系统上有中间人(并且任何在启动期间使用的秘密可能已被泄露)。 + +信任堆叠 +-------- + +在当前的空主密钥场景中,TPM必须在交给下一个使用者之前完全清除。然而, +内核将派生出的空种子密钥的名称传递给用户空间,用户空间可以通过认证来 +验证该名称。因此,这种名称传递链也可以用于各个启动组件之间(通过未指 +定的机制)。例如grub可以使用空种子方案来实现安全性,并将名称交给内核。 +内核可以派生出密钥和名称,并确定如果它们与交接的版本不同,则表示发生 +了篡改。因此可以通过名称传递将任意启动组件(从UEFI到grub到内核)串联 +起来,只要每个后续组件知道如何收集该名称,并根据其派生的密钥进行验证。 + +会话属性 +-------- + +所有内核使用的TPM命令都允许会话。HMAC会话可用于检查请求和响应的完整性, +而解密和加密标志可用于保护参数和响应。HMAC和加密密钥通常是从共享授权秘 +钥推导出来的,但对于许多内核操作来说,这些密钥是已知的(通常为空)。因 +此内核使用空主密钥作为盐密钥来创建每个HMAC会话,这样就为会话密钥的派生 +提供了加密输入。因此内核仅需创建一次空主密钥(作为一个易失的TPM句柄), +并将其保存在tpm_chip中,用于每次在内核中使用TPM时。由于内核资源管理器缺 +乏去间隙化,当前每次操作都需要创建和销毁会话,但未来可能会将单个会话重用 +用于内核中的HMAC、加密和解密会话。 + +保护类型 +-------- + +对于每个内核操作,我们使用空主密钥加盐的HMAC来保护完整性。此外我们使用参数 +加密来保护密钥密封,并使用参数解密来保护密钥解封和随机数生成。 + +空主密钥认证在用户空间的实现 +============================ + +每个TPM都会附带几个X.509证书,通常用于主背书密钥。本文档假设存在椭圆曲线 +版本的证书,位于01C00002,但也同样适用于RSA证书(位于01C00001)。 + +认证的第一步是使用 `TCG EK Credential Profile`_ 模板进行主密钥的创建,该 +模板允许将生成的主密钥与证书中的主密钥进行比较(公钥必须匹配)。需要注意 +的是,生成EK主密钥需要EK层级密码,但EC主密钥的预生成版本应位于81010002, +并且可以无需密钥授权对其执行TPM2_ReadPublic()操作。接下来,证书本身必须 +经过验证,以确保其可以追溯到制造商根证书(该根证书应公开在制造商网站上)。 +完成此步骤后将在TPM内部生成一个认证密钥(AK),并使用TPM2_MakeCredential、 +AK的名称和EK公钥加密一个秘密。然后TPM执行TPM2_ActivateCredential,只有在 +TPM、EK和AK之间的绑定关系成立时,才能恢复秘密。现在,生成的AK可以用于对由 +内核导出的空主密钥进行认证。由于TPM2_MakeCredential/ActivateCredential操作 +相对复杂,下面将描述一种涉及外部生成私钥的简化过程。 + +这个过程是通常基于隐私CA认证过程的简化缩写。假设此时认证由TPM所有者进行, +所有者只能访问所有者层次。所有者创建一个外部公/私钥对(假设是椭圆曲线), +并使用内部包装过程将私钥进行封装以便导入,该私钥被其父级由EC派生的存储主密 +钥保护。TPM2_Import()操作使用一个以EK主密钥为盐值的参数解密HMAC会话(这也不 +需要EK密钥授权),意味着内部封装密钥是加密参数,因此除非TPM拥有认证的EK,否 +则无法执行导入操作。如果该命令成功执行并且HMAC在返回时通过验证,我们就知道 +我们有一个只为认证TPM加载的私钥副本。现在该密钥已加载到TPM中,并且存储主密 +钥已被清除(以释放空间用于生成空密钥)。 + +现在根据 `TCG TPM v2.0 Provisioning Guidance`_ 中的存储配置生成空EC主密钥; +该密钥的名称(即公钥区域的哈希值)被计算出来并与内核在/sys/class/tpm/tpm0/null_name +中提供的空种子名称进行比较。如果名称不匹配,TPM就被认为是受损的。如果名称匹配, +用户执行TPM2_Certify(),使用空主密钥作为对象句柄,使用加载的私钥作为签名句柄, +并提供随机的合格数据。返回的certifyInfo的签名将与加载的私钥的公钥部分进行验证, +并检查合格数据以防止重放。如果所有测试都通过,用户就可以确信TPM的完整性和隐私 +性在整个内核启动过程中得到了保护。 + +.. _TPM Genie: https://www.nccgroup.trust/globalassets/about-us/us/documents/tpm-genie.pdf +.. _Windows Bitlocker TPM: https://dolosgroup.io/blog/2021/7/9/from-stolen-laptop-to-inside-the-company-network +.. _基于TPM的Linux磁盘加密: https://www.secura.com/blog/tpm-sniffing-attacks-against-non-bitlocker-targets +.. _TCG EK Credential Profile: https://trustedcomputinggroup.org/resource/tcg-ek-credential-profile-for-tpm-family-2-0/ +.. 
_TCG TPM v2.0 Provisioning Guidance: https://trustedcomputinggroup.org/resource/tcg-tpm-v2-0-provisioning-guidance/ diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst b/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst new file mode 100644 index 000000000000..9c173291ac3e --- /dev/null +++ b/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst @@ -0,0 +1,49 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/tpm/tpm_event_log.rst + +:翻译: + 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn> + +=========== +TPM事件日志 +=========== + +本文档简要介绍了什么是TPM日志,以及它是如何从预启动固件移交到操作系统的。 + +介绍 +==== + +预启动固件维护一个事件日志,每当它将某些内容哈希到任何一个PCR寄存器时,该 +日志会添加新条目。这些事件按类型分类,并包含哈希后的PCR寄存器值。通常,预 +启动固件会哈希那些即将移交执行权或与启动过程相关的组件。 + +其主要应用是远程认证,而它之所以有用的原因在[1]中第一部分很好地阐述了: + +认证用于向挑战者提供有关平台状态的信息。然而,PCR的内容难以解读;因此,当 +PCR内容附有测量日志时,认证通常会更有用。尽管测量日志本身并不可信,但它们 +包含比PCR内容更为丰富的信息集。PCR内容用于对测量日志进行验证。 + +UEFI事件日志 +============ + +UEFI提供的事件日志有一些比较奇怪的特性。 + +在调用ExitBootServices()之前,Linux EFI引导加载程序会将事件日志复制到由 +引导加载程序自定义的配置表中。不幸的是,通过ExitBootServices()生成的事件 +并不会出现在这个表里。 + +固件提供了一个所谓的最终事件配置表排序来解决这个问题。事件会在第一次调用 +EFI_TCG2_PROTOCOL.GetEventLog()后被镜像到这个表中。 + +这引出了另一个问题:无法保证它不会在 Linux EFI stub 开始运行之前被调用。 +因此,在 stub 运行时,它需要计算并将最终事件表的大小保存到自定义配置表中, +以便TPM驱动程序可以在稍后连接来自自定义配置表和最终事件表的两个事件日志时 +跳过这些事件。 + +参考文献 +======== + +- [1] https://trustedcomputinggroup.org/resource/pc-client-specific-platform-firmware-profile-specification/ +- [2] The final concatenation is done in drivers/char/tpm/eventlog/efi.c diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst b/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst new file mode 100644 index 000000000000..5901eee32563 --- /dev/null +++ b/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst @@ -0,0 +1,31 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/tpm/tpm_ftpm_tee.rst + +:翻译: + 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn> + +=========== +固件TPM驱动 +=========== + +本文档描述了固件可信平台模块(fTPM)设备驱动。 + +介绍 +==== + +该驱动程序是用于ARM的TrustZone环境中实现的固件的适配器。该驱动 +程序允许程序以与硬件TPM相同的方式与TPM进行交互。 + +设计 +==== + +该驱动程序充当一个薄层,传递命令到固件实现的TPM并接收其响应。驱动 +程序本身并不包含太多逻辑,更像是固件与内核/用户空间之间的一个管道。 + +固件本身基于以下论文: +https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/ftpm1.pdf + +当驱动程序被加载时,它会向用户空间暴露 ``/dev/tpmX`` 字符设备,允许 +用户空间通过该设备与固件TPM进行通信。 diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst b/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst new file mode 100644 index 000000000000..0fb009f93e10 --- /dev/null +++ b/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst @@ -0,0 +1,43 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. 
include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/tpm/tpm_tis.rst + +:翻译: + 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn> + +TPM FIFO接口驱动 +================ + +TCG PTP规范定义了两种接口类型:FIFO和CRB。前者基于顺序的读写操作, +后者基于包含完整命令或响应的缓冲区。 + +FIFO(先进先出)接口被tpm_tis_core依赖的驱动程序使用。最初,Linux只 +有一个名为tpm_tis的驱动,覆盖了内存映射(即 MMIO)接口,但后来它被 +扩展以支持TCG标准所支持的其他物理接口。 + +由于历史原因,最初的MMIO驱动被称为tpm_tis,而FIFO驱动的框架被命名为 +tpm_tis_core。在tpm_tis中的“tis”后缀来自TPM接口规范,这是针对TPM1.x +芯片的硬件接口规范。 + +通信基于一个由TPM芯片通过硬件总线或内存映射共享的20KiB 缓冲区,具体 +取决于物理接线。该缓冲区进一步分为五个相等大小的4KiB缓冲区,为CPU和 +TPM之间的通信提供等效的寄存器集。这些通信端点在TCG术语中称为localities。 + +当内核想要向TPM芯片发送命令时,它首先通过在TPM_ACCESS寄存器中设置 +requestUse位来保留locality0。当访问被授予时,该位由芯片清除。一旦完成 +通信,内核会写入TPM_ACCESS.activeLocality位。这告诉芯片该本地性已被 +释放。 + +待处理的本地性由芯片按优先级降序逐个服务,一次一个: + +- Locality0优先级最低。 +- Locality5优先级最高。 + +关于localities的更多信息和含义,请参阅TCG PC客户端平台TPM 配置文件规范的第3.2节。 + +参考文献 +======== + +TCG PC客户端平台TPM配置文件(PTP)规范 +https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/ diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst b/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst new file mode 100644 index 000000000000..bc92cfb684c3 --- /dev/null +++ b/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst @@ -0,0 +1,51 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/tpm/tpm_vtpm_proxy.rst + +:翻译: + 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn> + +========================== +Linux容器的虚拟TPM代理驱动 +========================== + +| 作者: +| Stefan Berger <stefanb@linux.vnet.ibm.com> + +本文档描述了用于Linux容器的虚拟可信平台模块(vTPM)代理设备驱动。 + +介绍 +==== + +这项工作的目标是为每个Linux容器提供TPM功能。这使得程序能够像与物理系统 +上的TPM交互一样,与容器中的TPM进行交互。每个容器都会获得一个唯一的、模 +拟的软件TPM。 + +设计 +==== + +为了使每个容器都能使用模拟的软件TPM,容器管理栈需要创建一对设备,其中 +包括一个客户端TPM字符设备 ``/dev/tpmX`` (X=0,1,2...)和一个‘服务器端’ +文件描述符。当文件描述符传被递给TPM模拟器时,前者通过创建具有适当主次 +设备号的字符设备被移入容器,然后,容器内的软件可以使用字符设备发送TPM +命令,模拟器将通过文件描述符接收这些命令,并用它来发送响应。 + +为了支持这一点,虚拟TPM代理驱动程序提供了一个设备 ``/dev/vtpmx`` ,该设备 +用于通过ioctl创建设备对。ioctl将其作为配置设备的输入标志,例如这些标志指示 +TPM模拟器是否支持TPM1.2或TPM2功能。ioctl的结果是返回‘服务器端’的文件描述符 +以及创建的字符设备的主次设备号。此外,还会返回TPM字符设备的编号。例如,如果 +创建了 ``/dev/tpm10`` ,则返回编号( ``dev_num`` )10。 + +一旦设备被创建,驱动程序将立即尝试与TPM进行通信。来自驱动程序的所有命令 +都可以从ioctl返回的文件描述符中读取。这些命令应该立即得到响应。 + +UAPI +==== + +该API在以下内核代码中: + +include/uapi/linux/vtpm_proxy.h +drivers/char/tpm/tpm_vtpm_proxy.c + +函数:vtpmx_ioc_new_dev diff --git a/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst b/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst new file mode 100644 index 000000000000..fa085d98a99b --- /dev/null +++ b/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst @@ -0,0 +1,114 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../../disclaimer-zh_CN.rst + +:Original: Documentation/security/tpm/xen-tpmfront.rst + +:翻译: + 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn> + +================ +Xen的虚拟TPM接口 +================ + +作者:Matthew Fioravante (JHUAPL), Daniel De Graaf (NSA) + +本文档描述了用于Xen的虚拟可信平台模块(vTPM)子系统。假定读者熟悉 +Xen和Linux的构建和安装,并对TPM和vTPM概念有基本的理解。 + +介绍 +---- + +这项工作的目标是为虚拟客户操作系统(在Xen中称为DomU)提供TPM功能。这使得 +程序能够像与物理系统上的TPM交互一样,与虚拟系统中的TPM进行交互。每个客户 +操作系统都会获得一个唯一的、模拟的软件TPM。然而,vTPM的所有秘密(如密钥、 +NVRAM 等)由vTPM管理域进行管理,该域将这些秘密封存到物理TPM中。如果创建这 +些域(管理域、vTPM域和客户域)的过程是可信的,vTPM子系统就能将根植于硬件 +TPM的信任链扩展到Xen中的虚拟机。vTPM的每个主要组件都作为一个独立的域实现, +从而通过虚拟机监控程序(hypervisor)提供安全隔离。 + +这个mini-os vTPM 子系统是建立在IBM和Intel公司之前的vTPM工作基础上的。 + + +设计概述 +-------- + +vTPM的架构描述如下:: + + +------------------+ + | Linux DomU | ... 
+ | | ^ | + | v | | + | xen-tpmfront | + +------------------+ + | ^ + v | + +------------------+ + | mini-os/tpmback | + | | ^ | + | v | | + | vtpm-stubdom | ... + | | ^ | + | v | | + | mini-os/tpmfront | + +------------------+ + | ^ + v | + +------------------+ + | mini-os/tpmback | + | | ^ | + | v | | + | vtpmmgr-stubdom | + | | ^ | + | v | | + | mini-os/tpm_tis | + +------------------+ + | ^ + v | + +------------------+ + | Hardware TPM | + +------------------+ + +* Linux DomU: + 希望使用vTPM的基于Linux的客户机。可能有多个这样的实例。 + +* xen-tpmfront.ko: + Linux内核虚拟TPM前端驱动程序。该驱动程序为基于Linux的DomU提供 + vTPM访问。 + +* mini-os/tpmback: + Mini-os TPM后端驱动程序。Linux前端驱动程序通过该后端驱动程序连 + 接,以便在Linux DomU和其vTPM之间进行通信。该驱动程序还被 + vtpmmgr-stubdom用于与vtpm-stubdom通信。 + +* vtpm-stubdom: + 一个实现vTPM的mini-os存根域。每个正在运行的vtpm-stubdom实例与系统 + 上的逻辑vTPM之间有一一对应的关系。vTPM平台配置寄存器(PCRs)通常都 + 初始化为零。 + +* mini-os/tpmfront: + Mini-os TPM前端驱动程序。vTPM mini-os域vtpm-stubdom使用该驱动程序 + 与vtpmmgr-stubdom通信。此驱动程序还用于与vTPM域通信的mini-os域,例 + 如 pv-grub。 + +* vtpmmgr-stubdom: + 一个实现vTPM管理器的mini-os域。系统中只有一个vTPM管理器,并且在整个 + 机器生命周期内应一直运行。此域调节对系统中物理TPM的访问,并确保每个 + vTPM的持久状态。 + +* mini-os/tpm_tis: + Mini-osTPM1.2版本TPM 接口规范(TIS)驱动程序。该驱动程序由vtpmmgr-stubdom + 用于直接与硬件TPM通信。通信通过将硬件内存页映射到vtpmmgr-stubdom来实现。 + +* 硬件TPM: + 固定在主板上的物理 TPM。 + +与Xen的集成 +----------- + +vTPM驱动程序的支持已在Xen4.3中通过libxl工具堆栈添加。有关设置vTPM和vTPM +管理器存根域的详细信息,请参见Xen文档(docs/misc/vtpm.txt)。一旦存根域 +运行,与磁盘或网络设备相同,vTPM设备将在域的配置文件中进行设置 + +为了使用诸如IMA(完整性测量架构)等需要在initrd之前加载TPM的功能,必须将 +xen-tpmfront驱动程序编译到内核中。如果不使用这些功能,驱动程序可以作为 +模块编译,并像往常一样加载。 diff --git a/Documentation/translations/zh_TW/admin-guide/README.rst b/Documentation/translations/zh_TW/admin-guide/README.rst index a6e34c200ea3..0b038074d9d1 100644 --- a/Documentation/translations/zh_TW/admin-guide/README.rst +++ b/Documentation/translations/zh_TW/admin-guide/README.rst @@ -149,7 +149,7 @@ Linux內核6.x版本 <http://kernel.org/> "make xconfig" 基於Qt的配置工具。 - "make gconfig" 基於GTK+的配置工具。 + "make gconfig" 基於GTK的配置工具。 "make oldconfig" 基於現有的 ./.config 文件選擇所有選項,並詢問 新配置選項。 diff --git a/Documentation/translations/zh_TW/process/submit-checklist.rst b/Documentation/translations/zh_TW/process/submit-checklist.rst index 0ecb187753e4..a0cb91a6945f 100644 --- a/Documentation/translations/zh_TW/process/submit-checklist.rst +++ b/Documentation/translations/zh_TW/process/submit-checklist.rst @@ -85,8 +85,8 @@ Linux內核補丁提交檢查單 17) 所有新的模塊參數都記錄在 ``MODULE_PARM_DESC()`` 18) 所有新的用戶空間接口都記錄在 ``Documentation/ABI/`` 中。有關詳細信息, - 請參閱 ``Documentation/ABI/README`` 。更改用戶空間接口的補丁應該抄送 - linux-api@vger.kernel.org。 + 請參閱 Documentation/admin-guide/abi.rst (或 ``Documentation/ABI/README``)。 + 更改用戶空間接口的補丁應該抄送 linux-api@vger.kernel.org\ 。 19) 已通過至少注入slab和page分配失敗進行檢查。請參閱 ``Documentation/fault-injection/`` 。 如果新代碼是實質性的,那麼添加子系統特定的故障注入可能是合適的。 diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst index bf555c2270f5..1998dc146c56 100644 --- a/Documentation/usb/gadget-testing.rst +++ b/Documentation/usb/gadget-testing.rst @@ -1050,7 +1050,7 @@ Its attributes are: midi1_num_groups The number of groups for MIDI 1.0 (0-16) ui_hint UI-hint of this FB 0: unknown, 1: receiver, 2: sender, 3: both - midi_ci_verison Supported MIDI-CI version number (8 bit) + midi_ci_version Supported MIDI-CI version number (8 bit) is_midi1 Legacy MIDI 1.0 device (0-2) 0: MIDI 2.0 device, 1: MIDI 1.0 without restriction, or diff --git a/Documentation/userspace-api/accelerators/ocxl.rst b/Documentation/userspace-api/accelerators/ocxl.rst index db7570d5e50d..4e213af70237 100644 --- 
a/Documentation/userspace-api/accelerators/ocxl.rst +++ b/Documentation/userspace-api/accelerators/ocxl.rst @@ -3,8 +3,11 @@ OpenCAPI (Open Coherent Accelerator Processor Interface) ======================================================== OpenCAPI is an interface between processors and accelerators. It aims -at being low-latency and high-bandwidth. The specification is -developed by the `OpenCAPI Consortium <http://opencapi.org/>`_. +at being low-latency and high-bandwidth. + +The specification was developed by the OpenCAPI Consortium, and is now +available from the `Compute Express Link Consortium +<https://computeexpresslink.org/resource/opencapi-specification-archive/>`_. It allows an accelerator (which could be an FPGA, ASICs, ...) to access the host memory coherently, using virtual addresses. An OpenCAPI diff --git a/Documentation/userspace-api/dma-buf-heaps.rst b/Documentation/userspace-api/dma-buf-heaps.rst new file mode 100644 index 000000000000..535f49047ce6 --- /dev/null +++ b/Documentation/userspace-api/dma-buf-heaps.rst @@ -0,0 +1,25 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============================== +Allocating dma-buf using heaps +============================== + +Dma-buf Heaps are a way for userspace to allocate dma-buf objects. They are +typically used to allocate buffers from a specific allocation pool, or to share +buffers across frameworks. + +Heaps +===== + +A heap represents a specific allocator. The Linux kernel currently supports the +following heaps: + + - The ``system`` heap allocates virtually contiguous, cacheable, buffers. + + - The ``cma`` heap allocates physically contiguous, cacheable, + buffers. Only present if a CMA region is present. Such a region is + usually created either through the kernel commandline through the + `cma` parameter, a memory region Device-Tree node with the + `linux,cma-default` property set, or through the `CMA_SIZE_MBYTES` or + `CMA_SIZE_PERCENTAGE` Kconfig options. Depending on the platform, it + might be called ``reserved``, ``linux,cma``, or ``default-pool``. 
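The new dma-buf-heaps.rst above describes the available heaps but not the allocation call itself. As a rough, non-authoritative sketch of how userspace typically allocates from one of these heaps, assuming the standard ``/dev/dma_heap/<name>`` device nodes and the ``DMA_HEAP_IOCTL_ALLOC`` ioctl declared in the ``<linux/dma-heap.h>`` UAPI header (the heap name, buffer size, open flags and error handling below are illustrative only)::

    /*
     * Minimal sketch: allocate a 4 KiB buffer from the "system" heap.
     * On success, alloc.fd is a dma-buf file descriptor that can be
     * mmap()ed or passed on to another framework.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    int main(void)
    {
            struct dma_heap_allocation_data alloc = {
                    .len = 4096,                    /* buffer size in bytes */
                    .fd_flags = O_RDWR | O_CLOEXEC, /* flags for the new dma-buf fd */
                    .heap_flags = 0,                /* must currently be zero */
            };
            int heap_fd, ret;

            heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
            if (heap_fd < 0) {
                    perror("open /dev/dma_heap/system");
                    return 1;
            }

            ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc);
            if (ret < 0) {
                    perror("DMA_HEAP_IOCTL_ALLOC");
                    close(heap_fd);
                    return 1;
            }

            printf("allocated dma-buf fd %d\n", alloc.fd);

            close(alloc.fd);
            close(heap_fd);
            return 0;
    }

Which heaps exist and what permissions the heap device nodes carry depend on the platform and udev policy, so treat the above as a starting point rather than as the documented interface.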
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst index b1395d94b3fd..9cbe4390c872 100644 --- a/Documentation/userspace-api/index.rst +++ b/Documentation/userspace-api/index.rst @@ -44,6 +44,7 @@ Devices and I/O :maxdepth: 1 accelerators/ocxl + dma-buf-heaps dma-buf-alloc-exchange gpio/index iommufd diff --git a/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst b/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst index 34d6a0a1f4d3..70b5966aaff8 100644 --- a/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst +++ b/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst @@ -6,7 +6,7 @@ Remote Controller's sysfs nodes ******************************* -As defined at ``Documentation/ABI/testing/sysfs-class-rc``, those are +As defined at Documentation/ABI/testing/sysfs-class-rc, those are the sysfs nodes that control the Remote Controllers: diff --git a/MAINTAINERS b/MAINTAINERS index 7caaa76b9954..c0d0ecde9a7e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5402,6 +5402,8 @@ F: Documentation/dev-tools/checkpatch.rst CHINESE DOCUMENTATION M: Alex Shi <alexs@kernel.org> M: Yanteng Si <siyanteng@loongson.cn> +R: Dongliang Mu <dzm91@hust.edu.cn> +T: git git://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git S: Maintained F: Documentation/translations/zh_CN/ @@ -6918,6 +6920,7 @@ L: dri-devel@lists.freedesktop.org L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers) S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git +F: Documentation/userspace-api/dma-buf-heaps.rst F: drivers/dma-buf/dma-heap.c F: drivers/dma-buf/heaps/* F: include/linux/dma-heap.h diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h index 4aa2797f5e3c..8b85524beb59 100644 --- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h +++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h @@ -322,7 +322,8 @@ struct ipu3_uapi_ae_config { * 0: positive, 1: negative, default 0. * @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be * used for building histogram. Range [0, 32], default 8. - * Rule: + * + * Rule: * y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32 * A single Y is calculated based on sum of Gr/R/B/Gb based on * their contribution ratio. 
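One note on the intel-ipu3.h kernel-doc hunk above: the "Rule" that is given its own paragraph describes a weighted average, with the four ``y_gen_rate_*`` contributions required to sum to 32 so that the derived Y value stays in the range of the Bayer inputs. A minimal sketch of that arithmetic, following a straightforward reading of the comment (the helper name and the truncating division are assumptions, not code taken from the driver)::

    /*
     * Illustrative only: derive a single Y value from a Bayer quad
     * (Gr, R, B, Gb) using the four contribution weights described for
     * @y_calc.  The documented rule requires
     * rate_gr + rate_r + rate_b + rate_gb == 32, so dividing the
     * weighted sum by 32 normalizes the result; with the default of 8
     * for every weight this reduces to a plain average of the samples.
     */
    static unsigned int ae_y_from_bayer_quad(unsigned int gr, unsigned int r,
                                             unsigned int b, unsigned int gb,
                                             unsigned int rate_gr,
                                             unsigned int rate_r,
                                             unsigned int rate_b,
                                             unsigned int rate_gb)
    {
            return (gr * rate_gr + r * rate_r + b * rate_b +
                    gb * rate_gb) / 32;
    }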
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index a5cbbf3e26ec..3c61c29ff6ab 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1212,7 +1212,7 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) #ifndef memset_io /** - * memset_io Set a range of I/O memory to a constant value + * memset_io - Set a range of I/O memory to a constant value * @addr: The beginning of the I/O-memory range to set * @val: The value to set the memory to * @count: The number of bytes to set @@ -1224,7 +1224,7 @@ void memset_io(volatile void __iomem *addr, int val, size_t count); #ifndef memcpy_fromio /** - * memcpy_fromio Copy a block of data from I/O memory + * memcpy_fromio - Copy a block of data from I/O memory * @dst: The (RAM) destination for the copy * @src: The (I/O memory) source for the data * @count: The number of bytes to copy @@ -1236,7 +1236,7 @@ void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count); #ifndef memcpy_toio /** - * memcpy_toio Copy a block of data into I/O memory + * memcpy_toio - Copy a block of data into I/O memory * @dst: The (I/O memory) destination for the copy * @src: The (RAM) source for the data * @count: The number of bytes to copy diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h index 1f2c9469f921..05e3aa8fa8bc 100644 --- a/include/uapi/linux/firewire-cdev.h +++ b/include/uapi/linux/firewire-cdev.h @@ -449,7 +449,8 @@ struct fw_cdev_event_phy_packet { * which the packet arrived. For %FW_CDEV_EVENT_PHY_PACKET_SENT2 and non-ping packet, * the time stamp of isochronous cycle at which the packet was sent. For ping packet, * the tick count for round-trip time measured by 1394 OHCI controller. - * The time stamp of isochronous cycle at which either the response was sent for + * + * The time stamp of isochronous cycle at which either the response was sent for * %FW_CDEV_EVENT_PHY_PACKET_SENT2 or the request arrived for * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2. 
* @data: Incoming data diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check index 68083f2f1122..408b1dbe7884 100755 --- a/scripts/documentation-file-ref-check +++ b/scripts/documentation-file-ref-check @@ -92,7 +92,7 @@ while (<IN>) { next if ($f =~ m,^Next/,); # Makefiles and scripts contain nasty expressions to parse docs - next if ($f =~ m/Makefile/ || $f =~ m/\.sh$/); + next if ($f =~ m/Makefile/ || $f =~ m/\.(sh|py|pl|~|rej|org|orig)$/); # It doesn't make sense to parse hidden files next if ($f =~ m#/\.#); diff --git a/scripts/get_abi.pl b/scripts/get_abi.pl deleted file mode 100755 index de1c0354b50c..000000000000 --- a/scripts/get_abi.pl +++ /dev/null @@ -1,1103 +0,0 @@ -#!/usr/bin/env perl -# SPDX-License-Identifier: GPL-2.0 - -BEGIN { $Pod::Usage::Formatter = 'Pod::Text::Termcap'; } - -use strict; -use warnings; -use utf8; -use Pod::Usage qw(pod2usage); -use Getopt::Long; -use File::Find; -use IO::Handle; -use Fcntl ':mode'; -use Cwd 'abs_path'; -use Data::Dumper; - -my $help = 0; -my $hint = 0; -my $man = 0; -my $debug = 0; -my $enable_lineno = 0; -my $show_warnings = 1; -my $prefix="Documentation/ABI"; -my $sysfs_prefix="/sys"; -my $search_string; - -# Debug options -my $dbg_what_parsing = 1; -my $dbg_what_open = 2; -my $dbg_dump_abi_structs = 4; -my $dbg_undefined = 8; - -$Data::Dumper::Indent = 1; -$Data::Dumper::Terse = 1; - -# -# If true, assumes that the description is formatted with ReST -# -my $description_is_rst = 1; - -GetOptions( - "debug=i" => \$debug, - "enable-lineno" => \$enable_lineno, - "rst-source!" => \$description_is_rst, - "dir=s" => \$prefix, - 'help|?' => \$help, - "show-hints" => \$hint, - "search-string=s" => \$search_string, - man => \$man -) or pod2usage(2); - -pod2usage(1) if $help; -pod2usage(-exitstatus => 0, -noperldoc, -verbose => 2) if $man; - -pod2usage(2) if (scalar @ARGV < 1 || @ARGV > 2); - -my ($cmd, $arg) = @ARGV; - -pod2usage(2) if ($cmd ne "search" && $cmd ne "rest" && $cmd ne "validate" && $cmd ne "undefined"); -pod2usage(2) if ($cmd eq "search" && !$arg); - -require Data::Dumper if ($debug & $dbg_dump_abi_structs); - -my %data; -my %symbols; - -# -# Displays an error message, printing file name and line -# -sub parse_error($$$$) { - my ($file, $ln, $msg, $data) = @_; - - return if (!$show_warnings); - - $data =~ s/\s+$/\n/; - - print STDERR "Warning: file $file#$ln:\n\t$msg"; - - if ($data ne "") { - print STDERR ". 
Line\n\t\t$data"; - } else { - print STDERR "\n"; - } -} - -# -# Parse an ABI file, storing its contents at %data -# -sub parse_abi { - my $file = $File::Find::name; - - my $mode = (stat($file))[2]; - return if ($mode & S_IFDIR); - return if ($file =~ m,/README,); - return if ($file =~ m,/\.,); - return if ($file =~ m,\.(rej|org|orig|bak)$,); - - my $name = $file; - $name =~ s,.*/,,; - - my $fn = $file; - $fn =~ s,.*Documentation/ABI/,,; - - my $nametag = "File $fn"; - $data{$nametag}->{what} = "File $name"; - $data{$nametag}->{type} = "File"; - $data{$nametag}->{file} = $name; - $data{$nametag}->{filepath} = $file; - $data{$nametag}->{is_file} = 1; - $data{$nametag}->{line_no} = 1; - - my $type = $file; - $type =~ s,.*/(.*)/.*,$1,; - - my $what; - my $new_what; - my $tag = ""; - my $ln; - my $xrefs; - my $space; - my @labels; - my $label = ""; - - print STDERR "Opening $file\n" if ($debug & $dbg_what_open); - open IN, $file; - while(<IN>) { - $ln++; - if (m/^(\S+)(:\s*)(.*)/i) { - my $new_tag = lc($1); - my $sep = $2; - my $content = $3; - - if (!($new_tag =~ m/(what|where|date|kernelversion|contact|description|users)/)) { - if ($tag eq "description") { - # New "tag" is actually part of - # description. Don't consider it a tag - $new_tag = ""; - } elsif ($tag ne "") { - parse_error($file, $ln, "tag '$tag' is invalid", $_); - } - } - - # Invalid, but it is a common mistake - if ($new_tag eq "where") { - parse_error($file, $ln, "tag 'Where' is invalid. Should be 'What:' instead", ""); - $new_tag = "what"; - } - - if ($new_tag =~ m/what/) { - $space = ""; - $content =~ s/[,.;]$//; - - push @{$symbols{$content}->{file}}, " $file:" . ($ln - 1); - - if ($tag =~ m/what/) { - $what .= "\xac" . $content; - } else { - if ($what) { - parse_error($file, $ln, "What '$what' doesn't have a description", "") if (!$data{$what}->{description}); - - foreach my $w(split /\xac/, $what) { - $symbols{$w}->{xref} = $what; - }; - } - - $what = $content; - $label = $content; - $new_what = 1; - } - push @labels, [($content, $label)]; - $tag = $new_tag; - - push @{$data{$nametag}->{symbols}}, $content if ($data{$nametag}->{what}); - next; - } - - if ($tag ne "" && $new_tag) { - $tag = $new_tag; - - if ($new_what) { - @{$data{$what}->{label_list}} = @labels if ($data{$nametag}->{what}); - @labels = (); - $label = ""; - $new_what = 0; - - $data{$what}->{type} = $type; - if (!defined($data{$what}->{file})) { - $data{$what}->{file} = $name; - $data{$what}->{filepath} = $file; - } else { - $data{$what}->{description} .= "\n\n" if (defined($data{$what}->{description})); - if ($name ne $data{$what}->{file}) { - $data{$what}->{file} .= " " . $name; - $data{$what}->{filepath} .= " " . $file; - } - } - print STDERR "\twhat: $what\n" if ($debug & $dbg_what_parsing); - $data{$what}->{line_no} = $ln; - } else { - $data{$what}->{line_no} = $ln if (!defined($data{$what}->{line_no})); - } - - if (!$what) { - parse_error($file, $ln, "'What:' should come first:", $_); - next; - } - if ($new_tag eq "description") { - $sep =~ s,:, ,; - $content = ' ' x length($new_tag) . $sep . 
$content; - while ($content =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {} - if ($content =~ m/^(\s*)(\S.*)$/) { - # Preserve initial spaces for the first line - $space = $1; - $content = "$2\n"; - $data{$what}->{$tag} .= $content; - } else { - undef($space); - } - - } else { - $data{$what}->{$tag} = $content; - } - next; - } - } - - # Store any contents before tags at the database - if (!$tag && $data{$nametag}->{what}) { - $data{$nametag}->{description} .= $_; - next; - } - - if ($tag eq "description") { - my $content = $_; - while ($content =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {} - if (m/^\s*\n/) { - $data{$what}->{$tag} .= "\n"; - next; - } - - if (!defined($space)) { - # Preserve initial spaces for the first line - if ($content =~ m/^(\s*)(\S.*)$/) { - $space = $1; - $content = "$2\n"; - } - } else { - $space = "" if (!($content =~ s/^($space)//)); - } - $data{$what}->{$tag} .= $content; - - next; - } - if (m/^\s*(.*)/) { - $data{$what}->{$tag} .= "\n$1"; - $data{$what}->{$tag} =~ s/\n+$//; - next; - } - - # Everything else is error - parse_error($file, $ln, "Unexpected content", $_); - } - $data{$nametag}->{description} =~ s/^\n+// if ($data{$nametag}->{description}); - if ($what) { - parse_error($file, $ln, "What '$what' doesn't have a description", "") if (!$data{$what}->{description}); - - foreach my $w(split /\xac/,$what) { - $symbols{$w}->{xref} = $what; - }; - } - close IN; -} - -sub create_labels { - my %labels; - - foreach my $what (keys %data) { - next if ($data{$what}->{file} eq "File"); - - foreach my $p (@{$data{$what}->{label_list}}) { - my ($content, $label) = @{$p}; - $label = "abi_" . $label . " "; - $label =~ tr/A-Z/a-z/; - - # Convert special chars to "_" - $label =~s/([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff])/_/g; - $label =~ s,_+,_,g; - $label =~ s,_$,,; - - # Avoid duplicated labels - while (defined($labels{$label})) { - my @chars = ("A".."Z", "a".."z"); - $label .= $chars[rand @chars]; - } - $labels{$label} = 1; - - $data{$what}->{label} = $label; - - # only one label is enough - last; - } - } -} - -# -# Outputs the book on ReST format -# - -# \b doesn't work well with paths. So, we need to define something else: -# Boundaries are punct characters, spaces and end-of-line -my $start = qr {(^|\s|\() }x; -my $bondary = qr { ([,.:;\)\s]|\z) }x; -my $xref_match = qr { $start(\/(sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)$bondary }x; -my $symbols = qr { ([\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff]) }x; - -sub output_rest { - create_labels(); - - my $part = ""; - - foreach my $what (sort { - ($data{$a}->{type} eq "File") cmp ($data{$b}->{type} eq "File") || - $a cmp $b - } keys %data) { - my $type = $data{$what}->{type}; - - my @file = split / /, $data{$what}->{file}; - my @filepath = split / /, $data{$what}->{filepath}; - - if ($enable_lineno) { - printf ".. LINENO %s%s#%s\n\n", - $prefix, $file[0], - $data{$what}->{line_no}; - } - - my $w = $what; - - if ($type ne "File") { - my $cur_part = $what; - if ($what =~ '/') { - if ($what =~ m#^(\/?(?:[\w\-]+\/?){1,2})#) { - $cur_part = "Symbols under $1"; - $cur_part =~ s,/$,,; - } - } - - if ($cur_part ne "" && $part ne $cur_part) { - $part = $cur_part; - my $bar = $part; - $bar =~ s/./-/g; - print "$part\n$bar\n\n"; - } - - printf ".. _%s:\n\n", $data{$what}->{label}; - - my @names = split /\xac/,$w; - my $len = 0; - - foreach my $name (@names) { - $name =~ s/$symbols/\\$1/g; - $name = "**$name**"; - $len = length($name) if (length($name) > $len); - } - - print "+-" . "-" x $len . 
"-+\n"; - foreach my $name (@names) { - printf "| %s", $name . " " x ($len - length($name)) . " |\n"; - print "+-" . "-" x $len . "-+\n"; - } - - print "\n"; - } - - for (my $i = 0; $i < scalar(@filepath); $i++) { - my $path = $filepath[$i]; - my $f = $file[$i]; - - $path =~ s,.*/(.*/.*),$1,;; - $path =~ s,[/\-],_,g;; - my $fileref = "abi_file_".$path; - - if ($type eq "File") { - print ".. _$fileref:\n\n"; - } else { - print "Defined on file :ref:`$f <$fileref>`\n\n"; - } - } - - if ($type eq "File") { - my $bar = $w; - $bar =~ s/./-/g; - print "$w\n$bar\n\n"; - } - - my $desc = ""; - $desc = $data{$what}->{description} if (defined($data{$what}->{description})); - $desc =~ s/\s+$/\n/; - - if (!($desc =~ /^\s*$/)) { - if ($description_is_rst) { - # Remove title markups from the description - # Having titles inside ABI files will only work if extra - # care would be taken in order to strictly follow the same - # level order for each markup. - $desc =~ s/\n[\-\*\=\^\~]+\n/\n\n/g; - - # Enrich text by creating cross-references - - my $new_desc = ""; - my $init_indent = -1; - my $literal_indent = -1; - - open(my $fh, "+<", \$desc); - while (my $d = <$fh>) { - my $indent = $d =~ m/^(\s+)/; - my $spaces = length($indent); - $init_indent = $indent if ($init_indent < 0); - if ($literal_indent >= 0) { - if ($spaces > $literal_indent) { - $new_desc .= $d; - next; - } else { - $literal_indent = -1; - } - } else { - if ($d =~ /()::$/ && !($d =~ /^\s*\.\./)) { - $literal_indent = $spaces; - } - } - - $d =~ s,Documentation/(?!devicetree)(\S+)\.rst,:doc:`/$1`,g; - - my @matches = $d =~ m,Documentation/ABI/([\w\/\-]+),g; - foreach my $f (@matches) { - my $xref = $f; - my $path = $f; - $path =~ s,.*/(.*/.*),$1,;; - $path =~ s,[/\-],_,g;; - $xref .= " <abi_file_" . $path . ">"; - $d =~ s,\bDocumentation/ABI/$f\b,:ref:`$xref`,g; - } - - # Seek for cross reference symbols like /sys/... - @matches = $d =~ m/$xref_match/g; - - foreach my $s (@matches) { - next if (!($s =~ m,/,)); - if (defined($data{$s}) && defined($data{$s}->{label})) { - my $xref = $s; - - $xref =~ s/$symbols/\\$1/g; - $xref = ":ref:`$xref <" . $data{$s}->{label} . 
">`"; - - $d =~ s,$start$s$bondary,$1$xref$2,g; - } - } - $new_desc .= $d; - } - close $fh; - - - print "$new_desc\n\n"; - } else { - $desc =~ s/^\s+//; - - # Remove title markups from the description, as they won't work - $desc =~ s/\n[\-\*\=\^\~]+\n/\n\n/g; - - if ($desc =~ m/\:\n/ || $desc =~ m/\n[\t ]+/ || $desc =~ m/[\x00-\x08\x0b-\x1f\x7b-\xff]/) { - # put everything inside a code block - $desc =~ s/\n/\n /g; - - print "::\n\n"; - print " $desc\n\n"; - } else { - # Escape any special chars from description - $desc =~s/([\x00-\x08\x0b-\x1f\x21-\x2a\x2d\x2f\x3c-\x40\x5c\x5e-\x60\x7b-\xff])/\\$1/g; - print "$desc\n\n"; - } - } - } else { - print "DESCRIPTION MISSING for $what\n\n" if (!$data{$what}->{is_file}); - } - - if ($data{$what}->{symbols}) { - printf "Has the following ABI:\n\n"; - - foreach my $content(@{$data{$what}->{symbols}}) { - my $label = $data{$symbols{$content}->{xref}}->{label}; - - # Escape special chars from content - $content =~s/([\x00-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])/\\$1/g; - - print "- :ref:`$content <$label>`\n\n"; - } - } - - if (defined($data{$what}->{users})) { - my $users = $data{$what}->{users}; - - $users =~ s/\n/\n\t/g; - printf "Users:\n\t%s\n\n", $users if ($users ne ""); - } - - } -} - -# -# Searches for ABI symbols -# -sub search_symbols { - foreach my $what (sort keys %data) { - next if (!($what =~ m/($arg)/)); - - my $type = $data{$what}->{type}; - next if ($type eq "File"); - - my $file = $data{$what}->{filepath}; - - $what =~ s/\xac/, /g; - my $bar = $what; - $bar =~ s/./-/g; - - print "\n$what\n$bar\n\n"; - - my $kernelversion = $data{$what}->{kernelversion} if (defined($data{$what}->{kernelversion})); - my $contact = $data{$what}->{contact} if (defined($data{$what}->{contact})); - my $users = $data{$what}->{users} if (defined($data{$what}->{users})); - my $date = $data{$what}->{date} if (defined($data{$what}->{date})); - my $desc = $data{$what}->{description} if (defined($data{$what}->{description})); - - $kernelversion =~ s/^\s+// if ($kernelversion); - $contact =~ s/^\s+// if ($contact); - if ($users) { - $users =~ s/^\s+//; - $users =~ s/\n//g; - } - $date =~ s/^\s+// if ($date); - $desc =~ s/^\s+// if ($desc); - - printf "Kernel version:\t\t%s\n", $kernelversion if ($kernelversion); - printf "Date:\t\t\t%s\n", $date if ($date); - printf "Contact:\t\t%s\n", $contact if ($contact); - printf "Users:\t\t\t%s\n", $users if ($users); - print "Defined on file(s):\t$file\n\n"; - print "Description:\n\n$desc"; - } -} - -# Exclude /sys/kernel/debug and /sys/kernel/tracing from the search path -sub dont_parse_special_attributes { - if (($File::Find::dir =~ m,^/sys/kernel,)) { - return grep {!/(debug|tracing)/ } @_; - } - - if (($File::Find::dir =~ m,^/sys/fs,)) { - return grep {!/(pstore|bpf|fuse)/ } @_; - } - - return @_ -} - -my %leaf; -my %aliases; -my @files; -my %root; - -sub graph_add_file { - my $file = shift; - my $type = shift; - - my $dir = $file; - $dir =~ s,^(.*/).*,$1,; - $file =~ s,.*/,,; - - my $name; - my $file_ref = \%root; - foreach my $edge(split "/", $dir) { - $name .= "$edge/"; - if (!defined ${$file_ref}{$edge}) { - ${$file_ref}{$edge} = { }; - } - $file_ref = \%{$$file_ref{$edge}}; - ${$file_ref}{"__name"} = [ $name ]; - } - $name .= "$file"; - ${$file_ref}{$file} = { - "__name" => [ $name ] - }; - - return \%{$$file_ref{$file}}; -} - -sub graph_add_link { - my $file = shift; - my $link = shift; - - # Traverse graph to find the reference - my $file_ref = \%root; - foreach my $edge(split "/", $file) { - $file_ref = 
\%{$$file_ref{$edge}} || die "Missing node!"; - } - - # do a BFS - - my @queue; - my %seen; - my $st; - - push @queue, $file_ref; - $seen{$start}++; - - while (@queue) { - my $v = shift @queue; - my @child = keys(%{$v}); - - foreach my $c(@child) { - next if $seen{$$v{$c}}; - next if ($c eq "__name"); - - if (!defined($$v{$c}{"__name"})) { - printf STDERR "Error: Couldn't find a non-empty name on a children of $file/.*: "; - print STDERR Dumper(%{$v}); - exit; - } - - # Add new name - my $name = @{$$v{$c}{"__name"}}[0]; - if ($name =~ s#^$file/#$link/#) { - push @{$$v{$c}{"__name"}}, $name; - } - # Add child to the queue and mark as seen - push @queue, $$v{$c}; - $seen{$c}++; - } - } -} - -my $escape_symbols = qr { ([\x01-\x08\x0e-\x1f\x21-\x29\x2b-\x2d\x3a-\x40\x7b-\xfe]) }x; -sub parse_existing_sysfs { - my $file = $File::Find::name; - - my $mode = (lstat($file))[2]; - my $abs_file = abs_path($file); - - my @tmp; - push @tmp, $file; - push @tmp, $abs_file if ($abs_file ne $file); - - foreach my $f(@tmp) { - # Ignore cgroup, as this is big and has zero docs under ABI - return if ($f =~ m#^/sys/fs/cgroup/#); - - # Ignore firmware as it is documented elsewhere - # Either ACPI or under Documentation/devicetree/bindings/ - return if ($f =~ m#^/sys/firmware/#); - - # Ignore some sysfs nodes that aren't actually part of ABI - return if ($f =~ m#/sections|notes/#); - - # Would need to check at - # Documentation/admin-guide/kernel-parameters.txt, but this - # is not easily parseable. - return if ($f =~ m#/parameters/#); - } - - if (S_ISLNK($mode)) { - $aliases{$file} = $abs_file; - return; - } - - return if (S_ISDIR($mode)); - - # Trivial: file is defined exactly the same way at ABI What: - return if (defined($data{$file})); - return if (defined($data{$abs_file})); - - push @files, graph_add_file($abs_file, "file"); -} - -sub get_leave($) -{ - my $what = shift; - my $leave; - - my $l = $what; - my $stop = 1; - - $leave = $l; - $leave =~ s,/$,,; - $leave =~ s,.*/,,; - $leave =~ s/[\(\)]//g; - - # $leave is used to improve search performance at - # check_undefined_symbols, as the algorithm there can seek - # for a small number of "what". It also allows giving a - # hint about a leave with the same name somewhere else. - # However, there are a few occurences where the leave is - # either a wildcard or a number. Just group such cases - # altogether. 
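The grouping described in the comment above is what keeps the undefined-symbol search fast, and the same idea reappears in AbiRegex.regex_append() later in this patch. As a minimal, self-contained Python sketch of the concept (groups, add_pattern() and is_documented() are illustrative names, not code from this patch): bucket the compiled "What:" regexes by their last path component, falling back to an "others" bucket when the leaf is empty, numeric or a wildcard, so each sysfs node only has to be tested against a small subset of expressions.

  import re

  # Illustrative sketch of the leaf-grouping heuristic; not part of the patch.
  groups = {"others": []}

  def add_pattern(pattern):
      # Bucket by the final path component of the regex-converted What: entry
      leaf = pattern.rstrip("/").split("/")[-1]
      if not leaf or re.search(r"\\d|\.\*", leaf):
          leaf = "others"      # wildcard/numeric leaves make poor bucket keys
      groups.setdefault(leaf, []).append(re.compile(pattern))

  def is_documented(path):
      leaf = path.rstrip("/").split("/")[-1]
      candidates = groups.get(leaf, []) + groups["others"]
      return any(r.fullmatch(path) for r in candidates)

  # Hypothetical devnode, used only to exercise the sketch
  add_pattern(r"/sys/class/foo\d+/enable")
  print(is_documented("/sys/class/foo0/enable"))     # True
  print(is_documented("/sys/class/foo0/disable"))    # False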
- if ($leave =~ m/\.\*/ || $leave eq "" || $leave =~ /\\d/) { - $leave = "others"; - } - - return $leave; -} - -my @not_found; - -sub check_file($$) -{ - my $file_ref = shift; - my $names_ref = shift; - my @names = @{$names_ref}; - my $file = $names[0]; - - my $found_string; - - my $leave = get_leave($file); - if (!defined($leaf{$leave})) { - $leave = "others"; - } - my @expr = @{$leaf{$leave}->{expr}}; - die ("\rmissing rules for $leave") if (!defined($leaf{$leave})); - - my $path = $file; - $path =~ s,(.*/).*,$1,; - - if ($search_string) { - return if (!($file =~ m#$search_string#)); - $found_string = 1; - } - - for (my $i = 0; $i < @names; $i++) { - if ($found_string && $hint) { - if (!$i) { - print STDERR "--> $names[$i]\n"; - } else { - print STDERR " $names[$i]\n"; - } - } - foreach my $re (@expr) { - print STDERR "$names[$i] =~ /^$re\$/\n" if ($debug && $dbg_undefined); - if ($names[$i] =~ $re) { - return; - } - } - } - - if ($leave ne "others") { - my @expr = @{$leaf{"others"}->{expr}}; - for (my $i = 0; $i < @names; $i++) { - foreach my $re (@expr) { - print STDERR "$names[$i] =~ /^$re\$/\n" if ($debug && $dbg_undefined); - if ($names[$i] =~ $re) { - return; - } - } - } - } - - push @not_found, $file if (!$search_string || $found_string); - - if ($hint && (!$search_string || $found_string)) { - my $what = $leaf{$leave}->{what}; - $what =~ s/\xac/\n\t/g; - if ($leave ne "others") { - print STDERR "\r more likely regexes:\n\t$what\n"; - } else { - print STDERR "\r tested regexes:\n\t$what\n"; - } - } -} - -sub check_undefined_symbols { - my $num_files = scalar @files; - my $next_i = 0; - my $start_time = times; - - @files = sort @files; - - my $last_time = $start_time; - - # When either debug or hint is enabled, there's no sense showing - # progress, as the progress will be overriden. - if ($hint || ($debug && $dbg_undefined)) { - $next_i = $num_files; - } - - my $is_console; - $is_console = 1 if (-t STDERR); - - for (my $i = 0; $i < $num_files; $i++) { - my $file_ref = $files[$i]; - my @names = @{$$file_ref{"__name"}}; - - check_file($file_ref, \@names); - - my $cur_time = times; - - if ($i == $next_i || $cur_time > $last_time + 1) { - my $percent = $i * 100 / $num_files; - - my $tm = $cur_time - $start_time; - my $time = sprintf "%d:%02d", int($tm), 60 * ($tm - int($tm)); - - printf STDERR "\33[2K\r", if ($is_console); - printf STDERR "%s: processing sysfs files... %i%%: $names[0]", $time, $percent; - printf STDERR "\n", if (!$is_console); - STDERR->flush(); - - $next_i = int (($percent + 1) * $num_files / 100); - $last_time = $cur_time; - } - } - - my $cur_time = times; - my $tm = $cur_time - $start_time; - my $time = sprintf "%d:%02d", int($tm), 60 * ($tm - int($tm)); - - printf STDERR "\33[2K\r", if ($is_console); - printf STDERR "%s: processing sysfs files... 
done\n", $time; - - foreach my $file (@not_found) { - print "$file not found.\n"; - } -} - -sub undefined_symbols { - print STDERR "Reading $sysfs_prefix directory contents..."; - find({ - wanted =>\&parse_existing_sysfs, - preprocess =>\&dont_parse_special_attributes, - no_chdir => 1 - }, $sysfs_prefix); - print STDERR "done.\n"; - - $leaf{"others"}->{what} = ""; - - print STDERR "Converting ABI What fields into regexes..."; - foreach my $w (sort keys %data) { - foreach my $what (split /\xac/,$w) { - next if (!($what =~ m/^$sysfs_prefix/)); - - # Convert what into regular expressions - - # Escape dot characters - $what =~ s/\./\xf6/g; - - # Temporarily change [0-9]+ type of patterns - $what =~ s/\[0\-9\]\+/\xff/g; - - # Temporarily change [\d+-\d+] type of patterns - $what =~ s/\[0\-\d+\]/\xff/g; - $what =~ s/\[(\d+)\]/\xf4$1\xf5/g; - - # Temporarily change [0-9] type of patterns - $what =~ s/\[(\d)\-(\d)\]/\xf4$1-$2\xf5/g; - - # Handle multiple option patterns - $what =~ s/[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]/($1|$2)/g; - - # Handle wildcards - $what =~ s,\*,.*,g; - $what =~ s,/\xf6..,/.*,g; - $what =~ s/\<[^\>]+\>/.*/g; - $what =~ s/\{[^\}]+\}/.*/g; - $what =~ s/\[[^\]]+\]/.*/g; - - $what =~ s/[XYZ]/.*/g; - - # Recover [0-9] type of patterns - $what =~ s/\xf4/[/g; - $what =~ s/\xf5/]/g; - - # Remove duplicated spaces - $what =~ s/\s+/ /g; - - # Special case: this ABI has a parenthesis on it - $what =~ s/sqrt\(x^2\+y^2\+z^2\)/sqrt\(x^2\+y^2\+z^2\)/; - - # Special case: drop comparition as in: - # What: foo = <something> - # (this happens on a few IIO definitions) - $what =~ s,\s*\=.*$,,; - - # Escape all other symbols - $what =~ s/$escape_symbols/\\$1/g; - $what =~ s/\\\\/\\/g; - $what =~ s/\\([\[\]\(\)\|])/$1/g; - $what =~ s/(\d+)\\(-\d+)/$1$2/g; - - $what =~ s/\xff/\\d+/g; - - # Special case: IIO ABI which a parenthesis. - $what =~ s/sqrt(.*)/sqrt\(.*\)/; - - # Simplify regexes with multiple .* - $what =~ s#(?:\.\*){2,}##g; -# $what =~ s#\.\*/\.\*#.*#g; - - # Recover dot characters - $what =~ s/\xf6/\./g; - - my $leave = get_leave($what); - - my $added = 0; - foreach my $l (split /\|/, $leave) { - if (defined($leaf{$l})) { - next if ($leaf{$l}->{what} =~ m/\b$what\b/); - $leaf{$l}->{what} .= "\xac" . 
$what; - $added = 1; - } else { - $leaf{$l}->{what} = $what; - $added = 1; - } - } - if ($search_string && $added) { - print STDERR "What: $what\n" if ($what =~ m#$search_string#); - } - - } - } - # Compile regexes - foreach my $l (sort keys %leaf) { - my @expr; - foreach my $w(sort split /\xac/, $leaf{$l}->{what}) { - push @expr, qr /^$w$/; - } - $leaf{$l}->{expr} = \@expr; - } - - # Take links into account - foreach my $link (sort keys %aliases) { - my $abs_file = $aliases{$link}; - graph_add_link($abs_file, $link); - } - print STDERR "done.\n"; - - check_undefined_symbols; -} - -# Ensure that the prefix will always end with a slash -# While this is not needed for find, it makes the patch nicer -# with --enable-lineno -$prefix =~ s,/?$,/,; - -if ($cmd eq "undefined" || $cmd eq "search") { - $show_warnings = 0; -} -# -# Parses all ABI files located at $prefix dir -# -find({wanted =>\&parse_abi, no_chdir => 1}, $prefix); - -print STDERR Data::Dumper->Dump([\%data], [qw(*data)]) if ($debug & $dbg_dump_abi_structs); - -# -# Handles the command -# -if ($cmd eq "undefined") { - undefined_symbols; -} elsif ($cmd eq "search") { - search_symbols; -} else { - if ($cmd eq "rest") { - output_rest; - } - - # Warn about duplicated ABI entries - foreach my $what(sort keys %symbols) { - my @files = @{$symbols{$what}->{file}}; - - next if (scalar(@files) == 1); - - printf STDERR "Warning: $what is defined %d times: @files\n", - scalar(@files); - } -} - -__END__ - -=head1 NAME - -get_abi.pl - parse the Linux ABI files and produce a ReST book. - -=head1 SYNOPSIS - -B<get_abi.pl> [--debug <level>] [--enable-lineno] [--man] [--help] - [--(no-)rst-source] [--dir=<dir>] [--show-hints] - [--search-string <regex>] - <COMMAND> [<ARGUMENT>] - -Where B<COMMAND> can be: - -=over 8 - -B<search> I<SEARCH_REGEX> - search for I<SEARCH_REGEX> inside ABI - -B<rest> - output the ABI in ReST markup language - -B<validate> - validate the ABI contents - -B<undefined> - existing symbols at the system that aren't - defined at Documentation/ABI - -=back - -=head1 OPTIONS - -=over 8 - -=item B<--dir> - -Changes the location of the ABI search. By default, it uses -the Documentation/ABI directory. - -=item B<--rst-source> and B<--no-rst-source> - -The input file may be using ReST syntax or not. Those two options allow -selecting between a rst-compliant source ABI (B<--rst-source>), or a -plain text that may be violating ReST spec, so it requres some escaping -logic (B<--no-rst-source>). - -=item B<--enable-lineno> - -Enable output of .. LINENO lines. - -=item B<--debug> I<debug level> - -Print debug information according with the level, which is given by the -following bitmask: - - - 1: Debug parsing What entries from ABI files; - - 2: Shows what files are opened from ABI files; - - 4: Dump the structs used to store the contents of the ABI files. - -=item B<--show-hints> - -Show hints about possible definitions for the missing ABI symbols. -Used only when B<undefined>. - -=item B<--search-string> I<regex string> - -Show only occurences that match a search string. -Used only when B<undefined>. - -=item B<--help> - -Prints a brief help message and exits. - -=item B<--man> - -Prints the manual page and exits. - -=back - -=head1 DESCRIPTION - -Parse the Linux ABI files from ABI DIR (usually located at Documentation/ABI), -allowing to search for ABI symbols or to produce a ReST book containing -the Linux ABI documentation. 
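This patch retires the Perl tool in favour of scripts/get_abi.py. A rough command-line equivalence, sketched from the argparse definitions added further down (top-level options such as --dir must precede the subcommand; the exact spellings are those of the new script and may evolve):

  $ scripts/get_abi.py search "usb.*cap"
  $ scripts/get_abi.py --dir Documentation/ABI/obsolete rest
  $ scripts/get_abi.py undefined --show-hints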
- -=head1 EXAMPLES - -Search for all stable symbols with the word "usb": - -=over 8 - -$ scripts/get_abi.pl search usb --dir Documentation/ABI/stable - -=back - -Search for all symbols that match the regex expression "usb.*cap": - -=over 8 - -$ scripts/get_abi.pl search usb.*cap - -=back - -Output all obsoleted symbols in ReST format - -=over 8 - -$ scripts/get_abi.pl rest --dir Documentation/ABI/obsolete - -=back - -=head1 BUGS - -Report bugs to Mauro Carvalho Chehab <mchehab+huawei@kernel.org> - -=head1 COPYRIGHT - -Copyright (c) 2016-2021 by Mauro Carvalho Chehab <mchehab+huawei@kernel.org>. - -License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>. - -This is free software: you are free to change and redistribute it. -There is NO WARRANTY, to the extent permitted by law. - -=cut diff --git a/scripts/get_abi.py b/scripts/get_abi.py new file mode 100755 index 000000000000..7ce4748a46d2 --- /dev/null +++ b/scripts/get_abi.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +# pylint: disable=R0903 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. +# SPDX-License-Identifier: GPL-2.0 + +""" +Parse ABI documentation and produce results from it. +""" + +import argparse +import logging +import os +import sys + +# Import Python modules + +LIB_DIR = "lib/abi" +SRC_DIR = os.path.dirname(os.path.realpath(__file__)) + +sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR)) + +from abi_parser import AbiParser # pylint: disable=C0413 +from abi_regex import AbiRegex # pylint: disable=C0413 +from helpers import ABI_DIR, DEBUG_HELP # pylint: disable=C0413 +from system_symbols import SystemSymbols # pylint: disable=C0413 + +# Command line classes + + +REST_DESC = """ +Produce output in ReST format. + +The output is done on two sections: + +- Symbols: show all parsed symbols in alphabetic order; +- Files: cross reference the content of each file with the symbols on it. +""" + +class AbiRest: + """Initialize an argparse subparser for rest output""" + + def __init__(self, subparsers): + """Initialize argparse subparsers""" + + parser = subparsers.add_parser("rest", + formatter_class=argparse.RawTextHelpFormatter, + description=REST_DESC) + + parser.add_argument("--enable-lineno", action="store_true", + help="enable lineno") + parser.add_argument("--raw", action="store_true", + help="output text as contained in the ABI files. " + "It not used, output will contain dynamically" + " generated cross references when possible.") + parser.add_argument("--no-file", action="store_true", + help="Don't the files section") + parser.add_argument("--show-hints", help="Show-hints") + + parser.set_defaults(func=self.run) + + def run(self, args): + """Run subparser""" + + parser = AbiParser(args.dir, debug=args.debug) + parser.parse_abi() + parser.check_issues() + + for t in parser.doc(args.raw, not args.no_file): + if args.enable_lineno: + print (f".. 
LINENO {t[1]}#{t[2]}\n\n") + + print(t[0]) + +class AbiValidate: + """Initialize an argparse subparser for ABI validation""" + + def __init__(self, subparsers): + """Initialize argparse subparsers""" + + parser = subparsers.add_parser("validate", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="list events") + + parser.set_defaults(func=self.run) + + def run(self, args): + """Run subparser""" + + parser = AbiParser(args.dir, debug=args.debug) + parser.parse_abi() + parser.check_issues() + + +class AbiSearch: + """Initialize an argparse subparser for ABI search""" + + def __init__(self, subparsers): + """Initialize argparse subparsers""" + + parser = subparsers.add_parser("search", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Search ABI using a regular expression") + + parser.add_argument("expression", + help="Case-insensitive search pattern for the ABI symbol") + + parser.set_defaults(func=self.run) + + def run(self, args): + """Run subparser""" + + parser = AbiParser(args.dir, debug=args.debug) + parser.parse_abi() + parser.search_symbols(args.expression) + +UNDEFINED_DESC=""" +Check undefined ABIs on local machine. + +Read sysfs devnodes and check if the devnodes there are defined inside +ABI documentation. + +The search logic tries to minimize the number of regular expressions to +search per each symbol. + +By default, it runs on a single CPU, as Python support for CPU threads +is still experimental, and multi-process runs on Python is very slow. + +On experimental tests, if the number of ABI symbols to search per devnode +is contained on a limit of ~150 regular expressions, using a single CPU +is a lot faster than using multiple processes. However, if the number of +regular expressions to check is at the order of ~30000, using multiple +CPUs speeds up the check. +""" + +class AbiUndefined: + """ + Initialize an argparse subparser for logic to check undefined ABI at + the current machine's sysfs + """ + + def __init__(self, subparsers): + """Initialize argparse subparsers""" + + parser = subparsers.add_parser("undefined", + formatter_class=argparse.RawTextHelpFormatter, + description=UNDEFINED_DESC) + + parser.add_argument("-S", "--sysfs-dir", default="/sys", + help="directory where sysfs is mounted") + parser.add_argument("-s", "--search-string", + help="search string regular expression to limit symbol search") + parser.add_argument("-H", "--show-hints", action="store_true", + help="Hints about definitions for missing ABI symbols.") + parser.add_argument("-j", "--jobs", "--max-workers", type=int, default=1, + help="If bigger than one, enables multiprocessing.") + parser.add_argument("-c", "--max-chunk-size", type=int, default=50, + help="Maximum number of chunk size") + parser.add_argument("-f", "--found", action="store_true", + help="Also show found items. " + "Helpful to debug the parser."), + parser.add_argument("-d", "--dry-run", action="store_true", + help="Don't actually search for undefined. 
" + "Helpful to debug the parser."), + + parser.set_defaults(func=self.run) + + def run(self, args): + """Run subparser""" + + abi = AbiRegex(args.dir, debug=args.debug, + search_string=args.search_string) + + abi_symbols = SystemSymbols(abi=abi, hints=args.show_hints, + sysfs=args.sysfs_dir) + + abi_symbols.check_undefined_symbols(dry_run=args.dry_run, + found=args.found, + max_workers=args.jobs, + chunk_size=args.max_chunk_size) + + +def main(): + """Main program""" + + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument("-d", "--debug", type=int, default=0, help="debug level") + parser.add_argument("-D", "--dir", default=ABI_DIR, help=DEBUG_HELP) + + subparsers = parser.add_subparsers() + + AbiRest(subparsers) + AbiValidate(subparsers) + AbiSearch(subparsers) + AbiUndefined(subparsers) + + args = parser.parse_args() + + if args.debug: + level = logging.DEBUG + else: + level = logging.INFO + + logging.basicConfig(level=level, format="[%(levelname)s] %(message)s") + + if "func" in args: + args.func(args) + else: + sys.exit(f"Please specify a valid command for {sys.argv[0]}") + + +# Call main method +if __name__ == "__main__": + main() diff --git a/scripts/get_feat.pl b/scripts/get_feat.pl index 5c5397eeb237..40fb28c8424e 100755 --- a/scripts/get_feat.pl +++ b/scripts/get_feat.pl @@ -512,13 +512,13 @@ print STDERR Data::Dumper->Dump([\%data], [qw(*data)]) if ($debug); # Handles the command # if ($cmd eq "current") { - $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/'); + $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/'); $arch =~s/\s+$//; } if ($cmd eq "ls" or $cmd eq "list") { if (!$arch) { - $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/'); + $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/'); $arch =~s/\s+$//; } diff --git a/scripts/kernel-doc b/scripts/kernel-doc index e57c5e989a0a..af6cf408b96d 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -26,7 +26,7 @@ kernel-doc - Print formatted kernel documentation to stdout kernel-doc [-h] [-v] [-Werror] [-Wall] [-Wreturn] [-Wshort-desc[ription]] [-Wcontents-before-sections] [ -man | - -rst [-sphinx-version VERSION] [-enable-lineno] | + -rst [-enable-lineno] | -none ] [ @@ -130,7 +130,6 @@ if ($#ARGV == -1) { } my $kernelversion; -my ($sphinx_major, $sphinx_minor, $sphinx_patch); my $dohighlight = ""; @@ -138,7 +137,6 @@ my $verbose = 0; my $Werror = 0; my $Wreturn = 0; my $Wshort_desc = 0; -my $Wcontents_before_sections = 0; my $output_mode = "rst"; my $output_preformatted = 0; my $no_doc_sections = 0; @@ -179,7 +177,7 @@ my ($function, %function_table, %parametertypes, $declaration_purpose); my %nosymbol_table = (); my $declaration_start_line; my ($type, $declaration_name, $return_type); -my ($newsection, $newcontents, $prototype, $brcount, %source_map); +my ($newsection, $newcontents, $prototype, $brcount); if (defined($ENV{'KBUILD_VERBOSE'}) && $ENV{'KBUILD_VERBOSE'} =~ '1') { $verbose = 1; @@ -224,7 +222,6 @@ use constant { STATE_INLINE => 7, # gathering doc outside main block }; my $state; -my $in_doc_sect; my $leading_space; # Inline documentation state @@ -333,12 +330,9 @@ while ($ARGV[0] =~ m/^--?(.*)/) { $Wreturn = 1; } elsif ($cmd eq "Wshort-desc" or $cmd eq "Wshort-description") { $Wshort_desc = 1; - } elsif ($cmd eq "Wcontents-before-sections") { - $Wcontents_before_sections = 1; } elsif ($cmd eq "Wall") { $Wreturn = 1; $Wshort_desc = 1; - $Wcontents_before_sections = 1; } 
elsif (($cmd eq "h") || ($cmd eq "help")) { pod2usage(-exitval => 0, -verbose => 2); } elsif ($cmd eq 'no-doc-sections') { @@ -347,23 +341,6 @@ while ($ARGV[0] =~ m/^--?(.*)/) { $enable_lineno = 1; } elsif ($cmd eq 'show-not-found') { $show_not_found = 1; # A no-op but don't fail - } elsif ($cmd eq "sphinx-version") { - my $ver_string = shift @ARGV; - if ($ver_string =~ m/^(\d+)(\.\d+)?(\.\d+)?/) { - $sphinx_major = $1; - if (defined($2)) { - $sphinx_minor = substr($2,1); - } else { - $sphinx_minor = 0; - } - if (defined($3)) { - $sphinx_patch = substr($3,1) - } else { - $sphinx_patch = 0; - } - } else { - die "Sphinx version should either major.minor or major.minor.patch format\n"; - } } else { # Unknown argument pod2usage( @@ -387,8 +364,6 @@ while ($ARGV[0] =~ m/^--?(.*)/) { # continue execution near EOF; -# The C domain dialect changed on Sphinx 3. So, we need to check the -# version in order to produce the right tags. sub findprog($) { foreach(split(/:/, $ENV{PATH})) { @@ -396,42 +371,6 @@ sub findprog($) } } -sub get_sphinx_version() -{ - my $ver; - - my $cmd = "sphinx-build"; - if (!findprog($cmd)) { - my $cmd = "sphinx-build3"; - if (!findprog($cmd)) { - $sphinx_major = 1; - $sphinx_minor = 2; - $sphinx_patch = 0; - printf STDERR "Warning: Sphinx version not found. Using default (Sphinx version %d.%d.%d)\n", - $sphinx_major, $sphinx_minor, $sphinx_patch; - return; - } - } - - open IN, "$cmd --version 2>&1 |"; - while (<IN>) { - if (m/^\s*sphinx-build\s+([\d]+)\.([\d\.]+)(\+\/[\da-f]+)?$/) { - $sphinx_major = $1; - $sphinx_minor = $2; - $sphinx_patch = $3; - last; - } - # Sphinx 1.2.x uses a different format - if (m/^\s*Sphinx.*\s+([\d]+)\.([\d\.]+)$/) { - $sphinx_major = $1; - $sphinx_minor = $2; - $sphinx_patch = $3; - last; - } - } - close IN; -} - # get kernel version from env sub get_kernel_version() { my $version = 'unknown kernel version'; @@ -816,6 +755,10 @@ sub output_highlight_rst { if ($block) { $output .= highlight_block($block); } + + $output =~ s/^\n+//g; + $output =~ s/\n+$//g; + foreach $line (split "\n", $output) { print $lineprefix . $line . "\n"; } @@ -859,9 +802,10 @@ sub output_function_rst(%) { $signature .= ")"; } - if ($sphinx_major < 3) { + if ($args{'typedef'} || $args{'functiontype'} eq "") { + print ".. c:macro:: ". $args{'function'} . "\n\n"; + if ($args{'typedef'}) { - print ".. c:type:: ". $args{'function'} . "\n\n"; print_lineno($declaration_start_line); print " **Typedef**: "; $lineprefix = ""; @@ -869,25 +813,10 @@ sub output_function_rst(%) { print "\n\n**Syntax**\n\n"; print " ``$signature``\n\n"; } else { - print ".. c:function:: $signature\n\n"; + print "``$signature``\n\n"; } } else { - if ($args{'typedef'} || $args{'functiontype'} eq "") { - print ".. c:macro:: ". $args{'function'} . "\n\n"; - - if ($args{'typedef'}) { - print_lineno($declaration_start_line); - print " **Typedef**: "; - $lineprefix = ""; - output_highlight_rst($args{'purpose'}); - print "\n\n**Syntax**\n\n"; - print " ``$signature``\n\n"; - } else { - print "``$signature``\n\n"; - } - } else { - print ".. c:function:: $signature\n\n"; - } + print ".. c:function:: $signature\n\n"; } if (!$args{'typedef'}) { @@ -955,13 +884,9 @@ sub output_enum_rst(%) { my $count; my $outer; - if ($sphinx_major < 3) { - my $name = "enum " . $args{'enum'}; - print "\n\n.. c:type:: " . $name . "\n\n"; - } else { - my $name = $args{'enum'}; - print "\n\n.. c:enum:: " . $name . "\n\n"; - } + my $name = $args{'enum'}; + print "\n\n.. c:enum:: " . $name . 
"\n\n"; + print_lineno($declaration_start_line); $lineprefix = " "; output_highlight_rst($args{'purpose'}); @@ -992,11 +917,8 @@ sub output_typedef_rst(%) { my $oldprefix = $lineprefix; my $name; - if ($sphinx_major < 3) { - $name = "typedef " . $args{'typedef'}; - } else { - $name = $args{'typedef'}; - } + $name = $args{'typedef'}; + print "\n\n.. c:type:: " . $name . "\n\n"; print_lineno($declaration_start_line); $lineprefix = " "; @@ -1012,17 +934,13 @@ sub output_struct_rst(%) { my ($parameter); my $oldprefix = $lineprefix; - if ($sphinx_major < 3) { - my $name = $args{'type'} . " " . $args{'struct'}; - print "\n\n.. c:type:: " . $name . "\n\n"; + my $name = $args{'struct'}; + if ($args{'type'} eq 'union') { + print "\n\n.. c:union:: " . $name . "\n\n"; } else { - my $name = $args{'struct'}; - if ($args{'type'} eq 'union') { - print "\n\n.. c:union:: " . $name . "\n\n"; - } else { - print "\n\n.. c:struct:: " . $name . "\n\n"; - } + print "\n\n.. c:struct:: " . $name . "\n\n"; } + print_lineno($declaration_start_line); $lineprefix = " "; output_highlight_rst($args{'purpose'}); @@ -2005,10 +1923,6 @@ sub map_filename($) { $file = $orig_file; } - if (defined($source_map{$file})) { - $file = $source_map{$file}; - } - return $file; } @@ -2044,7 +1958,6 @@ sub process_export_file($) { sub process_normal() { if (/$doc_start/o) { $state = STATE_NAME; # next line is always the function name - $in_doc_sect = 0; $declaration_start_line = $. + 1; } } @@ -2149,7 +2062,6 @@ sub process_body($$) { } if (/$doc_sect/i) { # case insensitive for supported section names - $in_doc_sect = 1; $newsection = $1; $newcontents = $2; @@ -2166,14 +2078,10 @@ sub process_body($$) { } if (($contents ne "") && ($contents ne "\n")) { - if (!$in_doc_sect && $Wcontents_before_sections) { - emit_warning("${file}:$.", "contents before sections\n"); - } dump_section($file, $section, $contents); $section = $section_default; } - $in_doc_sect = 1; $state = STATE_BODY; $contents = $newcontents; $new_start_line = $.; @@ -2387,11 +2295,6 @@ sub process_file($) { close IN_FILE; } - -if ($output_mode eq "rst") { - get_sphinx_version() if (!$sphinx_major); -} - $kernelversion = get_kernel_version(); # generate a sequence of code that will splice in highlighting information @@ -2403,19 +2306,6 @@ for (my $k = 0; $k < @highlights; $k++) { $dohighlight .= "\$contents =~ s:$pattern:$result:gs;\n"; } -# Read the file that maps relative names to absolute names for -# separate source and object directories and for shadow trees. -if (open(SOURCE_MAP, "<.tmp_filelist.txt")) { - my ($relname, $absname); - while(<SOURCE_MAP>) { - chop(); - ($relname, $absname) = (split())[0..1]; - $relname =~ s:^/+::; - $source_map{$relname} = $absname; - } - close(SOURCE_MAP); -} - if ($output_selection == OUTPUT_EXPORTED || $output_selection == OUTPUT_INTERNAL) { @@ -2471,17 +2361,6 @@ Do not output documentation, only warnings. =head3 reStructuredText only -=over 8 - -=item -sphinx-version VERSION - -Use the ReST C domain dialect compatible with a specific Sphinx Version. - -If not specified, kernel-doc will auto-detect using the sphinx-build version -found on PATH. 
- -=back - =head2 Output selection (mutually exclusive): =over 8 diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py new file mode 100644 index 000000000000..66a738013ce1 --- /dev/null +++ b/scripts/lib/abi/abi_parser.py @@ -0,0 +1,628 @@ +#!/usr/bin/env python3 +# pylint: disable=R0902,R0903,R0911,R0912,R0913,R0914,R0915,R0917,C0302 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. +# SPDX-License-Identifier: GPL-2.0 + +""" +Parse ABI documentation and produce results from it. +""" + +from argparse import Namespace +import logging +import os +import re + +from pprint import pformat +from random import randrange, seed + +# Import Python modules + +from helpers import AbiDebug, ABI_DIR + + +class AbiParser: + """Main class to parse ABI files""" + + TAGS = r"(what|where|date|kernelversion|contact|description|users)" + XREF = r"(?:^|\s|\()(\/(?:sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)(?:[,.:;\)\s]|\Z)" + + def __init__(self, directory, logger=None, + enable_lineno=False, show_warnings=True, debug=0): + """Stores arguments for the class and initialize class vars""" + + self.directory = directory + self.enable_lineno = enable_lineno + self.show_warnings = show_warnings + self.debug = debug + + if not logger: + self.log = logging.getLogger("get_abi") + else: + self.log = logger + + self.data = {} + self.what_symbols = {} + self.file_refs = {} + self.what_refs = {} + + # Ignore files that contain such suffixes + self.ignore_suffixes = (".rej", ".org", ".orig", ".bak", "~") + + # Regular expressions used on parser + self.re_abi_dir = re.compile(r"(.*)" + ABI_DIR) + self.re_tag = re.compile(r"(\S+)(:\s*)(.*)", re.I) + self.re_valid = re.compile(self.TAGS) + self.re_start_spc = re.compile(r"(\s*)(\S.*)") + self.re_whitespace = re.compile(r"^\s+") + + # Regular used on print + self.re_what = re.compile(r"(\/?(?:[\w\-]+\/?){1,2})") + self.re_escape = re.compile(r"([\.\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])") + self.re_unprintable = re.compile(r"([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff]+)") + self.re_title_mark = re.compile(r"\n[\-\*\=\^\~]+\n") + self.re_doc = re.compile(r"Documentation/(?!devicetree)(\S+)\.rst") + self.re_abi = re.compile(r"(Documentation/ABI/)([\w\/\-]+)") + self.re_xref_node = re.compile(self.XREF) + + def warn(self, fdata, msg, extra=None): + """Displays a parse error if warning is enabled""" + + if not self.show_warnings: + return + + msg = f"{fdata.fname}:{fdata.ln}: {msg}" + if extra: + msg += "\n\t\t" + extra + + self.log.warning(msg) + + def add_symbol(self, what, fname, ln=None, xref=None): + """Create a reference table describing where each 'what' is located""" + + if what not in self.what_symbols: + self.what_symbols[what] = {"file": {}} + + if fname not in self.what_symbols[what]["file"]: + self.what_symbols[what]["file"][fname] = [] + + if ln and ln not in self.what_symbols[what]["file"][fname]: + self.what_symbols[what]["file"][fname].append(ln) + + if xref: + self.what_symbols[what]["xref"] = xref + + def _parse_line(self, fdata, line): + """Parse a single line of an ABI file""" + + new_what = False + new_tag = False + content = None + + match = self.re_tag.match(line) + if match: + new = match.group(1).lower() + sep = match.group(2) + content = match.group(3) + + match = self.re_valid.search(new) + if match: + new_tag = match.group(1) + else: + if fdata.tag == "description": + # New "tag" is actually part of description. 
+ # Don't consider it a tag + new_tag = False + elif fdata.tag != "": + self.warn(fdata, f"tag '{fdata.tag}' is invalid", line) + + if new_tag: + # "where" is Invalid, but was a common mistake. Warn if found + if new_tag == "where": + self.warn(fdata, "tag 'Where' is invalid. Should be 'What:' instead") + new_tag = "what" + + if new_tag == "what": + fdata.space = None + + if content not in self.what_symbols: + self.add_symbol(what=content, fname=fdata.fname, ln=fdata.ln) + + if fdata.tag == "what": + fdata.what.append(content.strip("\n")) + else: + if fdata.key: + if "description" not in self.data.get(fdata.key, {}): + self.warn(fdata, f"{fdata.key} doesn't have a description") + + for w in fdata.what: + self.add_symbol(what=w, fname=fdata.fname, + ln=fdata.what_ln, xref=fdata.key) + + fdata.label = content + new_what = True + + key = "abi_" + content.lower() + fdata.key = self.re_unprintable.sub("_", key).strip("_") + + # Avoid duplicated keys but using a defined seed, to make + # the namespace identical if there aren't changes at the + # ABI symbols + seed(42) + + while fdata.key in self.data: + char = randrange(0, 51) + ord("A") + if char > ord("Z"): + char += ord("a") - ord("Z") - 1 + + fdata.key += chr(char) + + if fdata.key and fdata.key not in self.data: + self.data[fdata.key] = { + "what": [content], + "file": [fdata.file_ref], + "path": fdata.ftype, + "line_no": fdata.ln, + } + + fdata.what = self.data[fdata.key]["what"] + + self.what_refs[content] = fdata.key + fdata.tag = new_tag + fdata.what_ln = fdata.ln + + if fdata.nametag["what"]: + t = (content, fdata.key) + if t not in fdata.nametag["symbols"]: + fdata.nametag["symbols"].append(t) + + return + + if fdata.tag and new_tag: + fdata.tag = new_tag + + if new_what: + fdata.label = "" + + if "description" in self.data[fdata.key]: + self.data[fdata.key]["description"] += "\n\n" + + if fdata.file_ref not in self.data[fdata.key]["file"]: + self.data[fdata.key]["file"].append(fdata.file_ref) + + if self.debug == AbiDebug.WHAT_PARSING: + self.log.debug("what: %s", fdata.what) + + if not fdata.what: + self.warn(fdata, "'What:' should come first:", line) + return + + if new_tag == "description": + fdata.space = None + + if content: + sep = sep.replace(":", " ") + + c = " " * len(new_tag) + sep + content + c = c.expandtabs() + + match = self.re_start_spc.match(c) + if match: + # Preserve initial spaces for the first line + fdata.space = match.group(1) + content = match.group(2) + "\n" + + self.data[fdata.key][fdata.tag] = content + + return + + # Store any contents before tags at the database + if not fdata.tag and "what" in fdata.nametag: + fdata.nametag["description"] += line + return + + if fdata.tag == "description": + content = line.expandtabs() + + if self.re_whitespace.sub("", content) == "": + self.data[fdata.key][fdata.tag] += "\n" + return + + if fdata.space is None: + match = self.re_start_spc.match(content) + if match: + # Preserve initial spaces for the first line + fdata.space = match.group(1) + + content = match.group(2) + "\n" + else: + if content.startswith(fdata.space): + content = content[len(fdata.space):] + + else: + fdata.space = "" + + if fdata.tag == "what": + w = content.strip("\n") + if w: + self.data[fdata.key][fdata.tag].append(w) + else: + self.data[fdata.key][fdata.tag] += content + return + + content = line.strip() + if fdata.tag: + if fdata.tag == "what": + w = content.strip("\n") + if w: + self.data[fdata.key][fdata.tag].append(w) + else: + self.data[fdata.key][fdata.tag] += "\n" + content.rstrip("\n") + 
return + + # Everything else is error + if content: + self.warn(fdata, "Unexpected content", line) + + def parse_readme(self, nametag, fname): + """Parse ABI README file""" + + nametag["what"] = ["Introduction"] + nametag["path"] = "README" + with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp: + for line in fp: + match = self.re_tag.match(line) + if match: + new = match.group(1).lower() + + match = self.re_valid.search(new) + if match: + nametag["description"] += "\n:" + line + continue + + nametag["description"] += line + + def parse_file(self, fname, path, basename): + """Parse a single file""" + + ref = f"abi_file_{path}_{basename}" + ref = self.re_unprintable.sub("_", ref).strip("_") + + # Store per-file state into a namespace variable. This will be used + # by the per-line parser state machine and by the warning function. + fdata = Namespace + + fdata.fname = fname + fdata.name = basename + + pos = fname.find(ABI_DIR) + if pos > 0: + f = fname[pos:] + else: + f = fname + + fdata.file_ref = (f, ref) + self.file_refs[f] = ref + + fdata.ln = 0 + fdata.what_ln = 0 + fdata.tag = "" + fdata.label = "" + fdata.what = [] + fdata.key = None + fdata.xrefs = None + fdata.space = None + fdata.ftype = path.split("/")[0] + + fdata.nametag = {} + fdata.nametag["what"] = [f"ABI file {path}/{basename}"] + fdata.nametag["type"] = "File" + fdata.nametag["path"] = fdata.ftype + fdata.nametag["file"] = [fdata.file_ref] + fdata.nametag["line_no"] = 1 + fdata.nametag["description"] = "" + fdata.nametag["symbols"] = [] + + self.data[ref] = fdata.nametag + + if self.debug & AbiDebug.WHAT_OPEN: + self.log.debug("Opening file %s", fname) + + if basename == "README": + self.parse_readme(fdata.nametag, fname) + return + + with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp: + for line in fp: + fdata.ln += 1 + + self._parse_line(fdata, line) + + if "description" in fdata.nametag: + fdata.nametag["description"] = fdata.nametag["description"].lstrip("\n") + + if fdata.key: + if "description" not in self.data.get(fdata.key, {}): + self.warn(fdata, f"{fdata.key} doesn't have a description") + + for w in fdata.what: + self.add_symbol(what=w, fname=fname, xref=fdata.key) + + def _parse_abi(self, root=None): + """Internal function to parse documentation ABI recursively""" + + if not root: + root = self.directory + + with os.scandir(root) as obj: + for entry in obj: + name = os.path.join(root, entry.name) + + if entry.is_dir(): + self._parse_abi(name) + continue + + if not entry.is_file(): + continue + + basename = os.path.basename(name) + + if basename.startswith("."): + continue + + if basename.endswith(self.ignore_suffixes): + continue + + path = self.re_abi_dir.sub("", os.path.dirname(name)) + + self.parse_file(name, path, basename) + + def parse_abi(self, root=None): + """Parse documentation ABI""" + + self._parse_abi(root) + + if self.debug & AbiDebug.DUMP_ABI_STRUCTS: + self.log.debug(pformat(self.data)) + + def desc_txt(self, desc): + """Print description as found inside ABI files""" + + desc = desc.strip(" \t\n") + + return desc + "\n\n" + + def xref(self, fname): + """ + Converts a Documentation/ABI + basename into a ReST cross-reference + """ + + xref = self.file_refs.get(fname) + if not xref: + return None + else: + return xref + + def desc_rst(self, desc): + """Enrich ReST output by creating cross-references""" + + # Remove title markups from the description + # Having titles inside ABI files will only work if extra + # care would be taken in order to strictly follow 
the same + # level order for each markup. + desc = self.re_title_mark.sub("\n\n", "\n" + desc) + desc = desc.rstrip(" \t\n").lstrip("\n") + + # Python's regex performance for non-compiled expressions is a lot + # than Perl, as Perl automatically caches them at their + # first usage. Here, we'll need to do the same, as otherwise the + # performance penalty is be high + + new_desc = "" + for d in desc.split("\n"): + if d == "": + new_desc += "\n" + continue + + # Use cross-references for doc files where needed + d = self.re_doc.sub(r":doc:`/\1`", d) + + # Use cross-references for ABI generated docs where needed + matches = self.re_abi.findall(d) + for m in matches: + abi = m[0] + m[1] + + xref = self.file_refs.get(abi) + if not xref: + # This may happen if ABI is on a separate directory, + # like parsing ABI testing and symbol is at stable. + # The proper solution is to move this part of the code + # for it to be inside sphinx/kernel_abi.py + self.log.info("Didn't find ABI reference for '%s'", abi) + else: + new = self.re_escape.sub(r"\\\1", m[1]) + d = re.sub(fr"\b{abi}\b", f":ref:`{new} <{xref}>`", d) + + # Seek for cross reference symbols like /sys/... + # Need to be careful to avoid doing it on a code block + if d[0] not in [" ", "\t"]: + matches = self.re_xref_node.findall(d) + for m in matches: + # Finding ABI here is more complex due to wildcards + xref = self.what_refs.get(m) + if xref: + new = self.re_escape.sub(r"\\\1", m) + d = re.sub(fr"\b{m}\b", f":ref:`{new} <{xref}>`", d) + + new_desc += d + "\n" + + return new_desc + "\n\n" + + def doc(self, output_in_txt=False, show_symbols=True, show_file=True, + filter_path=None): + """Print ABI at stdout""" + + part = None + for key, v in sorted(self.data.items(), + key=lambda x: (x[1].get("type", ""), + x[1].get("what"))): + + wtype = v.get("type", "Symbol") + file_ref = v.get("file") + names = v.get("what", [""]) + + if wtype == "File": + if not show_file: + continue + else: + if not show_symbols: + continue + + if filter_path: + if v.get("path") != filter_path: + continue + + msg = "" + + if wtype != "File": + cur_part = names[0] + if cur_part.find("/") >= 0: + match = self.re_what.match(cur_part) + if match: + symbol = match.group(1).rstrip("/") + cur_part = "Symbols under " + symbol + + if cur_part and cur_part != part: + part = cur_part + msg += part + "\n"+ "-" * len(part) +"\n\n" + + msg += f".. _{key}:\n\n" + + max_len = 0 + for i in range(0, len(names)): # pylint: disable=C0200 + names[i] = "**" + self.re_escape.sub(r"\\\1", names[i]) + "**" + + max_len = max(max_len, len(names[i])) + + msg += "+-" + "-" * max_len + "-+\n" + for name in names: + msg += f"| {name}" + " " * (max_len - len(name)) + " |\n" + msg += "+-" + "-" * max_len + "-+\n" + msg += "\n" + + for ref in file_ref: + if wtype == "File": + msg += f".. 
_{ref[1]}:\n\n" + else: + base = os.path.basename(ref[0]) + msg += f"Defined on file :ref:`{base} <{ref[1]}>`\n\n" + + if wtype == "File": + msg += names[0] +"\n" + "-" * len(names[0]) +"\n\n" + + desc = v.get("description") + if not desc and wtype != "File": + msg += f"DESCRIPTION MISSING for {names[0]}\n\n" + + if desc: + if output_in_txt: + msg += self.desc_txt(desc) + else: + msg += self.desc_rst(desc) + + symbols = v.get("symbols") + if symbols: + msg += "Has the following ABI:\n\n" + + for w, label in symbols: + # Escape special chars from content + content = self.re_escape.sub(r"\\\1", w) + + msg += f"- :ref:`{content} <{label}>`\n\n" + + users = v.get("users") + if users and users.strip(" \t\n"): + users = users.strip("\n").replace('\n', '\n\t') + msg += f"Users:\n\t{users}\n\n" + + ln = v.get("line_no", 1) + + yield (msg, file_ref[0][0], ln) + + def check_issues(self): + """Warn about duplicated ABI entries""" + + for what, v in self.what_symbols.items(): + files = v.get("file") + if not files: + # Should never happen if the parser works properly + self.log.warning("%s doesn't have a file associated", what) + continue + + if len(files) == 1: + continue + + f = [] + for fname, lines in sorted(files.items()): + if not lines: + f.append(f"{fname}") + elif len(lines) == 1: + f.append(f"{fname}:{lines[0]}") + else: + m = fname + "lines " + m += ", ".join(str(x) for x in lines) + f.append(m) + + self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f)) + + def search_symbols(self, expr): + """ Searches for ABI symbols """ + + regex = re.compile(expr, re.I) + + found_keys = 0 + for t in sorted(self.data.items(), key=lambda x: [0]): + v = t[1] + + wtype = v.get("type", "") + if wtype == "File": + continue + + for what in v.get("what", [""]): + if regex.search(what): + found_keys += 1 + + kernelversion = v.get("kernelversion", "").strip(" \t\n") + date = v.get("date", "").strip(" \t\n") + contact = v.get("contact", "").strip(" \t\n") + users = v.get("users", "").strip(" \t\n") + desc = v.get("description", "").strip(" \t\n") + + files = [] + for f in v.get("file", ()): + files.append(f[0]) + + what = str(found_keys) + ". " + what + title_tag = "-" * len(what) + + print(f"\n{what}\n{title_tag}\n") + + if kernelversion: + print(f"Kernel version:\t\t{kernelversion}") + + if date: + print(f"Date:\t\t\t{date}") + + if contact: + print(f"Contact:\t\t{contact}") + + if users: + print(f"Users:\t\t\t{users}") + + print("Defined on file(s):\t" + ", ".join(files)) + + if desc: + desc = desc.strip("\n") + print(f"\n{desc}\n") + + if not found_keys: + print(f"Regular expression /{expr}/ not found.") diff --git a/scripts/lib/abi/abi_regex.py b/scripts/lib/abi/abi_regex.py new file mode 100644 index 000000000000..8a57846cbc69 --- /dev/null +++ b/scripts/lib/abi/abi_regex.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python3 +# xxpylint: disable=R0903 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. 
+# SPDX-License-Identifier: GPL-2.0 + +""" +Convert ABI what into regular expressions +""" + +import re +import sys + +from pprint import pformat + +from abi_parser import AbiParser +from helpers import AbiDebug + +class AbiRegex(AbiParser): + """Extends AbiParser to search ABI nodes with regular expressions""" + + # Escape only ASCII visible characters + escape_symbols = r"([\x21-\x29\x2b-\x2d\x3a-\x40\x5c\x60\x7b-\x7e])" + leave_others = "others" + + # Tuples with regular expressions to be compiled and replacement data + re_whats = [ + # Drop escape characters that might exist + (re.compile("\\\\"), ""), + + # Temporarily escape dot characters + (re.compile(r"\."), "\xf6"), + + # Temporarily change [0-9]+ type of patterns + (re.compile(r"\[0\-9\]\+"), "\xff"), + + # Temporarily change [\d+-\d+] type of patterns + (re.compile(r"\[0\-\d+\]"), "\xff"), + (re.compile(r"\[0:\d+\]"), "\xff"), + (re.compile(r"\[(\d+)\]"), "\xf4\\\\d+\xf5"), + + # Temporarily change [0-9] type of patterns + (re.compile(r"\[(\d)\-(\d)\]"), "\xf4\1-\2\xf5"), + + # Handle multiple option patterns + (re.compile(r"[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]"), r"(\1|\2)"), + + # Handle wildcards + (re.compile(r"([^\/])\*"), "\\1\\\\w\xf7"), + (re.compile(r"/\*/"), "/.*/"), + (re.compile(r"/\xf6\xf6\xf6"), "/.*"), + (re.compile(r"\<[^\>]+\>"), "\\\\w\xf7"), + (re.compile(r"\{[^\}]+\}"), "\\\\w\xf7"), + (re.compile(r"\[[^\]]+\]"), "\\\\w\xf7"), + + (re.compile(r"XX+"), "\\\\w\xf7"), + (re.compile(r"([^A-Z])[XYZ]([^A-Z])"), "\\1\\\\w\xf7\\2"), + (re.compile(r"([^A-Z])[XYZ]$"), "\\1\\\\w\xf7"), + (re.compile(r"_[AB]_"), "_\\\\w\xf7_"), + + # Recover [0-9] type of patterns + (re.compile(r"\xf4"), "["), + (re.compile(r"\xf5"), "]"), + + # Remove duplicated spaces + (re.compile(r"\s+"), r" "), + + # Special case: drop comparison as in: + # What: foo = <something> + # (this happens on a few IIO definitions) + (re.compile(r"\s*\=.*$"), ""), + + # Escape all other symbols + (re.compile(escape_symbols), r"\\\1"), + (re.compile(r"\\\\"), r"\\"), + (re.compile(r"\\([\[\]\(\)\|])"), r"\1"), + (re.compile(r"(\d+)\\(-\d+)"), r"\1\2"), + + (re.compile(r"\xff"), r"\\d+"), + + # Special case: IIO ABI which a parenthesis. + (re.compile(r"sqrt(.*)"), r"sqrt(.*)"), + + # Simplify regexes with multiple .* + (re.compile(r"(?:\.\*){2,}"), ""), + + # Recover dot characters + (re.compile(r"\xf6"), "\\."), + # Recover plus characters + (re.compile(r"\xf7"), "+"), + ] + re_has_num = re.compile(r"\\d") + + # Symbol name after escape_chars that are considered a devnode basename + re_symbol_name = re.compile(r"(\w|\\[\.\-\:])+$") + + # List of popular group names to be skipped to minimize regex group size + # Use AbiDebug.SUBGROUP_SIZE to detect those + skip_names = set(["devices", "hwmon"]) + + def regex_append(self, what, new): + """ + Get a search group for a subset of regular expressions. + + As ABI may have thousands of symbols, using a for to search all + regular expressions is at least O(n^2). When there are wildcards, + the complexity increases substantially, eventually becoming exponential. + + To avoid spending too much time on them, use a logic to split + them into groups. The smaller the group, the better, as it would + mean that searches will be confined to a small number of regular + expressions. + + The conversion to a regex subset is tricky, as we need something + that can be easily obtained from the sysfs symbol and from the + regular expression. So, we need to discard nodes that have + wildcards. 
+ + If it can't obtain a subgroup, place the regular expression inside + a special group (self.leave_others). + """ + + search_group = None + + for search_group in reversed(new.split("/")): + if not search_group or search_group in self.skip_names: + continue + if self.re_symbol_name.match(search_group): + break + + if not search_group: + search_group = self.leave_others + + if self.debug & AbiDebug.SUBGROUP_MAP: + self.log.debug("%s: mapped as %s", what, search_group) + + try: + if search_group not in self.regex_group: + self.regex_group[search_group] = [] + + self.regex_group[search_group].append(re.compile(new)) + if self.search_string: + if what.find(self.search_string) >= 0: + print(f"What: {what}") + except re.PatternError: + self.log.warning("Ignoring '%s' as it produced an invalid regex:\n" + " '%s'", what, new) + + def get_regexes(self, what): + """ + Given an ABI devnode, return a list of all regular expressions that + may match it, based on the sub-groups created by regex_append() + """ + + re_list = [] + + patches = what.split("/") + patches.reverse() + patches.append(self.leave_others) + + for search_group in patches: + if search_group in self.regex_group: + re_list += self.regex_group[search_group] + + return re_list + + def __init__(self, *args, **kwargs): + """ + Override init method to get verbose argument + """ + + self.regex_group = None + self.search_string = None + self.re_string = None + + if "search_string" in kwargs: + self.search_string = kwargs.get("search_string") + del kwargs["search_string"] + + if self.search_string: + + try: + self.re_string = re.compile(self.search_string) + except re.PatternError as e: + msg = f"{self.search_string} is not a valid regular expression" + raise ValueError(msg) from e + + super().__init__(*args, **kwargs) + + def parse_abi(self, *args, **kwargs): + + super().parse_abi(*args, **kwargs) + + self.regex_group = {} + + print("Converting ABI What fields into regexes...", file=sys.stderr) + + for t in sorted(self.data.items(), key=lambda x: x[0]): + v = t[1] + if v.get("type") == "File": + continue + + v["regex"] = [] + + for what in v.get("what", []): + if not what.startswith("/sys"): + continue + + new = what + for r, s in self.re_whats: + try: + new = r.sub(s, new) + except re.PatternError as e: + # Help debugging troubles with new regexes + raise re.PatternError(f"{e}\nwhile re.sub('{r.pattern}', {s}, str)") from e + + v["regex"].append(new) + + if self.debug & AbiDebug.REGEX: + self.log.debug("%-90s <== %s", new, what) + + # Store regex into a subgroup to speedup searches + self.regex_append(what, new) + + if self.debug & AbiDebug.SUBGROUP_DICT: + self.log.debug("%s", pformat(self.regex_group)) + + if self.debug & AbiDebug.SUBGROUP_SIZE: + biggestd_keys = sorted(self.regex_group.keys(), + key= lambda k: len(self.regex_group[k]), + reverse=True) + + print("Top regex subgroups:", file=sys.stderr) + for k in biggestd_keys[:10]: + print(f"{k} has {len(self.regex_group[k])} elements", file=sys.stderr) diff --git a/scripts/lib/abi/helpers.py b/scripts/lib/abi/helpers.py new file mode 100644 index 000000000000..639b23e4ca33 --- /dev/null +++ b/scripts/lib/abi/helpers.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. 
+# pylint: disable=R0903 +# SPDX-License-Identifier: GPL-2.0 + +""" +Helper classes for ABI parser +""" + +ABI_DIR = "Documentation/ABI/" + + +class AbiDebug: + """Debug levels""" + + WHAT_PARSING = 1 + WHAT_OPEN = 2 + DUMP_ABI_STRUCTS = 4 + UNDEFINED = 8 + REGEX = 16 + SUBGROUP_MAP = 32 + SUBGROUP_DICT = 64 + SUBGROUP_SIZE = 128 + GRAPH = 256 + + +DEBUG_HELP = """ +1 - enable debug parsing logic +2 - enable debug messages on file open +4 - enable debug for ABI parse data +8 - enable extra debug information to identify troubles + with ABI symbols found at the local machine that + weren't found on ABI documentation (used only for + undefined subcommand) +16 - enable debug for what to regex conversion +32 - enable debug for symbol regex subgroups +64 - enable debug for sysfs graph tree variable +""" diff --git a/scripts/lib/abi/system_symbols.py b/scripts/lib/abi/system_symbols.py new file mode 100644 index 000000000000..f15c94a6e33c --- /dev/null +++ b/scripts/lib/abi/system_symbols.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python3 +# pylint: disable=R0902,R0912,R0914,R0915,R1702 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. +# SPDX-License-Identifier: GPL-2.0 + +""" +Parse ABI documentation and produce results from it. +""" + +import os +import re +import sys + +from concurrent import futures +from datetime import datetime +from random import shuffle + +from helpers import AbiDebug + +class SystemSymbols: + """Stores arguments for the class and initialize class vars""" + + def graph_add_file(self, path, link=None): + """ + add a file path to the sysfs graph stored at self.root + """ + + if path in self.files: + return + + name = "" + ref = self.root + for edge in path.split("/"): + name += edge + "/" + if edge not in ref: + ref[edge] = {"__name": [name.rstrip("/")]} + + ref = ref[edge] + + if link and link not in ref["__name"]: + ref["__name"].append(link.rstrip("/")) + + self.files.add(path) + + def print_graph(self, root_prefix="", root=None, level=0): + """Prints a reference tree graph using UTF-8 characters""" + + if not root: + root = self.root + level = 0 + + # Prevent endless traverse + if level > 5: + return + + if level > 0: + prefix = "├──" + last_prefix = "└──" + else: + prefix = "" + last_prefix = "" + + items = list(root.items()) + + names = root.get("__name", []) + for k, edge in items: + if k == "__name": + continue + + if not k: + k = "/" + + if len(names) > 1: + k += " links: " + ",".join(names[1:]) + + if edge == items[-1][1]: + print(root_prefix + last_prefix + k) + p = root_prefix + if level > 0: + p += " " + self.print_graph(p, edge, level + 1) + else: + print(root_prefix + prefix + k) + p = root_prefix + "│ " + self.print_graph(p, edge, level + 1) + + def _walk(self, root): + """ + Walk through sysfs to get all devnodes that aren't ignored. + + By default, uses /sys as sysfs mounting point. If another + directory is used, it replaces them to /sys at the patches. 
+ """ + + with os.scandir(root) as obj: + for entry in obj: + path = os.path.join(root, entry.name) + if self.sysfs: + p = path.replace(self.sysfs, "/sys", count=1) + else: + p = path + + if self.re_ignore.search(p): + return + + # Handle link first to avoid directory recursion + if entry.is_symlink(): + real = os.path.realpath(path) + if not self.sysfs: + self.aliases[path] = real + else: + real = real.replace(self.sysfs, "/sys", count=1) + + # Add absfile location to graph if it doesn't exist + if not self.re_ignore.search(real): + # Add link to the graph + self.graph_add_file(real, p) + + elif entry.is_file(): + self.graph_add_file(p) + + elif entry.is_dir(): + self._walk(path) + + def __init__(self, abi, sysfs="/sys", hints=False): + """ + Initialize internal variables and get a list of all files inside + sysfs that can currently be parsed. + + Please notice that there are several entries on sysfs that aren't + documented as ABI. Ignore those. + + The real paths will be stored under self.files. Aliases will be + stored in separate, as self.aliases. + """ + + self.abi = abi + self.log = abi.log + + if sysfs != "/sys": + self.sysfs = sysfs.rstrip("/") + else: + self.sysfs = None + + self.hints = hints + + self.root = {} + self.aliases = {} + self.files = set() + + dont_walk = [ + # Those require root access and aren't documented at ABI + f"^{sysfs}/kernel/debug", + f"^{sysfs}/kernel/tracing", + f"^{sysfs}/fs/pstore", + f"^{sysfs}/fs/bpf", + f"^{sysfs}/fs/fuse", + + # This is not documented at ABI + f"^{sysfs}/module", + + f"^{sysfs}/fs/cgroup", # this is big and has zero docs under ABI + f"^{sysfs}/firmware", # documented elsewhere: ACPI, DT bindings + "sections|notes", # aren't actually part of ABI + + # kernel-parameters.txt - not easy to parse + "parameters", + ] + + self.re_ignore = re.compile("|".join(dont_walk)) + + print(f"Reading {sysfs} directory contents...", file=sys.stderr) + self._walk(sysfs) + + def check_file(self, refs, found): + """Check missing ABI symbols for a given sysfs file""" + + res_list = [] + + try: + for names in refs: + fname = names[0] + + res = { + "found": False, + "fname": fname, + "msg": "", + } + res_list.append(res) + + re_what = self.abi.get_regexes(fname) + if not re_what: + self.abi.log.warning(f"missing rules for {fname}") + continue + + for name in names: + for r in re_what: + if self.abi.debug & AbiDebug.UNDEFINED: + self.log.debug("check if %s matches '%s'", name, r.pattern) + if r.match(name): + res["found"] = True + if found: + res["msg"] += f" {fname}: regex:\n\t" + continue + + if self.hints and not res["found"]: + res["msg"] += f" {fname} not found. 
Tested regexes:\n" + for r in re_what: + res["msg"] += " " + r.pattern + "\n" + + except KeyboardInterrupt: + pass + + return res_list + + def _ref_interactor(self, root): + """Recursive function to interact over the sysfs tree""" + + for k, v in root.items(): + if isinstance(v, dict): + yield from self._ref_interactor(v) + + if root == self.root or k == "__name": + continue + + if self.abi.re_string: + fname = v["__name"][0] + if self.abi.re_string.search(fname): + yield v + else: + yield v + + + def get_fileref(self, all_refs, chunk_size): + """Interactor to group refs into chunks""" + + n = 0 + refs = [] + + for ref in all_refs: + refs.append(ref) + + n += 1 + if n >= chunk_size: + yield refs + n = 0 + refs = [] + + yield refs + + def check_undefined_symbols(self, max_workers=None, chunk_size=50, + found=None, dry_run=None): + """Seach ABI for sysfs symbols missing documentation""" + + self.abi.parse_abi() + + if self.abi.debug & AbiDebug.GRAPH: + self.print_graph() + + all_refs = [] + for ref in self._ref_interactor(self.root): + all_refs.append(ref["__name"]) + + if dry_run: + print("Would check", file=sys.stderr) + for ref in all_refs: + print(", ".join(ref)) + + return + + print("Starting to search symbols (it may take several minutes):", + file=sys.stderr) + start = datetime.now() + old_elapsed = None + + # Python doesn't support multithreading due to limitations on its + # global lock (GIL). While Python 3.13 finally made GIL optional, + # there are still issues related to it. Also, we want to have + # backward compatibility with older versions of Python. + # + # So, use instead multiprocess. However, Python is very slow passing + # data from/to multiple processes. Also, it may consume lots of memory + # if the data to be shared is not small. So, we need to group workload + # in chunks that are big enough to generate performance gains while + # not being so big that would cause out-of-memory. + + num_refs = len(all_refs) + print(f"Number of references to parse: {num_refs}", file=sys.stderr) + + if not max_workers: + max_workers = os.cpu_count() + elif max_workers > os.cpu_count(): + max_workers = os.cpu_count() + + max_workers = max(max_workers, 1) + + max_chunk_size = int((num_refs + max_workers - 1) / max_workers) + chunk_size = min(chunk_size, max_chunk_size) + chunk_size = max(1, chunk_size) + + if max_workers > 1: + executor = futures.ProcessPoolExecutor + + # Place references in a random order. This may help improving + # performance, by mixing complex/simple expressions when creating + # chunks + shuffle(all_refs) + else: + # Python has a high overhead with processes. When there's just + # one worker, it is faster to not create a new process. + # Yet, User still deserves to have a progress print. So, use + # python's "thread", which is actually a single process, using + # an internal schedule to switch between tasks. No performance + # gains for non-IO tasks, but still it can be quickly interrupted + # from time to time to display progress. 
+ executor = futures.ThreadPoolExecutor + + not_found = [] + f_list = [] + with executor(max_workers=max_workers) as exe: + for refs in self.get_fileref(all_refs, chunk_size): + if refs: + try: + f_list.append(exe.submit(self.check_file, refs, found)) + + except KeyboardInterrupt: + return + + total = len(f_list) + + if not total: + if self.abi.re_string: + print(f"No ABI symbol matches {self.abi.search_string}") + else: + self.abi.log.warning("No ABI symbols found") + return + + print(f"{len(f_list):6d} jobs queued on {max_workers} workers", + file=sys.stderr) + + while f_list: + try: + t = futures.wait(f_list, timeout=1, + return_when=futures.FIRST_COMPLETED) + + done = t[0] + + for fut in done: + res_list = fut.result() + + for res in res_list: + if not res["found"]: + not_found.append(res["fname"]) + if res["msg"]: + print(res["msg"]) + + f_list.remove(fut) + except KeyboardInterrupt: + return + + except RuntimeError as e: + self.abi.log.warning(f"Future: {e}") + break + + if sys.stderr.isatty(): + elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0] + if len(f_list) < total: + elapsed += f" ({total - len(f_list)}/{total} jobs completed). " + if elapsed != old_elapsed: + print(elapsed + "\r", end="", flush=True, + file=sys.stderr) + old_elapsed = elapsed + + elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0] + print(elapsed, file=sys.stderr) + + for f in sorted(not_found): + print(f"{f} not found.") |
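
For readers unfamiliar with the concurrent.futures pattern used by check_undefined_symbols() above, here is a minimal, self-contained sketch of the same idea: split the references into chunks, submit each chunk to a process pool, and poll with futures.wait() so progress can be printed and KeyboardInterrupt handled between waits. Everything below (match_chunk, chunked, check, the toy refs and patterns) is illustrative only and is not part of the patch.

#!/usr/bin/env python3
"""Sketch of the chunked process-pool pattern; all names are hypothetical."""

import os
import re
import sys
from concurrent import futures
from random import shuffle


def match_chunk(refs, patterns):
    # Worker: return the refs that are not matched by any of the given regexes.
    compiled = [re.compile(p) for p in patterns]
    return [r for r in refs if not any(c.match(r) for c in compiled)]


def chunked(seq, size):
    # Yield successive chunks of at most 'size' items.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]


def check(refs, patterns, chunk_size=50, max_workers=None):
    max_workers = max_workers or os.cpu_count() or 1
    shuffle(refs)  # mix cheap and expensive items across chunks
    not_found = []
    with futures.ProcessPoolExecutor(max_workers=max_workers) as exe:
        f_list = [exe.submit(match_chunk, c, patterns)
                  for c in chunked(refs, chunk_size)]
        while f_list:
            done, _ = futures.wait(f_list, timeout=1,
                                   return_when=futures.FIRST_COMPLETED)
            for fut in done:
                not_found += fut.result()
                f_list.remove(fut)
            # Cheap progress report; a real tool could print elapsed time here.
            print(f"{len(f_list)} chunk(s) pending", file=sys.stderr)
    return sorted(not_found)


if __name__ == "__main__":
    print(check(["/sys/class/net/eth0", "/sys/unknown/node"],
                [r"/sys/class/net/[^/]+"]))

As in the patch, shuffling the references before chunking is only a load-balancing heuristic, and a single-worker run could just as well use ThreadPoolExecutor to avoid process start-up and pickling overhead while still allowing periodic progress output.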