Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

regmap: Implement regmap_multi_reg_read()

Merge series from Guenter Roeck <linux@roeck-us.net>:

regmap_multi_reg_read() is similar to regmap_bulk_read() but reads from
an array of non-sequential registers. It is helpful if multiple non-
sequential registers need to be read in a single operation which would
otherwise have to be mutex protected.

The name of the new function was chosen to match the existing function
regmap_multi_reg_write().

+3882 -1830
+1
.mailmap
··· 608 608 Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org> 609 609 Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org> 610 610 Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org> 611 + Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com> 611 612 Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com> 612 613 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr> 613 614 Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
-6
Documentation/admin-guide/kernel-parameters.txt
··· 2192 2192 Format: 0 | 1 2193 2193 Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON. 2194 2194 2195 - init_mlocked_on_free= [MM] Fill freed userspace memory with zeroes if 2196 - it was mlock'ed and not explicitly munlock'ed 2197 - afterwards. 2198 - Format: 0 | 1 2199 - Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON 2200 - 2201 2195 init_pkru= [X86] Specify the default memory protection keys rights 2202 2196 register contents for all processes. 0x55555554 by 2203 2197 default (disallow access to all but pkey 0). Can
+2 -2
Documentation/devicetree/bindings/dma/fsl,edma.yaml
··· 59 59 - 3 60 60 61 61 dma-channels: 62 - minItems: 1 63 - maxItems: 64 62 + minimum: 1 63 + maximum: 64 64 64 65 65 clocks: 66 66 minItems: 1
+1 -1
Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
··· 77 77 - clocks 78 78 79 79 allOf: 80 - - $ref: i2c-controller.yaml 80 + - $ref: /schemas/i2c/i2c-controller.yaml# 81 81 - if: 82 82 properties: 83 83 compatible:
+1 -1
Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
··· 21 21 google,cros-ec-spi or google,cros-ec-i2c. 22 22 23 23 allOf: 24 - - $ref: i2c-controller.yaml# 24 + - $ref: /schemas/i2c/i2c-controller.yaml# 25 25 26 26 properties: 27 27 compatible:
+8 -7
Documentation/i2c/i2c_bus.svg
··· 1 1 <?xml version="1.0" encoding="UTF-8" standalone="no"?> 2 2 <!-- Created with Inkscape (http://www.inkscape.org/) --> 3 + <!-- Updated to inclusive terminology by Wolfram Sang --> 3 4 4 5 <svg 5 6 xmlns:dc="http://purl.org/dc/elements/1.1/" ··· 1121 1120 <rect 1122 1121 style="opacity:1;fill:#ffb9b9;fill-opacity:1;stroke:#f00000;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" 1123 1122 id="rect4424-3-2-9-7" 1124 - width="112.5" 1123 + width="134.5" 1125 1124 height="113.75008" 1126 1125 x="112.5" 1127 1126 y="471.11221" ··· 1134 1133 y="521.46259" 1135 1134 id="text4349"><tspan 1136 1135 sodipodi:role="line" 1137 - x="167.5354" 1136 + x="178.5354" 1138 1137 y="521.46259" 1139 1138 style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle" 1140 1139 id="tspan1273">I2C</tspan><tspan 1141 1140 sodipodi:role="line" 1142 - x="167.5354" 1141 + x="178.5354" 1143 1142 y="552.71259" 1144 1143 style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle" 1145 - id="tspan1285">Master</tspan></text> 1144 + id="tspan1285">Controller</tspan></text> 1146 1145 <rect 1147 1146 style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate" 1148 1147 id="rect4424-3-2-9-7-3-3-5-3" ··· 1172 1171 x="318.59131" 1173 1172 y="552.08752" 1174 1173 
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px" 1175 - id="tspan1287">Slave</tspan></text> 1174 + id="tspan1287">Target</tspan></text> 1176 1175 <path 1177 1176 style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968767;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" 1178 1177 d="m 112.49995,677.36223 c 712.50005,0 712.50005,0 712.50005,0" ··· 1234 1233 x="468.59131" 1235 1234 y="552.08746" 1236 1235 style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px" 1237 - id="tspan1287-6">Slave</tspan></text> 1236 + id="tspan1287-6">Target</tspan></text> 1238 1237 <rect 1239 1238 style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate" 1240 1239 id="rect4424-3-2-9-7-3-3-5-3-1" ··· 1259 1258 x="618.59131" 1260 1259 y="552.08746" 1261 1260 style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px" 1262 - id="tspan1287-9">Slave</tspan></text> 1261 + id="tspan1287-9">Target</tspan></text> 1263 1262 <path 1264 1263 style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM)" 1265 1264 d="m 150,583.61221 v 93.75"
+50 -29
Documentation/i2c/summary.rst
··· 3 3 ============================= 4 4 5 5 I²C (pronounce: I squared C and written I2C in the kernel documentation) is 6 - a protocol developed by Philips. It is a slow two-wire protocol (variable 7 - speed, up to 400 kHz), with a high speed extension (3.4 MHz). It provides 6 + a protocol developed by Philips. It is a two-wire protocol with variable 7 + speed (typically up to 400 kHz, high speed modes up to 5 MHz). It provides 8 8 an inexpensive bus for connecting many types of devices with infrequent or 9 - low bandwidth communications needs. I2C is widely used with embedded 10 - systems. Some systems use variants that don't meet branding requirements, 9 + low bandwidth communications needs. I2C is widely used with embedded 10 + systems. Some systems use variants that don't meet branding requirements, 11 11 and so are not advertised as being I2C but come under different names, 12 12 e.g. TWI (Two Wire Interface), IIC. 13 13 14 - The latest official I2C specification is the `"I2C-bus specification and user 15 - manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_ 16 - published by NXP Semiconductors. However, you need to log-in to the site to 17 - access the PDF. An older version of the specification (revision 6) is archived 18 - `here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_. 14 + The latest official I2C specification is the `"I²C-bus specification and user 15 + manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_ 16 + published by NXP Semiconductors, version 7 as of this writing. 19 17 20 18 SMBus (System Management Bus) is based on the I2C protocol, and is mostly 21 - a subset of I2C protocols and signaling. Many I2C devices will work on an 19 + a subset of I2C protocols and signaling. Many I2C devices will work on an 22 20 SMBus, but some SMBus protocols add semantics beyond what is required to 23 - achieve I2C branding. Modern PC mainboards rely on SMBus. 
The most common 21 + achieve I2C branding. Modern PC mainboards rely on SMBus. The most common 24 22 devices connected through SMBus are RAM modules configured using I2C EEPROMs, 25 23 and hardware monitoring chips. 26 24 27 25 Because the SMBus is mostly a subset of the generalized I2C bus, we can 28 - use its protocols on many I2C systems. However, there are systems that don't 26 + use its protocols on many I2C systems. However, there are systems that don't 29 27 meet both SMBus and I2C electrical constraints; and others which can't 30 28 implement all the common SMBus protocol semantics or messages. 31 29 ··· 31 33 Terminology 32 34 =========== 33 35 34 - Using the terminology from the official documentation, the I2C bus connects 35 - one or more *master* chips and one or more *slave* chips. 36 + The I2C bus connects one or more controller chips and one or more target chips. 36 37 37 38 .. kernel-figure:: i2c_bus.svg 38 - :alt: Simple I2C bus with one master and 3 slaves 39 + :alt: Simple I2C bus with one controller and 3 targets 39 40 40 41 Simple I2C bus 41 42 42 - A **master** chip is a node that starts communications with slaves. In the 43 - Linux kernel implementation it is called an **adapter** or bus. Adapter 44 - drivers are in the ``drivers/i2c/busses/`` subdirectory. 43 + A **controller** chip is a node that starts communications with targets. In the 44 + Linux kernel implementation it is also called an "adapter" or "bus". Controller 45 + drivers are usually in the ``drivers/i2c/busses/`` subdirectory. 45 46 46 - An **algorithm** contains general code that can be used to implement a 47 - whole class of I2C adapters. Each specific adapter driver either depends on 48 - an algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes 49 - its own implementation. 47 + An **algorithm** contains general code that can be used to implement a whole 48 + class of I2C controllers. 
Each specific controller driver either depends on an 49 + algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes its 50 + own implementation. 50 51 51 - A **slave** chip is a node that responds to communications when addressed 52 - by the master. In Linux it is called a **client**. Client drivers are kept 53 - in a directory specific to the feature they provide, for example 54 - ``drivers/media/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for 52 + A **target** chip is a node that responds to communications when addressed by a 53 + controller. In the Linux kernel implementation it is also called a "client". 54 + While targets are usually separate external chips, Linux can also act as a 55 + target (needs hardware support) and respond to another controller on the bus. 56 + This is then called a **local target**. In contrast, an external chip is called 57 + a **remote target**. 58 + 59 + Target drivers are kept in a directory specific to the feature they provide, 60 + for example ``drivers/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for 55 61 video-related chips. 56 62 57 - For the example configuration in figure, you will need a driver for your 58 - I2C adapter, and drivers for your I2C devices (usually one driver for each 59 - device). 63 + For the example configuration in the figure above, you will need one driver for 64 + the I2C controller, and drivers for your I2C targets. Usually one driver for 65 + each target. 66 + 67 + Synonyms 68 + -------- 69 + 70 + As mentioned above, the Linux I2C implementation historically uses the terms 71 + "adapter" for controller and "client" for target. A number of data structures 72 + have these synonyms in their name. So, when discussing implementation details, 73 + you should be aware of these terms as well. The official wording is preferred, 74 + though. 
75 + 76 + Outdated terminology 77 + -------------------- 78 + 79 + In earlier I2C specifications, controller was named "master" and target was 80 + named "slave". These terms have been obsoleted with v7 of the specification and 81 + their use is also discouraged by the Linux Kernel Code of Conduct. You may 82 + still find them in references to documentation which has not been updated. The 83 + general attitude, however, is to use the inclusive terms: controller and 84 + target. Work to replace the old terminology in the Linux Kernel is on-going.
-2
Documentation/netlink/specs/nfsd.yaml
··· 123 123 doc: dump pending nfsd rpc 124 124 attribute-set: rpc-status 125 125 dump: 126 - pre: nfsd-nl-rpc-status-get-start 127 - post: nfsd-nl-rpc-status-get-done 128 126 reply: 129 127 attributes: 130 128 - xid
+1
Documentation/userspace-api/index.rst
··· 32 32 seccomp_filter 33 33 landlock 34 34 lsm 35 + mfd_noexec 35 36 spec_ctrl 36 37 tee 37 38
+86
Documentation/userspace-api/mfd_noexec.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 2 + 3 + ================================== 4 + Introduction of non-executable mfd 5 + ================================== 6 + :Author: 7 + Daniel Verkamp <dverkamp@chromium.org> 8 + Jeff Xu <jeffxu@chromium.org> 9 + 10 + :Contributor: 11 + Aleksa Sarai <cyphar@cyphar.com> 12 + 13 + Since Linux introduced the memfd feature, memfds have always had their 14 + execute bit set, and the memfd_create() syscall doesn't allow setting 15 + it differently. 16 + 17 + However, in a secure-by-default system, such as ChromeOS, (where all 18 + executables should come from the rootfs, which is protected by verified 19 + boot), this executable nature of memfd opens a door for NoExec bypass 20 + and enables “confused deputy attack”. E.g, in VRP bug [1]: cros_vm 21 + process created a memfd to share the content with an external process, 22 + however the memfd is overwritten and used for executing arbitrary code 23 + and root escalation. [2] lists more VRP of this kind. 24 + 25 + On the other hand, executable memfd has its legit use: runc uses memfd’s 26 + seal and executable feature to copy the contents of the binary then 27 + execute them. For such a system, we need a solution to differentiate runc's 28 + use of executable memfds and an attacker's [3]. 29 + 30 + To address those above: 31 + - Let memfd_create() set X bit at creation time. 32 + - Let memfd be sealed for modifying X bit when NX is set. 33 + - Add a new pid namespace sysctl: vm.memfd_noexec to help applications in 34 + migrating and enforcing non-executable MFD. 35 + 36 + User API 37 + ======== 38 + ``int memfd_create(const char *name, unsigned int flags)`` 39 + 40 + ``MFD_NOEXEC_SEAL`` 41 + When MFD_NOEXEC_SEAL bit is set in the ``flags``, memfd is created 42 + with NX. F_SEAL_EXEC is set and the memfd can't be modified to 43 + add X later. MFD_ALLOW_SEALING is also implied. 44 + This is the most common case for the application to use memfd. 
45 + 46 + ``MFD_EXEC`` 47 + When MFD_EXEC bit is set in the ``flags``, memfd is created with X. 48 + 49 + Note: 50 + ``MFD_NOEXEC_SEAL`` implies ``MFD_ALLOW_SEALING``. In case that 51 + an app doesn't want sealing, it can add F_SEAL_SEAL after creation. 52 + 53 + 54 + Sysctl: 55 + ======== 56 + ``pid namespaced sysctl vm.memfd_noexec`` 57 + 58 + The new pid namespaced sysctl vm.memfd_noexec has 3 values: 59 + 60 + - 0: MEMFD_NOEXEC_SCOPE_EXEC 61 + memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like 62 + MFD_EXEC was set. 63 + 64 + - 1: MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 65 + memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like 66 + MFD_NOEXEC_SEAL was set. 67 + 68 + - 2: MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 69 + memfd_create() without MFD_NOEXEC_SEAL will be rejected. 70 + 71 + The sysctl allows finer control of memfd_create for old software that 72 + doesn't set the executable bit; for example, a container with 73 + vm.memfd_noexec=1 means the old software will create non-executable memfd 74 + by default while new software can create executable memfd by setting 75 + MFD_EXEC. 76 + 77 + The value of vm.memfd_noexec is passed to child namespace at creation 78 + time. In addition, the setting is hierarchical, i.e. during memfd_create, 79 + we will search from current ns to root ns and use the most restrictive 80 + setting. 81 + 82 + [1] https://crbug.com/1305267 83 + 84 + [2] https://bugs.chromium.org/p/chromium/issues/list?q=type%3Dbug-security%20memfd%20escalation&can=1 85 + 86 + [3] https://lwn.net/Articles/781013/
+15 -6
Documentation/virt/hyperv/clocks.rst
··· 62 62 space code performs the same algorithm of reading the TSC and 63 63 applying the scale and offset to get the constant 10 MHz clock. 64 64 65 - Linux clockevents are based on Hyper-V synthetic timer 0. While 66 - Hyper-V offers 4 synthetic timers for each CPU, Linux only uses 67 - timer 0. Interrupts from stimer0 are recorded on the "HVS" line in 68 - /proc/interrupts. Clockevents based on the virtualized PIT and 69 - local APIC timer also work, but the Hyper-V synthetic timer is 70 - preferred. 65 + Linux clockevents are based on Hyper-V synthetic timer 0 (stimer0). 66 + While Hyper-V offers 4 synthetic timers for each CPU, Linux only uses 67 + timer 0. In older versions of Hyper-V, an interrupt from stimer0 68 + results in a VMBus control message that is demultiplexed by 69 + vmbus_isr() as described in the Documentation/virt/hyperv/vmbus.rst 70 + documentation. In newer versions of Hyper-V, stimer0 interrupts can 71 + be mapped to an architectural interrupt, which is referred to as 72 + "Direct Mode". Linux prefers to use Direct Mode when available. Since 73 + x86/x64 doesn't support per-CPU interrupts, Direct Mode statically 74 + allocates an x86 interrupt vector (HYPERV_STIMER0_VECTOR) across all CPUs 75 + and explicitly codes it to call the stimer0 interrupt handler. Hence 76 + interrupts from stimer0 are recorded on the "HVS" line in /proc/interrupts 77 + rather than being associated with a Linux IRQ. Clockevents based on the 78 + virtualized PIT and local APIC timer also work, but Hyper-V stimer0 79 + is preferred. 71 80 72 81 The driver for the Hyper-V synthetic system clock and timers is 73 82 drivers/clocksource/hyperv_timer.c.
+11 -11
Documentation/virt/hyperv/overview.rst
··· 40 40 arm64, these synthetic registers must be accessed using explicit 41 41 hypercalls. 42 42 43 - * VMbus: VMbus is a higher-level software construct that is built on 43 + * VMBus: VMBus is a higher-level software construct that is built on 44 44 the other 3 mechanisms. It is a message passing interface between 45 45 the Hyper-V host and the Linux guest. It uses memory that is shared 46 46 between Hyper-V and the guest, along with various signaling ··· 54 54 55 55 .. _Hyper-V Top Level Functional Spec (TLFS): https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/tlfs 56 56 57 - VMbus is not documented. This documentation provides a high-level 58 - overview of VMbus and how it works, but the details can be discerned 57 + VMBus is not documented. This documentation provides a high-level 58 + overview of VMBus and how it works, but the details can be discerned 59 59 only from the code. 60 60 61 61 Sharing Memory ··· 74 74 physical address space. How Hyper-V is told about the GPA or list 75 75 of GPAs varies. In some cases, a single GPA is written to a 76 76 synthetic register. In other cases, a GPA or list of GPAs is sent 77 - in a VMbus message. 77 + in a VMBus message. 78 78 79 79 * Hyper-V translates the GPAs into "real" physical memory addresses, 80 80 and creates a virtual mapping that it can use to access the memory. ··· 133 133 any hot-add CPUs. 134 134 135 135 A Linux guest CPU may be taken offline using the normal Linux 136 - mechanisms, provided no VMbus channel interrupts are assigned to 137 - the CPU. See the section on VMbus Interrupts for more details 138 - on how VMbus channel interrupts can be re-assigned to permit 136 + mechanisms, provided no VMBus channel interrupts are assigned to 137 + the CPU. See the section on VMBus Interrupts for more details 138 + on how VMBus channel interrupts can be re-assigned to permit 139 139 taking a CPU offline. 
140 140 141 141 32-bit and 64-bit ··· 169 169 via flags in synthetic MSRs that Hyper-V provides to the guest, 170 170 and the guest code tests these flags. 171 171 172 - VMbus has its own protocol version that is negotiated during the 173 - initial VMbus connection from the guest to Hyper-V. This version 172 + VMBus has its own protocol version that is negotiated during the 173 + initial VMBus connection from the guest to Hyper-V. This version 174 174 number is also output to dmesg during boot. This version number 175 175 is checked in a few places in the code to determine if specific 176 176 functionality is present. 177 177 178 - Furthermore, each synthetic device on VMbus also has a protocol 179 - version that is separate from the VMbus protocol version. Device 178 + Furthermore, each synthetic device on VMBus also has a protocol 179 + version that is separate from the VMBus protocol version. Device 180 180 drivers for these synthetic devices typically negotiate the device 181 181 protocol version, and may test that protocol version to determine 182 182 if specific device functionality is present.
+82 -59
Documentation/virt/hyperv/vmbus.rst
··· 1 1 .. SPDX-License-Identifier: GPL-2.0 2 2 3 - VMbus 3 + VMBus 4 4 ===== 5 - VMbus is a software construct provided by Hyper-V to guest VMs. It 5 + VMBus is a software construct provided by Hyper-V to guest VMs. It 6 6 consists of a control path and common facilities used by synthetic 7 7 devices that Hyper-V presents to guest VMs. The control path is 8 8 used to offer synthetic devices to the guest VM and, in some cases, ··· 12 12 signaling primitives to allow Hyper-V and the guest to interrupt 13 13 each other. 14 14 15 - VMbus is modeled in Linux as a bus, with the expected /sys/bus/vmbus 16 - entry in a running Linux guest. The VMbus driver (drivers/hv/vmbus_drv.c) 17 - establishes the VMbus control path with the Hyper-V host, then 15 + VMBus is modeled in Linux as a bus, with the expected /sys/bus/vmbus 16 + entry in a running Linux guest. The VMBus driver (drivers/hv/vmbus_drv.c) 17 + establishes the VMBus control path with the Hyper-V host, then 18 18 registers itself as a Linux bus driver. It implements the standard 19 19 bus functions for adding and removing devices to/from the bus. 20 20 ··· 49 49 the synthetic SCSI controller is "storvsc". These drivers contain 50 50 functions with names like "storvsc_connect_to_vsp". 51 51 52 - VMbus channels 52 + VMBus channels 53 53 -------------- 54 - An instance of a synthetic device uses VMbus channels to communicate 54 + An instance of a synthetic device uses VMBus channels to communicate 55 55 between the VSP and the VSC. Channels are bi-directional and used 56 56 for passing messages. Most synthetic devices use a single channel, 57 57 but the synthetic SCSI controller and synthetic NIC may use multiple ··· 73 73 actual ring. The size of the ring is determined by the VSC in the 74 74 guest and is specific to each synthetic device. The list of GPAs 75 75 making up the ring is communicated to the Hyper-V host over the 76 - VMbus control path as a GPA Descriptor List (GPADL). 
See function 76 + VMBus control path as a GPA Descriptor List (GPADL). See function 77 77 vmbus_establish_gpadl(). 78 78 79 79 Each ring buffer is mapped into contiguous Linux kernel virtual ··· 102 102 approximately 1280 Mbytes. For versions prior to Windows Server 103 103 2019, the limit is approximately 384 Mbytes. 104 104 105 - VMbus messages 106 - -------------- 107 - All VMbus messages have a standard header that includes the message 108 - length, the offset of the message payload, some flags, and a 105 + VMBus channel messages 106 + ---------------------- 107 + All messages sent in a VMBus channel have a standard header that includes 108 + the message length, the offset of the message payload, some flags, and a 109 109 transactionID. The portion of the message after the header is 110 110 unique to each VSP/VSC pair. 111 111 ··· 137 137 buffer. For example, the storvsc driver uses this approach to 138 138 specify the data buffers to/from which disk I/O is done. 139 139 140 - Three functions exist to send VMbus messages: 140 + Three functions exist to send VMBus channel messages: 141 141 142 142 1. vmbus_sendpacket(): Control-only messages and messages with 143 143 embedded data -- no GPAs ··· 154 154 and valid messages, and Linux drivers for synthetic devices did not 155 155 fully validate messages. With the introduction of processor 156 156 technologies that fully encrypt guest memory and that allow the 157 - guest to not trust the hypervisor (AMD SNP-SEV, Intel TDX), trusting 157 + guest to not trust the hypervisor (AMD SEV-SNP, Intel TDX), trusting 158 158 the Hyper-V host is no longer a valid assumption. The drivers for 159 - VMbus synthetic devices are being updated to fully validate any 159 + VMBus synthetic devices are being updated to fully validate any 160 160 values read from memory that is shared with Hyper-V, which includes 161 - messages from VMbus devices. To facilitate such validation, 161 + messages from VMBus devices. 
To facilitate such validation, 162 162 messages read by the guest from the "in" ring buffer are copied to a 163 163 temporary buffer that is not shared with Hyper-V. Validation is 164 164 performed in this temporary buffer without the risk of Hyper-V 165 165 maliciously modifying the message after it is validated but before 166 166 it is used. 167 167 168 - VMbus interrupts 168 + Synthetic Interrupt Controller (synic) 169 + -------------------------------------- 170 + Hyper-V provides each guest CPU with a synthetic interrupt controller 171 + that is used by VMBus for host-guest communication. While each synic 172 + defines 16 synthetic interrupts (SINT), Linux uses only one of the 16 173 + (VMBUS_MESSAGE_SINT). All interrupts related to communication between 174 + the Hyper-V host and a guest CPU use that SINT. 175 + 176 + The SINT is mapped to a single per-CPU architectural interrupt (i.e, 177 + an 8-bit x86/x64 interrupt vector, or an arm64 PPI INTID). Because 178 + each CPU in the guest has a synic and may receive VMBus interrupts, 179 + they are best modeled in Linux as per-CPU interrupts. This model works 180 + well on arm64 where a single per-CPU Linux IRQ is allocated for 181 + VMBUS_MESSAGE_SINT. This IRQ appears in /proc/interrupts as an IRQ labelled 182 + "Hyper-V VMbus". Since x86/x64 lacks support for per-CPU IRQs, an x86 183 + interrupt vector is statically allocated (HYPERVISOR_CALLBACK_VECTOR) 184 + across all CPUs and explicitly coded to call vmbus_isr(). In this case, 185 + there's no Linux IRQ, and the interrupts are visible in aggregate in 186 + /proc/interrupts on the "HYP" line. 187 + 188 + The synic provides the means to demultiplex the architectural interrupt into 189 + one or more logical interrupts and route the logical interrupt to the proper 190 + VMBus handler in Linux. This demultiplexing is done by vmbus_isr() and 191 + related functions that access synic data structures. 
192 + 193 + The synic is not modeled in Linux as an irq chip or irq domain, 194 + and the demultiplexed logical interrupts are not Linux IRQs. As such, 195 + they don't appear in /proc/interrupts or /proc/irq. The CPU 196 + affinity for one of these logical interrupts is controlled via an 197 + entry under /sys/bus/vmbus as described below. 198 + 199 + VMBus interrupts 169 200 ---------------- 170 - VMbus provides a mechanism for the guest to interrupt the host when 201 + VMBus provides a mechanism for the guest to interrupt the host when 171 202 the guest has queued new messages in a ring buffer. The host 172 203 expects that the guest will send an interrupt only when an "out" 173 204 ring buffer transitions from empty to non-empty. If the guest sends ··· 207 176 interrupts, the host may throttle that guest by suspending its 208 177 execution for a few seconds to prevent a denial-of-service attack. 209 178 210 - Similarly, the host will interrupt the guest when it sends a new 211 - message on the VMbus control path, or when a VMbus channel "in" ring 212 - buffer transitions from empty to non-empty. Each CPU in the guest 213 - may receive VMbus interrupts, so they are best modeled as per-CPU 214 - interrupts in Linux. This model works well on arm64 where a single 215 - per-CPU IRQ is allocated for VMbus. Since x86/x64 lacks support for 216 - per-CPU IRQs, an x86 interrupt vector is statically allocated (see 217 - HYPERVISOR_CALLBACK_VECTOR) across all CPUs and explicitly coded to 218 - call the VMbus interrupt service routine. These interrupts are 219 - visible in /proc/interrupts on the "HYP" line. 179 + Similarly, the host will interrupt the guest via the synic when 180 + it sends a new message on the VMBus control path, or when a VMBus 181 + channel "in" ring buffer transitions from empty to non-empty due to 182 + the host inserting a new VMBus channel message. 
The control message stream 183 + and each VMBus channel "in" ring buffer are separate logical interrupts 184 + that are demultiplexed by vmbus_isr(). It demultiplexes by first checking 185 + for channel interrupts by calling vmbus_chan_sched(), which looks at a synic 186 + bitmap to determine which channels have pending interrupts on this CPU. 187 + If multiple channels have pending interrupts for this CPU, they are 188 + processed sequentially. When all channel interrupts have been processed, 189 + vmbus_isr() checks for and processes any messages received on the VMBus 190 + control path. 220 191 221 - The guest CPU that a VMbus channel will interrupt is selected by the 192 + The guest CPU that a VMBus channel will interrupt is selected by the 222 193 guest when the channel is created, and the host is informed of that 223 - selection. VMbus devices are broadly grouped into two categories: 194 + selection. VMBus devices are broadly grouped into two categories: 224 195 225 - 1. "Slow" devices that need only one VMbus channel. The devices 196 + 1. "Slow" devices that need only one VMBus channel. The devices 226 197 (such as keyboard, mouse, heartbeat, and timesync) generate 227 - relatively few interrupts. Their VMbus channels are all 198 + relatively few interrupts. Their VMBus channels are all 228 199 assigned to interrupt the VMBUS_CONNECT_CPU, which is always 229 200 CPU 0. 230 201 231 - 2. "High speed" devices that may use multiple VMbus channels for 202 + 2. "High speed" devices that may use multiple VMBus channels for 232 203 higher parallelism and performance. These devices include the 233 - synthetic SCSI controller and synthetic NIC. Their VMbus 204 + synthetic SCSI controller and synthetic NIC. Their VMBus 234 205 channels interrupts are assigned to CPUs that are spread out 235 206 among the available CPUs in the VM so that interrupts on 236 207 multiple channels can be processed in parallel. 
237 208 238 - The assignment of VMbus channel interrupts to CPUs is done in the 209 + The assignment of VMBus channel interrupts to CPUs is done in the 239 210 function init_vp_index(). This assignment is done outside of the 240 211 normal Linux interrupt affinity mechanism, so the interrupts are 241 212 neither "unmanaged" nor "managed" interrupts. 242 213 243 - The CPU that a VMbus channel will interrupt can be seen in 214 + The CPU that a VMBus channel will interrupt can be seen in 244 215 /sys/bus/vmbus/devices/<deviceGUID>/ channels/<channelRelID>/cpu. 245 216 When running on later versions of Hyper-V, the CPU can be changed 246 - by writing a new value to this sysfs entry. Because the interrupt 247 - assignment is done outside of the normal Linux affinity mechanism, 248 - there are no entries in /proc/irq corresponding to individual 249 - VMbus channel interrupts. 217 + by writing a new value to this sysfs entry. Because VMBus channel 218 + interrupts are not Linux IRQs, there are no entries in /proc/interrupts 219 + or /proc/irq corresponding to individual VMBus channel interrupts. 250 220 251 221 An online CPU in a Linux guest may not be taken offline if it has 252 - VMbus channel interrupts assigned to it. Any such channel 222 + VMBus channel interrupts assigned to it. Any such channel 253 223 interrupts must first be manually reassigned to another CPU as 254 224 described above. When no channel interrupts are assigned to the 255 225 CPU, it can be taken offline. 256 226 257 - When a guest CPU receives a VMbus interrupt from the host, the 258 - function vmbus_isr() handles the interrupt. It first checks for 259 - channel interrupts by calling vmbus_chan_sched(), which looks at a 260 - bitmap setup by the host to determine which channels have pending 261 - interrupts on this CPU. If multiple channels have pending 262 - interrupts for this CPU, they are processed sequentially. 
When all 263 - channel interrupts have been processed, vmbus_isr() checks for and 264 - processes any message received on the VMbus control path. 265 - 266 - The VMbus channel interrupt handling code is designed to work 227 + The VMBus channel interrupt handling code is designed to work 267 228 correctly even if an interrupt is received on a CPU other than the 268 229 CPU assigned to the channel. Specifically, the code does not use 269 230 CPU-based exclusion for correctness. In normal operation, Hyper-V ··· 265 242 even if there is a time lag before Hyper-V starts interrupting the 266 243 new CPU. See comments in target_cpu_store(). 267 244 268 - VMbus device creation/deletion 245 + VMBus device creation/deletion 269 246 ------------------------------ 270 247 Hyper-V and the Linux guest have a separate message-passing path 271 248 that is used for synthetic device creation and deletion. This 272 - path does not use a VMbus channel. See vmbus_post_msg() and 249 + path does not use a VMBus channel. See vmbus_post_msg() and 273 250 vmbus_on_msg_dpc(). 274 251 275 252 The first step is for the guest to connect to the generic 276 - Hyper-V VMbus mechanism. As part of establishing this connection, 277 - the guest and Hyper-V agree on a VMbus protocol version they will 253 + Hyper-V VMBus mechanism. As part of establishing this connection, 254 + the guest and Hyper-V agree on a VMBus protocol version they will 278 255 use. This negotiation allows newer Linux kernels to run on older 279 256 Hyper-V versions, and vice versa. 280 257 281 258 The guest then tells Hyper-V to "send offers". Hyper-V sends an 282 259 offer message to the guest for each synthetic device that the VM 283 - is configured to have. Each VMbus device type has a fixed GUID 284 - known as the "class ID", and each VMbus device instance is also 260 + is configured to have. 
Each VMBus device type has a fixed GUID 261 + known as the "class ID", and each VMBus device instance is also 285 262 identified by a GUID. The offer message from Hyper-V contains 286 263 both GUIDs to uniquely (within the VM) identify the device. 287 264 There is one offer message for each device instance, so a VM with ··· 298 275 the device. Driver/device matching is performed using the standard 299 276 Linux mechanism. 300 277 301 - The device driver probe function opens the primary VMbus channel to 278 + The device driver probe function opens the primary VMBus channel to 302 279 the corresponding VSP. It allocates guest memory for the channel 303 280 ring buffers and shares the ring buffer with the Hyper-V host by 304 281 giving the host a list of GPAs for the ring buffer memory. See ··· 308 285 setup messages via the primary channel. These messages may include 309 286 negotiating the device protocol version to be used between the Linux 310 287 VSC and the VSP on the Hyper-V host. The setup messages may also 311 - include creating additional VMbus channels, which are somewhat 288 + include creating additional VMBus channels, which are somewhat 312 289 mis-named as "sub-channels" since they are functionally 313 290 equivalent to the primary channel once they are created. 314 291
+15 -17
MAINTAINERS
··· 3980 3980 R: Yonghong Song <yonghong.song@linux.dev> 3981 3981 R: John Fastabend <john.fastabend@gmail.com> 3982 3982 R: KP Singh <kpsingh@kernel.org> 3983 - R: Stanislav Fomichev <sdf@google.com> 3983 + R: Stanislav Fomichev <sdf@fomichev.me> 3984 3984 R: Hao Luo <haoluo@google.com> 3985 3985 R: Jiri Olsa <jolsa@kernel.org> 3986 3986 L: bpf@vger.kernel.org ··· 5295 5295 5296 5296 CLANG CONTROL FLOW INTEGRITY SUPPORT 5297 5297 M: Sami Tolvanen <samitolvanen@google.com> 5298 - M: Kees Cook <keescook@chromium.org> 5298 + M: Kees Cook <kees@kernel.org> 5299 5299 R: Nathan Chancellor <nathan@kernel.org> 5300 5300 L: llvm@lists.linux.dev 5301 5301 S: Supported ··· 8211 8211 8212 8212 EXEC & BINFMT API, ELF 8213 8213 R: Eric Biederman <ebiederm@xmission.com> 8214 - R: Kees Cook <keescook@chromium.org> 8214 + R: Kees Cook <kees@kernel.org> 8215 8215 L: linux-mm@kvack.org 8216 8216 S: Supported 8217 8217 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve ··· 8612 8612 F: drivers/net/ethernet/nvidia/* 8613 8613 8614 8614 FORTIFY_SOURCE 8615 - M: Kees Cook <keescook@chromium.org> 8615 + M: Kees Cook <kees@kernel.org> 8616 8616 L: linux-hardening@vger.kernel.org 8617 8617 S: Supported 8618 8618 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening ··· 9102 9102 F: include/linux/platform_data/gsc_hwmon.h 9103 9103 9104 9104 GCC PLUGINS 9105 - M: Kees Cook <keescook@chromium.org> 9105 + M: Kees Cook <kees@kernel.org> 9106 9106 L: linux-hardening@vger.kernel.org 9107 9107 S: Maintained 9108 9108 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening ··· 9236 9236 F: drivers/input/touchscreen/resistive-adc-touch.c 9237 9237 9238 9238 GENERIC STRING LIBRARY 9239 - M: Kees Cook <keescook@chromium.org> 9239 + M: Kees Cook <kees@kernel.org> 9240 9240 R: Andy Shevchenko <andy@kernel.org> 9241 9241 L: linux-hardening@vger.kernel.org 9242 9242 S: Supported ··· 11950 11950 F: usr/ 11951 
11951 11952 11952 KERNEL HARDENING (not covered by other areas) 11953 - M: Kees Cook <keescook@chromium.org> 11953 + M: Kees Cook <kees@kernel.org> 11954 11954 R: Gustavo A. R. Silva <gustavoars@kernel.org> 11955 11955 L: linux-hardening@vger.kernel.org 11956 11956 S: Supported ··· 12382 12382 12383 12383 KVM PARAVIRT (KVM/paravirt) 12384 12384 M: Paolo Bonzini <pbonzini@redhat.com> 12385 - R: Wanpeng Li <wanpengli@tencent.com> 12386 12385 R: Vitaly Kuznetsov <vkuznets@redhat.com> 12387 12386 L: kvm@vger.kernel.org 12388 12387 S: Supported ··· 12477 12478 12478 12479 LEAKING_ADDRESSES 12479 12480 M: Tycho Andersen <tycho@tycho.pizza> 12480 - R: Kees Cook <keescook@chromium.org> 12481 + R: Kees Cook <kees@kernel.org> 12481 12482 L: linux-hardening@vger.kernel.org 12482 12483 S: Maintained 12483 12484 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening ··· 12773 12774 F: arch/powerpc/platforms/83xx/ 12774 12775 12775 12776 LINUX KERNEL DUMP TEST MODULE (LKDTM) 12776 - M: Kees Cook <keescook@chromium.org> 12777 + M: Kees Cook <kees@kernel.org> 12777 12778 S: Maintained 12778 12779 F: drivers/misc/lkdtm/* 12779 12780 F: tools/testing/selftests/lkdtm/* ··· 12903 12904 F: drivers/media/usb/dvb-usb-v2/lmedm04* 12904 12905 12905 12906 LOADPIN SECURITY MODULE 12906 - M: Kees Cook <keescook@chromium.org> 12907 + M: Kees Cook <kees@kernel.org> 12907 12908 S: Supported 12908 12909 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening 12909 12910 F: Documentation/admin-guide/LSM/LoadPin.rst ··· 17995 17996 17996 17997 PROC SYSCTL 17997 17998 M: Luis Chamberlain <mcgrof@kernel.org> 17998 - M: Kees Cook <keescook@chromium.org> 17999 + M: Kees Cook <kees@kernel.org> 17999 18000 M: Joel Granados <j.granados@samsung.com> 18000 18001 L: linux-kernel@vger.kernel.org 18001 18002 L: linux-fsdevel@vger.kernel.org ··· 18051 18052 F: drivers/net/pse-pd/ 18052 18053 18053 18054 PSTORE FILESYSTEM 18054 - M: Kees Cook 
<keescook@chromium.org> 18055 + M: Kees Cook <kees@kernel.org> 18055 18056 R: Tony Luck <tony.luck@intel.com> 18056 18057 R: Guilherme G. Piccoli <gpiccoli@igalia.com> 18057 18058 L: linux-hardening@vger.kernel.org ··· 20057 20058 F: drivers/media/cec/platform/seco/seco-cec.h 20058 20059 20059 20060 SECURE COMPUTING 20060 - M: Kees Cook <keescook@chromium.org> 20061 + M: Kees Cook <kees@kernel.org> 20061 20062 R: Andy Lutomirski <luto@amacapital.net> 20062 20063 R: Will Drewry <wad@chromium.org> 20063 20064 S: Supported ··· 22971 22972 F: include/uapi/linux/ublk_cmd.h 22972 22973 22973 22974 UBSAN 22974 - M: Kees Cook <keescook@chromium.org> 22975 + M: Kees Cook <kees@kernel.org> 22975 22976 R: Marco Elver <elver@google.com> 22976 22977 R: Andrey Konovalov <andreyknvl@gmail.com> 22977 22978 R: Andrey Ryabinin <ryabinin.a.a@gmail.com> ··· 23973 23974 M: Andrew Morton <akpm@linux-foundation.org> 23974 23975 R: Uladzislau Rezki <urezki@gmail.com> 23975 23976 R: Christoph Hellwig <hch@infradead.org> 23976 - R: Lorenzo Stoakes <lstoakes@gmail.com> 23977 23977 L: linux-mm@kvack.org 23978 23978 S: Maintained 23979 23979 W: http://www.linux-mm.org ··· 24808 24810 F: include/linux/yam.h 24809 24811 24810 24812 YAMA SECURITY MODULE 24811 - M: Kees Cook <keescook@chromium.org> 24813 + M: Kees Cook <kees@kernel.org> 24812 24814 S: Supported 24813 24815 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening 24814 24816 F: Documentation/admin-guide/LSM/Yama.rst
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 10 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION*
+1 -1
arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
··· 85 85 }; 86 86 }; 87 87 88 - panel { 88 + panel_dpi: panel { 89 89 compatible = "sii,43wvf1g"; 90 90 pinctrl-names = "default"; 91 91 pinctrl-0 = <&pinctrl_display_power>;
+4 -2
arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
··· 10 10 /plugin/; 11 11 12 12 &{/} { 13 - /delete-node/ panel; 14 - 15 13 hdmi: connector-hdmi { 16 14 compatible = "hdmi-connector"; 17 15 label = "hdmi"; ··· 78 80 }; 79 81 }; 80 82 }; 83 + }; 84 + 85 + &panel_dpi { 86 + status = "disabled"; 81 87 }; 82 88 83 89 &tve {
+13
arch/arm/include/asm/efi.h
··· 14 14 #include <asm/mach/map.h> 15 15 #include <asm/mmu_context.h> 16 16 #include <asm/ptrace.h> 17 + #include <asm/uaccess.h> 17 18 18 19 #ifdef CONFIG_EFI 19 20 void efi_init(void); ··· 25 24 26 25 #define arch_efi_call_virt_setup() efi_virtmap_load() 27 26 #define arch_efi_call_virt_teardown() efi_virtmap_unload() 27 + 28 + #ifdef CONFIG_CPU_TTBR0_PAN 29 + #undef arch_efi_call_virt 30 + #define arch_efi_call_virt(p, f, args...) ({ \ 31 + unsigned int flags = uaccess_save_and_enable(); \ 32 + efi_status_t res = _Generic((p)->f(args), \ 33 + efi_status_t: (p)->f(args), \ 34 + default: ((p)->f(args), EFI_ABORTED)); \ 35 + uaccess_restore(flags); \ 36 + res; \ 37 + }) 38 + #endif 28 39 29 40 #define ARCH_EFI_IRQ_FLAGS_MASK \ 30 41 (PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
+2 -1
arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
··· 6 6 #include <dt-bindings/phy/phy-imx8-pcie.h> 7 7 #include <dt-bindings/pwm/pwm.h> 8 8 #include "imx8mm.dtsi" 9 + #include "imx8mm-overdrive.dtsi" 9 10 10 11 / { 11 12 chosen { ··· 936 935 /* Verdin GPIO_9_DSI (pulled-up as active-low) */ 937 936 pinctrl_gpio_9_dsi: gpio9dsigrp { 938 937 fsl,pins = 939 - <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */ 938 + <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */ 940 939 }; 941 940 942 941 /* Verdin GPIO_10_DSI (pulled-up as active-low) */
+1 -1
arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
··· 254 254 <&clk IMX8MP_CLK_CLKOUT2>, 255 255 <&clk IMX8MP_AUDIO_PLL2_OUT>; 256 256 assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>; 257 - assigned-clock-rates = <13000000>, <13000000>, <156000000>; 257 + assigned-clock-rates = <13000000>, <13000000>, <208000000>; 258 258 reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>; 259 259 status = "disabled"; 260 260
+1 -1
arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
··· 219 219 220 220 bluetooth { 221 221 compatible = "brcm,bcm4330-bt"; 222 - shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>; 222 + shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; 223 223 }; 224 224 }; 225 225
+1 -1
arch/arm64/boot/dts/freescale/imx8qm-mek.dts
··· 36 36 regulator-name = "SD1_SPWR"; 37 37 regulator-min-microvolt = <3000000>; 38 38 regulator-max-microvolt = <3000000>; 39 - gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>; 39 + gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>; 40 40 enable-active-high; 41 41 }; 42 42
-1
arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
··· 296 296 vmmc-supply = <&reg_usdhc2_vmmc>; 297 297 bus-width = <4>; 298 298 status = "okay"; 299 - no-sdio; 300 299 no-mmc; 301 300 }; 302 301
+2
arch/arm64/kernel/efi.c
··· 9 9 10 10 #include <linux/efi.h> 11 11 #include <linux/init.h> 12 + #include <linux/kmemleak.h> 12 13 #include <linux/screen_info.h> 13 14 #include <linux/vmalloc.h> 14 15 ··· 214 213 return -ENOMEM; 215 214 } 216 215 216 + kmemleak_not_leak(p); 217 217 efi_rt_stack_top = p + THREAD_SIZE; 218 218 return 0; 219 219 }
+12
arch/arm64/kvm/hyp/nvhe/ffa.c
··· 177 177 res); 178 178 } 179 179 180 + static void ffa_rx_release(struct arm_smccc_res *res) 181 + { 182 + arm_smccc_1_1_smc(FFA_RX_RELEASE, 183 + 0, 0, 184 + 0, 0, 0, 0, 0, 185 + res); 186 + } 187 + 180 188 static void do_ffa_rxtx_map(struct arm_smccc_res *res, 181 189 struct kvm_cpu_context *ctxt) 182 190 { ··· 551 543 if (WARN_ON(offset > len || 552 544 fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) { 553 545 ret = FFA_RET_ABORTED; 546 + ffa_rx_release(res); 554 547 goto out_unlock; 555 548 } 556 549 557 550 if (len > ffa_desc_buf.len) { 558 551 ret = FFA_RET_NO_MEMORY; 552 + ffa_rx_release(res); 559 553 goto out_unlock; 560 554 } 561 555 562 556 buf = ffa_desc_buf.buf; 563 557 memcpy(buf, hyp_buffers.rx, fraglen); 558 + ffa_rx_release(res); 564 559 565 560 for (fragoff = fraglen; fragoff < len; fragoff += fraglen) { 566 561 ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff); ··· 574 563 575 564 fraglen = res->a3; 576 565 memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen); 566 + ffa_rx_release(res); 577 567 } 578 568 579 569 ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
+1 -1
arch/arm64/kvm/vgic/vgic-init.c
··· 391 391 392 392 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { 393 393 list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) 394 - vgic_v3_free_redist_region(rdreg); 394 + vgic_v3_free_redist_region(kvm, rdreg); 395 395 INIT_LIST_HEAD(&dist->rd_regions); 396 396 } else { 397 397 dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
+13 -2
arch/arm64/kvm/vgic/vgic-mmio-v3.c
··· 919 919 return ret; 920 920 } 921 921 922 - void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg) 922 + void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg) 923 923 { 924 + struct kvm_vcpu *vcpu; 925 + unsigned long c; 926 + 927 + lockdep_assert_held(&kvm->arch.config_lock); 928 + 929 + /* Garbage collect the region */ 930 + kvm_for_each_vcpu(c, vcpu, kvm) { 931 + if (vcpu->arch.vgic_cpu.rdreg == rdreg) 932 + vcpu->arch.vgic_cpu.rdreg = NULL; 933 + } 934 + 924 935 list_del(&rdreg->list); 925 936 kfree(rdreg); 926 937 } ··· 956 945 957 946 mutex_lock(&kvm->arch.config_lock); 958 947 rdreg = vgic_v3_rdist_region_from_index(kvm, index); 959 - vgic_v3_free_redist_region(rdreg); 948 + vgic_v3_free_redist_region(kvm, rdreg); 960 949 mutex_unlock(&kvm->arch.config_lock); 961 950 return ret; 962 951 }
+1 -1
arch/arm64/kvm/vgic/vgic.h
··· 316 316 317 317 struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm, 318 318 u32 index); 319 - void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg); 319 + void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg); 320 320 321 321 bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size); 322 322
+4 -1
arch/loongarch/Kconfig
··· 143 143 select HAVE_LIVEPATCH 144 144 select HAVE_MOD_ARCH_SPECIFIC 145 145 select HAVE_NMI 146 - select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS 146 + select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG 147 147 select HAVE_PCI 148 148 select HAVE_PERF_EVENTS 149 149 select HAVE_PERF_REGS ··· 260 260 261 261 config AS_HAS_FCSR_CLASS 262 262 def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0) 263 + 264 + config AS_HAS_THIN_ADD_SUB 265 + def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) 263 266 264 267 config AS_HAS_LSX_EXTENSION 265 268 def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
+1
arch/loongarch/Kconfig.debug
··· 28 28 29 29 config UNWINDER_ORC 30 30 bool "ORC unwinder" 31 + depends on HAVE_OBJTOOL 31 32 select OBJTOOL 32 33 help 33 34 This option enables the ORC (Oops Rewind Capability) unwinder for
+3 -1
arch/loongarch/include/asm/hw_breakpoint.h
··· 75 75 #define CSR_MWPC_NUM 0x3f 76 76 77 77 #define CTRL_PLV_ENABLE 0x1e 78 + #define CTRL_PLV0_ENABLE 0x02 79 + #define CTRL_PLV3_ENABLE 0x10 78 80 79 81 #define MWPnCFG3_LoadEn 8 80 82 #define MWPnCFG3_StoreEn 9 ··· 103 101 struct perf_event_attr; 104 102 105 103 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, 106 - int *gen_len, int *gen_type, int *offset); 104 + int *gen_len, int *gen_type); 107 105 extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); 108 106 extern int hw_breakpoint_arch_parse(struct perf_event *bp, 109 107 const struct perf_event_attr *attr,
+55 -41
arch/loongarch/kernel/hw_breakpoint.c
··· 174 174 static int hw_breakpoint_control(struct perf_event *bp, 175 175 enum hw_breakpoint_ops ops) 176 176 { 177 - u32 ctrl; 177 + u32 ctrl, privilege; 178 178 int i, max_slots, enable; 179 + struct pt_regs *regs; 179 180 struct perf_event **slots; 180 181 struct arch_hw_breakpoint *info = counter_arch_bp(bp); 182 + 183 + if (arch_check_bp_in_kernelspace(info)) 184 + privilege = CTRL_PLV0_ENABLE; 185 + else 186 + privilege = CTRL_PLV3_ENABLE; 187 + 188 + /* Whether bp belongs to a task. */ 189 + if (bp->hw.target) 190 + regs = task_pt_regs(bp->hw.target); 181 191 182 192 if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) { 183 193 /* Breakpoint */ ··· 207 197 switch (ops) { 208 198 case HW_BREAKPOINT_INSTALL: 209 199 /* Set the FWPnCFG/MWPnCFG 1~4 register. */ 210 - write_wb_reg(CSR_CFG_ADDR, i, 0, info->address); 211 - write_wb_reg(CSR_CFG_ADDR, i, 1, info->address); 212 - write_wb_reg(CSR_CFG_MASK, i, 0, info->mask); 213 - write_wb_reg(CSR_CFG_MASK, i, 1, info->mask); 214 - write_wb_reg(CSR_CFG_ASID, i, 0, 0); 215 - write_wb_reg(CSR_CFG_ASID, i, 1, 0); 216 200 if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) { 217 - write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE); 201 + write_wb_reg(CSR_CFG_ADDR, i, 0, info->address); 202 + write_wb_reg(CSR_CFG_MASK, i, 0, info->mask); 203 + write_wb_reg(CSR_CFG_ASID, i, 0, 0); 204 + write_wb_reg(CSR_CFG_CTRL, i, 0, privilege); 218 205 } else { 206 + write_wb_reg(CSR_CFG_ADDR, i, 1, info->address); 207 + write_wb_reg(CSR_CFG_MASK, i, 1, info->mask); 208 + write_wb_reg(CSR_CFG_ASID, i, 1, 0); 219 209 ctrl = encode_ctrl_reg(info->ctrl); 220 - write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE); 210 + write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege); 221 211 } 222 212 enable = csr_read64(LOONGARCH_CSR_CRMD); 223 213 csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD); 214 + if (bp->hw.target) 215 + regs->csr_prmd |= CSR_PRMD_PWE; 224 216 break; 225 217 case HW_BREAKPOINT_UNINSTALL: 226 218 /* Reset the 
FWPnCFG/MWPnCFG 1~4 register. */ 227 - write_wb_reg(CSR_CFG_ADDR, i, 0, 0); 228 - write_wb_reg(CSR_CFG_ADDR, i, 1, 0); 229 - write_wb_reg(CSR_CFG_MASK, i, 0, 0); 230 - write_wb_reg(CSR_CFG_MASK, i, 1, 0); 231 - write_wb_reg(CSR_CFG_CTRL, i, 0, 0); 232 - write_wb_reg(CSR_CFG_CTRL, i, 1, 0); 233 - write_wb_reg(CSR_CFG_ASID, i, 0, 0); 234 - write_wb_reg(CSR_CFG_ASID, i, 1, 0); 219 + if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) { 220 + write_wb_reg(CSR_CFG_ADDR, i, 0, 0); 221 + write_wb_reg(CSR_CFG_MASK, i, 0, 0); 222 + write_wb_reg(CSR_CFG_CTRL, i, 0, 0); 223 + write_wb_reg(CSR_CFG_ASID, i, 0, 0); 224 + } else { 225 + write_wb_reg(CSR_CFG_ADDR, i, 1, 0); 226 + write_wb_reg(CSR_CFG_MASK, i, 1, 0); 227 + write_wb_reg(CSR_CFG_CTRL, i, 1, 0); 228 + write_wb_reg(CSR_CFG_ASID, i, 1, 0); 229 + } 230 + if (bp->hw.target) 231 + regs->csr_prmd &= ~CSR_PRMD_PWE; 235 232 break; 236 233 } 237 234 ··· 300 283 * to generic breakpoint descriptions. 301 284 */ 302 285 int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, 303 - int *gen_len, int *gen_type, int *offset) 286 + int *gen_len, int *gen_type) 304 287 { 305 288 /* Type */ 306 289 switch (ctrl.type) { ··· 319 302 default: 320 303 return -EINVAL; 321 304 } 322 - 323 - if (!ctrl.len) 324 - return -EINVAL; 325 - 326 - *offset = __ffs(ctrl.len); 327 305 328 306 /* Len */ 329 307 switch (ctrl.len) { ··· 398 386 struct arch_hw_breakpoint *hw) 399 387 { 400 388 int ret; 401 - u64 alignment_mask, offset; 389 + u64 alignment_mask; 402 390 403 391 /* Build the arch_hw_breakpoint. 
*/ 404 392 ret = arch_build_bp_info(bp, attr, hw); 405 393 if (ret) 406 394 return ret; 407 395 408 - if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE) 409 - alignment_mask = 0x7; 410 - else 396 + if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) { 411 397 alignment_mask = 0x3; 412 - offset = hw->address & alignment_mask; 413 - 414 - hw->address &= ~alignment_mask; 415 - hw->ctrl.len <<= offset; 398 + hw->address &= ~alignment_mask; 399 + } 416 400 417 401 return 0; 418 402 } ··· 479 471 slots = this_cpu_ptr(bp_on_reg); 480 472 481 473 for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) { 482 - bp = slots[i]; 483 - if (bp == NULL) 484 - continue; 485 - perf_bp_event(bp, regs); 474 + if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) { 475 + bp = slots[i]; 476 + if (bp == NULL) 477 + continue; 478 + perf_bp_event(bp, regs); 479 + csr_write32(0x1 << i, LOONGARCH_CSR_FWPS); 480 + update_bp_registers(regs, 0, 0); 481 + } 486 482 } 487 - update_bp_registers(regs, 0, 0); 488 483 } 489 484 NOKPROBE_SYMBOL(breakpoint_handler); 490 485 ··· 499 488 slots = this_cpu_ptr(wp_on_reg); 500 489 501 490 for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) { 502 - wp = slots[i]; 503 - if (wp == NULL) 504 - continue; 505 - perf_bp_event(wp, regs); 491 + if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) { 492 + wp = slots[i]; 493 + if (wp == NULL) 494 + continue; 495 + perf_bp_event(wp, regs); 496 + csr_write32(0x1 << i, LOONGARCH_CSR_MWPS); 497 + update_bp_registers(regs, 0, 1); 498 + } 506 499 } 507 - update_bp_registers(regs, 0, 1); 508 500 } 509 501 NOKPROBE_SYMBOL(watchpoint_handler); 510 502
+27 -20
arch/loongarch/kernel/ptrace.c
··· 494 494 struct arch_hw_breakpoint_ctrl ctrl, 495 495 struct perf_event_attr *attr) 496 496 { 497 - int err, len, type, offset; 497 + int err, len, type; 498 498 499 - err = arch_bp_generic_fields(ctrl, &len, &type, &offset); 499 + err = arch_bp_generic_fields(ctrl, &len, &type); 500 500 if (err) 501 501 return err; 502 502 503 - switch (note_type) { 504 - case NT_LOONGARCH_HW_BREAK: 505 - if ((type & HW_BREAKPOINT_X) != type) 506 - return -EINVAL; 507 - break; 508 - case NT_LOONGARCH_HW_WATCH: 509 - if ((type & HW_BREAKPOINT_RW) != type) 510 - return -EINVAL; 511 - break; 512 - default: 513 - return -EINVAL; 514 - } 515 - 516 503 attr->bp_len = len; 517 504 attr->bp_type = type; 518 - attr->bp_addr += offset; 519 505 520 506 return 0; 521 507 } ··· 595 609 return PTR_ERR(bp); 596 610 597 611 attr = bp->attr; 598 - decode_ctrl_reg(uctrl, &ctrl); 599 - err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); 600 - if (err) 601 - return err; 612 + 613 + switch (note_type) { 614 + case NT_LOONGARCH_HW_BREAK: 615 + ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE; 616 + ctrl.len = LOONGARCH_BREAKPOINT_LEN_4; 617 + break; 618 + case NT_LOONGARCH_HW_WATCH: 619 + decode_ctrl_reg(uctrl, &ctrl); 620 + break; 621 + default: 622 + return -EINVAL; 623 + } 624 + 625 + if (uctrl & CTRL_PLV_ENABLE) { 626 + err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); 627 + if (err) 628 + return err; 629 + attr.disabled = 0; 630 + } else { 631 + attr.disabled = 1; 632 + } 602 633 603 634 return modify_user_hw_breakpoint(bp, &attr); 604 635 } ··· 645 642 { 646 643 struct perf_event *bp; 647 644 struct perf_event_attr attr; 645 + 646 + /* Kernel-space address cannot be monitored by user-space */ 647 + if ((unsigned long)addr >= XKPRANGE) 648 + return -EINVAL; 648 649 649 650 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 650 651 if (IS_ERR(bp))
+1 -1
arch/loongarch/kvm/exit.c
··· 761 761 default: 762 762 ret = KVM_HCALL_INVALID_CODE; 763 763 break; 764 - }; 764 + } 765 765 766 766 kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); 767 767 }
+2 -1
arch/mips/bmips/setup.c
··· 110 110 * RAC flush causes kernel panics on BCM6358 when booting from TP1 111 111 * because the bootloader is not initializing it properly. 112 112 */ 113 - bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)); 113 + bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) || 114 + !!BMIPS_GET_CBR(); 114 115 } 115 116 116 117 static void bcm6368_quirks(void)
+1 -1
arch/mips/include/asm/mipsmtregs.h
··· 322 322 " .set push \n" \ 323 323 " .set "MIPS_ISA_LEVEL" \n" \ 324 324 _ASM_SET_MFTC0 \ 325 - " mftc0 $1, " #rt ", " #sel " \n" \ 325 + " mftc0 %0, " #rt ", " #sel " \n" \ 326 326 _ASM_UNSET_MFTC0 \ 327 327 " .set pop \n" \ 328 328 : "=r" (__res)); \
+1 -1
arch/mips/kernel/syscalls/syscall_o32.tbl
··· 27 27 17 o32 break sys_ni_syscall 28 28 # 18 was sys_stat 29 29 18 o32 unused18 sys_ni_syscall 30 - 19 o32 lseek sys_lseek 30 + 19 o32 lseek sys_lseek compat_sys_lseek 31 31 20 o32 getpid sys_getpid 32 32 21 o32 mount sys_mount 33 33 22 o32 umount sys_oldumount
+2 -2
arch/mips/pci/ops-rc32434.c
··· 112 112 * gives them time to settle 113 113 */ 114 114 if (where == PCI_VENDOR_ID) { 115 - if (ret == 0xffffffff || ret == 0x00000000 || 116 - ret == 0x0000ffff || ret == 0xffff0000) { 115 + if (*val == 0xffffffff || *val == 0x00000000 || 116 + *val == 0x0000ffff || *val == 0xffff0000) { 117 117 if (delay > 4) 118 118 return 0; 119 119 delay *= 2;
+2
arch/powerpc/crypto/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 aesp10-ppc.S 3 + aesp8-ppc.S 3 4 ghashp10-ppc.S 5 + ghashp8-ppc.S
+13 -5
arch/powerpc/kvm/book3s_64_vio.c
··· 130 130 } 131 131 rcu_read_unlock(); 132 132 133 - fdput(f); 134 - 135 - if (!found) 133 + if (!found) { 134 + fdput(f); 136 135 return -EINVAL; 136 + } 137 137 138 138 table_group = iommu_group_get_iommudata(grp); 139 - if (WARN_ON(!table_group)) 139 + if (WARN_ON(!table_group)) { 140 + fdput(f); 140 141 return -EFAULT; 142 + } 141 143 142 144 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { 143 145 struct iommu_table *tbltmp = table_group->tables[i]; ··· 160 158 break; 161 159 } 162 160 } 163 - if (!tbl) 161 + if (!tbl) { 162 + fdput(f); 164 163 return -EINVAL; 164 + } 165 165 166 166 rcu_read_lock(); 167 167 list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { ··· 174 170 /* stit is being destroyed */ 175 171 iommu_tce_table_put(tbl); 176 172 rcu_read_unlock(); 173 + fdput(f); 177 174 return -ENOTTY; 178 175 } 179 176 /* ··· 182 177 * its KVM reference counter and can return. 183 178 */ 184 179 rcu_read_unlock(); 180 + fdput(f); 185 181 return 0; 186 182 } 187 183 rcu_read_unlock(); ··· 190 184 stit = kzalloc(sizeof(*stit), GFP_KERNEL); 191 185 if (!stit) { 192 186 iommu_tce_table_put(tbl); 187 + fdput(f); 193 188 return -ENOMEM; 194 189 } 195 190 ··· 199 192 200 193 list_add_rcu(&stit->next, &stt->iommu_tables); 201 194 195 + fdput(f); 202 196 return 0; 203 197 } 204 198
+1
arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
··· 45 45 no-1-8-v; 46 46 no-mmc; 47 47 no-sdio; 48 + disable-wp; 48 49 }; 49 50 50 51 &uart0 {
-1
arch/x86/include/asm/efi.h
··· 401 401 struct efi_memory_map_data *data); 402 402 extern void __efi_memmap_free(u64 phys, unsigned long size, 403 403 unsigned long flags); 404 - #define __efi_memmap_free __efi_memmap_free 405 404 406 405 extern int __init efi_memmap_install(struct efi_memory_map_data *data); 407 406 extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+2 -1
arch/x86/kernel/cpu/resctrl/monitor.c
··· 519 519 * allows architectures that ignore the closid parameter to avoid an 520 520 * unnecessary check. 521 521 */ 522 - if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, 522 + if (!resctrl_arch_mon_capable() || 523 + idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, 523 524 RESCTRL_RESERVED_RMID)) 524 525 return; 525 526
+2 -2
arch/x86/kvm/svm/svm.c
··· 2843 2843 2844 2844 if (sev_es_prevent_msr_access(vcpu, msr_info)) { 2845 2845 msr_info->data = 0; 2846 - return -EINVAL; 2846 + return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; 2847 2847 } 2848 2848 2849 2849 switch (msr_info->index) { ··· 2998 2998 u64 data = msr->data; 2999 2999 3000 3000 if (sev_es_prevent_msr_access(vcpu, msr)) 3001 - return -EINVAL; 3001 + return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; 3002 3002 3003 3003 switch (ecx) { 3004 3004 case MSR_AMD64_TSC_RATIO:
+4 -5
arch/x86/kvm/x86.c
··· 10718 10718 10719 10719 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); 10720 10720 10721 + static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10722 + 10721 10723 if (irqchip_split(vcpu->kvm)) 10722 10724 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); 10723 - else { 10724 - static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10725 - if (ioapic_in_kernel(vcpu->kvm)) 10726 - kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 10727 - } 10725 + else if (ioapic_in_kernel(vcpu->kvm)) 10726 + kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 10728 10727 10729 10728 if (is_guest_mode(vcpu)) 10730 10729 vcpu->arch.load_eoi_exitmap_pending = true;
+11 -1
arch/x86/platform/efi/memmap.c
··· 92 92 */ 93 93 int __init efi_memmap_install(struct efi_memory_map_data *data) 94 94 { 95 + unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map; 96 + unsigned long flags = efi.memmap.flags; 97 + u64 phys = efi.memmap.phys_map; 98 + int ret; 99 + 95 100 efi_memmap_unmap(); 96 101 97 102 if (efi_enabled(EFI_PARAVIRT)) 98 103 return 0; 99 104 100 - return __efi_memmap_init(data); 105 + ret = __efi_memmap_init(data); 106 + if (ret) 107 + return ret; 108 + 109 + __efi_memmap_free(phys, size, flags); 110 + return 0; 101 111 } 102 112 103 113 /**
+2 -21
drivers/acpi/acpica/exregion.c
··· 44 44 struct acpi_mem_mapping *mm = mem_info->cur_mm; 45 45 u32 length; 46 46 acpi_size map_length; 47 - acpi_size page_boundary_map_length; 48 47 #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED 49 48 u32 remainder; 50 49 #endif ··· 137 138 map_length = (acpi_size) 138 139 ((mem_info->address + mem_info->length) - address); 139 140 140 - /* 141 - * If mapping the entire remaining portion of the region will cross 142 - * a page boundary, just map up to the page boundary, do not cross. 143 - * On some systems, crossing a page boundary while mapping regions 144 - * can cause warnings if the pages have different attributes 145 - * due to resource management. 146 - * 147 - * This has the added benefit of constraining a single mapping to 148 - * one page, which is similar to the original code that used a 4k 149 - * maximum window. 150 - */ 151 - page_boundary_map_length = (acpi_size) 152 - (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address); 153 - if (page_boundary_map_length == 0) { 154 - page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE; 155 - } 156 - 157 - if (map_length > page_boundary_map_length) { 158 - map_length = page_boundary_map_length; 159 - } 141 + if (map_length > ACPI_DEFAULT_PAGE_SIZE) 142 + map_length = ACPI_DEFAULT_PAGE_SIZE; 160 143 161 144 /* Create a new mapping starting at the address given */ 162 145
+4
drivers/acpi/internal.h
··· 302 302 void acpi_mipi_scan_crs_csi2(void); 303 303 void acpi_mipi_init_crs_csi2_swnodes(void); 304 304 void acpi_mipi_crs_csi2_cleanup(void); 305 + #ifdef CONFIG_X86 305 306 bool acpi_graph_ignore_port(acpi_handle handle); 307 + #else 308 + static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; } 309 + #endif 306 310 307 311 #endif /* _ACPI_INTERNAL_H_ */
+19 -9
drivers/acpi/mipi-disco-img.c
··· 725 725 acpi_mipi_del_crs_csi2(csi2); 726 726 } 727 727 728 - static const struct dmi_system_id dmi_ignore_port_nodes[] = { 729 - { 730 - .matches = { 731 - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 732 - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"), 733 - }, 734 - }, 735 - { } 728 + #ifdef CONFIG_X86 729 + #include <asm/cpu_device_id.h> 730 + #include <asm/intel-family.h> 731 + 732 + /* CPU matches for Dell generations with broken ACPI MIPI DISCO info */ 733 + static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = { 734 + X86_MATCH_VFM(INTEL_TIGERLAKE, NULL), 735 + X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL), 736 + X86_MATCH_VFM(INTEL_ALDERLAKE, NULL), 737 + X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL), 738 + X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL), 739 + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL), 740 + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL), 741 + {} 736 742 }; 737 743 738 744 static const char *strnext(const char *s1, const char *s2) ··· 767 761 static bool dmi_tested, ignore_port; 768 762 769 763 if (!dmi_tested) { 770 - ignore_port = dmi_first_match(dmi_ignore_port_nodes); 764 + if (dmi_name_in_vendors("Dell Inc.") && 765 + x86_match_cpu(dell_broken_mipi_disco_cpu_gens)) 766 + ignore_port = true; 767 + 771 768 dmi_tested = true; 772 769 } 773 770 ··· 803 794 kfree(orig_path); 804 795 return false; 805 796 } 797 + #endif
+8
drivers/ata/ahci.c
··· 1735 1735 if (ap->pflags & ATA_PFLAG_EXTERNAL) 1736 1736 return; 1737 1737 1738 + /* If no LPM states are supported by the HBA, do not bother with LPM */ 1739 + if ((ap->host->flags & ATA_HOST_NO_PART) && 1740 + (ap->host->flags & ATA_HOST_NO_SSC) && 1741 + (ap->host->flags & ATA_HOST_NO_DEVSLP)) { 1742 + ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n"); 1743 + return; 1744 + } 1745 + 1738 1746 /* user modified policy via module param */ 1739 1747 if (mobile_lpm_policy != -1) { 1740 1748 policy = mobile_lpm_policy;
+68 -35
drivers/base/regmap/regmap.c
··· 3101 3101 } 3102 3102 EXPORT_SYMBOL_GPL(regmap_fields_read); 3103 3103 3104 + static int _regmap_bulk_read(struct regmap *map, unsigned int reg, 3105 + unsigned int *regs, void *val, size_t val_count) 3106 + { 3107 + u32 *u32 = val; 3108 + u16 *u16 = val; 3109 + u8 *u8 = val; 3110 + int ret, i; 3111 + 3112 + map->lock(map->lock_arg); 3113 + 3114 + for (i = 0; i < val_count; i++) { 3115 + unsigned int ival; 3116 + 3117 + if (regs) { 3118 + if (!IS_ALIGNED(regs[i], map->reg_stride)) { 3119 + ret = -EINVAL; 3120 + goto out; 3121 + } 3122 + ret = _regmap_read(map, regs[i], &ival); 3123 + } else { 3124 + ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival); 3125 + } 3126 + if (ret != 0) 3127 + goto out; 3128 + 3129 + switch (map->format.val_bytes) { 3130 + case 4: 3131 + u32[i] = ival; 3132 + break; 3133 + case 2: 3134 + u16[i] = ival; 3135 + break; 3136 + case 1: 3137 + u8[i] = ival; 3138 + break; 3139 + default: 3140 + ret = -EINVAL; 3141 + goto out; 3142 + } 3143 + } 3144 + out: 3145 + map->unlock(map->lock_arg); 3146 + return ret; 3147 + } 3148 + 3104 3149 /** 3105 - * regmap_bulk_read() - Read multiple registers from the device 3150 + * regmap_bulk_read() - Read multiple sequential registers from the device 3106 3151 * 3107 3152 * @map: Register map to read from 3108 3153 * @reg: First register to be read from ··· 3177 3132 for (i = 0; i < val_count * val_bytes; i += val_bytes) 3178 3133 map->format.parse_inplace(val + i); 3179 3134 } else { 3180 - u32 *u32 = val; 3181 - u16 *u16 = val; 3182 - u8 *u8 = val; 3183 - 3184 - map->lock(map->lock_arg); 3185 - 3186 - for (i = 0; i < val_count; i++) { 3187 - unsigned int ival; 3188 - 3189 - ret = _regmap_read(map, reg + regmap_get_offset(map, i), 3190 - &ival); 3191 - if (ret != 0) 3192 - goto out; 3193 - 3194 - switch (map->format.val_bytes) { 3195 - case 4: 3196 - u32[i] = ival; 3197 - break; 3198 - case 2: 3199 - u16[i] = ival; 3200 - break; 3201 - case 1: 3202 - u8[i] = ival; 3203 - break; 3204 - 
default: 3205 - ret = -EINVAL; 3206 - goto out; 3207 - } 3208 - } 3209 - 3210 - out: 3211 - map->unlock(map->lock_arg); 3135 + ret = _regmap_bulk_read(map, reg, NULL, val, val_count); 3212 3136 } 3213 - 3214 3137 if (!ret) 3215 3138 trace_regmap_bulk_read(map, reg, val, val_bytes * val_count); 3216 - 3217 3139 return ret; 3218 3140 } 3219 3141 EXPORT_SYMBOL_GPL(regmap_bulk_read); 3142 + 3143 + /** 3144 + * regmap_multi_reg_read() - Read multiple non-sequential registers from the device 3145 + * 3146 + * @map: Register map to read from 3147 + * @regs: Array of registers to read from 3148 + * @val: Pointer to store read value, in native register size for device 3149 + * @val_count: Number of registers to read 3150 + * 3151 + * A value of zero will be returned on success, a negative errno will 3152 + * be returned in error cases. 3153 + */ 3154 + int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val, 3155 + size_t val_count) 3156 + { 3157 + if (val_count == 0) 3158 + return -EINVAL; 3159 + 3160 + return _regmap_bulk_read(map, 0, regs, val, val_count); 3161 + } 3162 + EXPORT_SYMBOL_GPL(regmap_multi_reg_read); 3220 3163 3221 3164 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 3222 3165 unsigned int mask, unsigned int val,
+1 -1
drivers/dma/Kconfig
··· 394 394 395 395 config MCF_EDMA 396 396 tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs" 397 - depends on M5441x || COMPILE_TEST 397 + depends on M5441x || (COMPILE_TEST && FSL_EDMA=n) 398 398 select DMA_ENGINE 399 399 select DMA_VIRTUAL_CHANNELS 400 400 help
+3 -1
drivers/dma/idxd/irq.c
··· 611 611 612 612 spin_unlock(&irq_entry->list_lock); 613 613 614 - list_for_each_entry(desc, &flist, list) { 614 + list_for_each_entry_safe(desc, n, &flist, list) { 615 615 /* 616 616 * Check against the original status as ABORT is software defined 617 617 * and 0xff, which DSA_COMP_STATUS_MASK can mask out. 618 618 */ 619 + list_del(&desc->list); 620 + 619 621 if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { 620 622 idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true); 621 623 continue;
+30 -25
drivers/dma/ioat/init.c
··· 534 534 return err; 535 535 } 536 536 537 - static int ioat_register(struct ioatdma_device *ioat_dma) 538 - { 539 - int err = dma_async_device_register(&ioat_dma->dma_dev); 540 - 541 - if (err) { 542 - ioat_disable_interrupts(ioat_dma); 543 - dma_pool_destroy(ioat_dma->completion_pool); 544 - } 545 - 546 - return err; 547 - } 548 - 549 537 static void ioat_dma_remove(struct ioatdma_device *ioat_dma) 550 538 { 551 539 struct dma_device *dma = &ioat_dma->dma_dev; ··· 1169 1181 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); 1170 1182 } 1171 1183 1172 - err = ioat_register(ioat_dma); 1184 + err = dma_async_device_register(&ioat_dma->dma_dev); 1173 1185 if (err) 1174 - return err; 1186 + goto err_disable_interrupts; 1175 1187 1176 1188 ioat_kobject_add(ioat_dma, &ioat_ktype); 1177 1189 ··· 1180 1192 1181 1193 /* disable relaxed ordering */ 1182 1194 err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16); 1183 - if (err) 1184 - return pcibios_err_to_errno(err); 1195 + if (err) { 1196 + err = pcibios_err_to_errno(err); 1197 + goto err_disable_interrupts; 1198 + } 1185 1199 1186 1200 /* clear relaxed ordering enable */ 1187 1201 val16 &= ~PCI_EXP_DEVCTL_RELAX_EN; 1188 1202 err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16); 1189 - if (err) 1190 - return pcibios_err_to_errno(err); 1203 + if (err) { 1204 + err = pcibios_err_to_errno(err); 1205 + goto err_disable_interrupts; 1206 + } 1191 1207 1192 1208 if (ioat_dma->cap & IOAT_CAP_DPS) 1193 1209 writeb(ioat_pending_level + 1, 1194 1210 ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET); 1195 1211 1196 1212 return 0; 1213 + 1214 + err_disable_interrupts: 1215 + ioat_disable_interrupts(ioat_dma); 1216 + dma_pool_destroy(ioat_dma->completion_pool); 1217 + return err; 1197 1218 } 1198 1219 1199 1220 static void ioat_shutdown(struct pci_dev *pdev) ··· 1347 1350 void __iomem * const *iomap; 1348 1351 struct device *dev = &pdev->dev; 1349 1352 struct ioatdma_device *device; 1353 + unsigned int i; 1354 + u8 version; 
1350 1355 int err; 1351 1356 1352 1357 err = pcim_enable_device(pdev); ··· 1362 1363 if (!iomap) 1363 1364 return -ENOMEM; 1364 1365 1366 + version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET); 1367 + if (version < IOAT_VER_3_0) 1368 + return -ENODEV; 1369 + 1365 1370 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1366 1371 if (err) 1367 1372 return err; ··· 1376 1373 pci_set_master(pdev); 1377 1374 pci_set_drvdata(pdev, device); 1378 1375 1379 - device->version = readb(device->reg_base + IOAT_VER_OFFSET); 1376 + device->version = version; 1380 1377 if (device->version >= IOAT_VER_3_4) 1381 1378 ioat_dca_enabled = 0; 1382 - if (device->version >= IOAT_VER_3_0) { 1383 - if (is_skx_ioat(pdev)) 1384 - device->version = IOAT_VER_3_2; 1385 - err = ioat3_dma_probe(device, ioat_dca_enabled); 1386 - } else 1387 - return -ENODEV; 1388 1379 1380 + if (is_skx_ioat(pdev)) 1381 + device->version = IOAT_VER_3_2; 1382 + 1383 + err = ioat3_dma_probe(device, ioat_dca_enabled); 1389 1384 if (err) { 1385 + for (i = 0; i < IOAT_MAX_CHANS; i++) 1386 + kfree(device->idx[i]); 1387 + kfree(device); 1390 1388 dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); 1391 1389 return -ENODEV; 1392 1390 } ··· 1449 1445 static void __exit ioat_exit_module(void) 1450 1446 { 1451 1447 pci_unregister_driver(&ioat_pci_driver); 1448 + kmem_cache_destroy(ioat_sed_cache); 1452 1449 kmem_cache_destroy(ioat_cache); 1453 1450 } 1454 1451 module_exit(ioat_exit_module);
+1 -4
drivers/dma/ti/k3-udma-glue.c
··· 200 200 201 201 ret = of_k3_udma_glue_parse(udmax_np, common); 202 202 if (ret) 203 - goto out_put_spec; 203 + return ret; 204 204 205 205 ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn); 206 - 207 - out_put_spec: 208 - of_node_put(udmax_np); 209 206 return ret; 210 207 } 211 208
+2 -2
drivers/dma/xilinx/xdma.c
··· 885 885 u32 st; 886 886 bool repeat_tx; 887 887 888 + spin_lock(&xchan->vchan.lock); 889 + 888 890 if (xchan->stop_requested) 889 891 complete(&xchan->last_interrupt); 890 - 891 - spin_lock(&xchan->vchan.lock); 892 892 893 893 /* get submitted request */ 894 894 vd = vchan_next_desc(&xchan->vchan);
-9
drivers/firmware/efi/memmap.c
··· 15 15 #include <asm/early_ioremap.h> 16 16 #include <asm/efi.h> 17 17 18 - #ifndef __efi_memmap_free 19 - #define __efi_memmap_free(phys, size, flags) do { } while (0) 20 - #endif 21 - 22 18 /** 23 19 * __efi_memmap_init - Common code for mapping the EFI memory map 24 20 * @data: EFI memory map data ··· 46 50 pr_err("Could not map the memory map!\n"); 47 51 return -ENOMEM; 48 52 } 49 - 50 - if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) 51 - __efi_memmap_free(efi.memmap.phys_map, 52 - efi.memmap.desc_size * efi.memmap.nr_map, 53 - efi.memmap.flags); 54 53 55 54 map.phys_map = data->phys_map; 56 55 map.nr_map = data->size / data->desc_size;
+3 -1
drivers/firmware/psci/psci.c
··· 497 497 498 498 static int psci_system_suspend(unsigned long unused) 499 499 { 500 + int err; 500 501 phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); 501 502 502 - return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), 503 + err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), 503 504 pa_cpu_resume, 0, 0); 505 + return psci_to_linux_errno(err); 504 506 } 505 507 506 508 static int psci_system_suspend_enter(suspend_state_t state)
-34
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 41 41 #include <linux/dma-buf.h> 42 42 #include <linux/dma-fence-array.h> 43 43 #include <linux/pci-p2pdma.h> 44 - #include <linux/pm_runtime.h> 45 - #include "amdgpu_trace.h" 46 44 47 45 /** 48 46 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation ··· 56 58 struct drm_gem_object *obj = dmabuf->priv; 57 59 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 58 60 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 59 - int r; 60 61 61 62 if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) 62 63 attach->peer2peer = false; 63 64 64 - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 65 - trace_amdgpu_runpm_reference_dumps(1, __func__); 66 - if (r < 0) 67 - goto out; 68 - 69 65 return 0; 70 - 71 - out: 72 - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 73 - trace_amdgpu_runpm_reference_dumps(0, __func__); 74 - return r; 75 - } 76 - 77 - /** 78 - * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation 79 - * 80 - * @dmabuf: DMA-buf where we remove the attachment from 81 - * @attach: the attachment to remove 82 - * 83 - * Called when an attachment is removed from the DMA-buf. 84 - */ 85 - static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, 86 - struct dma_buf_attachment *attach) 87 - { 88 - struct drm_gem_object *obj = dmabuf->priv; 89 - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 90 - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 91 - 92 - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 93 - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 94 - trace_amdgpu_runpm_reference_dumps(0, __func__); 95 66 } 96 67 97 68 /** ··· 234 267 235 268 const struct dma_buf_ops amdgpu_dmabuf_ops = { 236 269 .attach = amdgpu_dma_buf_attach, 237 - .detach = amdgpu_dma_buf_detach, 238 270 .pin = amdgpu_dma_buf_pin, 239 271 .unpin = amdgpu_dma_buf_unpin, 240 272 .map_dma_buf = amdgpu_dma_buf_map,
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 181 181 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, 182 182 seq, flags | AMDGPU_FENCE_FLAG_INT); 183 183 pm_runtime_get_noresume(adev_to_drm(adev)->dev); 184 - trace_amdgpu_runpm_reference_dumps(1, __func__); 185 184 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; 186 185 if (unlikely(rcu_dereference_protected(*ptr, 1))) { 187 186 struct dma_fence *old; ··· 308 309 dma_fence_put(fence); 309 310 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 310 311 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 311 - trace_amdgpu_runpm_reference_dumps(0, __func__); 312 312 } while (last_seq != seq); 313 313 314 314 return true;
+34 -32
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 684 684 struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; 685 685 struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; 686 686 unsigned int ndw; 687 - signed long r; 687 + int r; 688 688 uint32_t seq; 689 689 690 - if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready || 691 - !down_read_trylock(&adev->reset_domain->sem)) { 690 + /* 691 + * A GPU reset should flush all TLBs anyway, so no need to do 692 + * this while one is ongoing. 693 + */ 694 + if (!down_read_trylock(&adev->reset_domain->sem)) 695 + return 0; 692 696 697 + if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) { 693 698 if (adev->gmc.flush_tlb_needs_extra_type_2) 694 699 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, 695 700 2, all_hub, ··· 708 703 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, 709 704 flush_type, all_hub, 710 705 inst); 711 - return 0; 712 - } 706 + r = 0; 707 + } else { 708 + /* 2 dwords flush + 8 dwords fence */ 709 + ndw = kiq->pmf->invalidate_tlbs_size + 8; 713 710 714 - /* 2 dwords flush + 8 dwords fence */ 715 - ndw = kiq->pmf->invalidate_tlbs_size + 8; 711 + if (adev->gmc.flush_tlb_needs_extra_type_2) 712 + ndw += kiq->pmf->invalidate_tlbs_size; 716 713 717 - if (adev->gmc.flush_tlb_needs_extra_type_2) 718 - ndw += kiq->pmf->invalidate_tlbs_size; 714 + if (adev->gmc.flush_tlb_needs_extra_type_0) 715 + ndw += kiq->pmf->invalidate_tlbs_size; 719 716 720 - if (adev->gmc.flush_tlb_needs_extra_type_0) 721 - ndw += kiq->pmf->invalidate_tlbs_size; 717 + spin_lock(&adev->gfx.kiq[inst].ring_lock); 718 + amdgpu_ring_alloc(ring, ndw); 719 + if (adev->gmc.flush_tlb_needs_extra_type_2) 720 + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); 722 721 723 - spin_lock(&adev->gfx.kiq[inst].ring_lock); 724 - amdgpu_ring_alloc(ring, ndw); 725 - if (adev->gmc.flush_tlb_needs_extra_type_2) 726 - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); 722 + if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0) 723 + kiq->pmf->kiq_invalidate_tlbs(ring, 
pasid, 0, all_hub); 727 724 728 - if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0) 729 - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub); 725 + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); 726 + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); 727 + if (r) { 728 + amdgpu_ring_undo(ring); 729 + spin_unlock(&adev->gfx.kiq[inst].ring_lock); 730 + goto error_unlock_reset; 731 + } 730 732 731 - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); 732 - r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); 733 - if (r) { 734 - amdgpu_ring_undo(ring); 733 + amdgpu_ring_commit(ring); 735 734 spin_unlock(&adev->gfx.kiq[inst].ring_lock); 736 - goto error_unlock_reset; 735 + if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) { 736 + dev_err(adev->dev, "timeout waiting for kiq fence\n"); 737 + r = -ETIME; 738 + } 737 739 } 738 - 739 - amdgpu_ring_commit(ring); 740 - spin_unlock(&adev->gfx.kiq[inst].ring_lock); 741 - r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); 742 - if (r < 1) { 743 - dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); 744 - r = -ETIME; 745 - goto error_unlock_reset; 746 - } 747 - r = 0; 748 740 749 741 error_unlock_reset: 750 742 up_read(&adev->reset_domain->sem);
-15
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
··· 554 554 __entry->value) 555 555 ); 556 556 557 - TRACE_EVENT(amdgpu_runpm_reference_dumps, 558 - TP_PROTO(uint32_t index, const char *func), 559 - TP_ARGS(index, func), 560 - TP_STRUCT__entry( 561 - __field(uint32_t, index) 562 - __string(func, func) 563 - ), 564 - TP_fast_assign( 565 - __entry->index = index; 566 - __assign_str(func); 567 - ), 568 - TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n", 569 - __entry->index, 570 - __get_str(func)) 571 - ); 572 557 #undef AMDGPU_JOB_GET_TIMELINE_NAME 573 558 #endif 574 559
+13 -2
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
··· 4195 4195 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, 4196 4196 struct amdgpu_cu_info *cu_info) 4197 4197 { 4198 - int i, j, k, counter, xcc_id, active_cu_number = 0; 4199 - u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; 4198 + int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0; 4199 + u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp; 4200 4200 unsigned disable_masks[4 * 4]; 4201 + bool is_symmetric_cus; 4201 4202 4202 4203 if (!adev || !cu_info) 4203 4204 return -EINVAL; ··· 4216 4215 4217 4216 mutex_lock(&adev->grbm_idx_mutex); 4218 4217 for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) { 4218 + is_symmetric_cus = true; 4219 4219 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 4220 4220 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 4221 4221 mask = 1; ··· 4244 4242 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 4245 4243 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; 4246 4244 } 4245 + if (i && is_symmetric_cus && prev_counter != counter) 4246 + is_symmetric_cus = false; 4247 + prev_counter = counter; 4248 + } 4249 + if (is_symmetric_cus) { 4250 + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG); 4251 + tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1); 4252 + tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1); 4253 + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp); 4247 4254 } 4248 4255 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 4249 4256 xcc_id);
+48 -28
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 154 154 void *pkt, int size, 155 155 int api_status_off) 156 156 { 157 - int ndw = size / 4; 158 - signed long r; 159 - union MESAPI__MISC *x_pkt = pkt; 160 - struct MES_API_STATUS *api_status; 157 + union MESAPI__QUERY_MES_STATUS mes_status_pkt; 158 + signed long timeout = 3000000; /* 3000 ms */ 161 159 struct amdgpu_device *adev = mes->adev; 162 160 struct amdgpu_ring *ring = &mes->ring; 163 - unsigned long flags; 164 - signed long timeout = 3000000; /* 3000 ms */ 161 + struct MES_API_STATUS *api_status; 162 + union MESAPI__MISC *x_pkt = pkt; 165 163 const char *op_str, *misc_op_str; 166 - u32 fence_offset; 167 - u64 fence_gpu_addr; 168 - u64 *fence_ptr; 164 + unsigned long flags; 165 + u64 status_gpu_addr; 166 + u32 status_offset; 167 + u64 *status_ptr; 168 + signed long r; 169 169 int ret; 170 170 171 171 if (x_pkt->header.opcode >= MES_SCH_API_MAX) ··· 177 177 /* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */ 178 178 timeout = 15 * 600 * 1000; 179 179 } 180 - BUG_ON(size % 4 != 0); 181 180 182 - ret = amdgpu_device_wb_get(adev, &fence_offset); 181 + ret = amdgpu_device_wb_get(adev, &status_offset); 183 182 if (ret) 184 183 return ret; 185 - fence_gpu_addr = 186 - adev->wb.gpu_addr + (fence_offset * 4); 187 - fence_ptr = (u64 *)&adev->wb.wb[fence_offset]; 188 - *fence_ptr = 0; 184 + 185 + status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4); 186 + status_ptr = (u64 *)&adev->wb.wb[status_offset]; 187 + *status_ptr = 0; 189 188 190 189 spin_lock_irqsave(&mes->ring_lock, flags); 191 - if (amdgpu_ring_alloc(ring, ndw)) { 192 - spin_unlock_irqrestore(&mes->ring_lock, flags); 193 - amdgpu_device_wb_free(adev, fence_offset); 194 - return -ENOMEM; 195 - } 190 + r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4); 191 + if (r) 192 + goto error_unlock_free; 196 193 197 194 api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off); 198 - api_status->api_completion_fence_addr = fence_gpu_addr; 195 + 
api_status->api_completion_fence_addr = status_gpu_addr; 199 196 api_status->api_completion_fence_value = 1; 200 197 201 - amdgpu_ring_write_multiple(ring, pkt, ndw); 198 + amdgpu_ring_write_multiple(ring, pkt, size / 4); 199 + 200 + memset(&mes_status_pkt, 0, sizeof(mes_status_pkt)); 201 + mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER; 202 + mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS; 203 + mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 204 + mes_status_pkt.api_status.api_completion_fence_addr = 205 + ring->fence_drv.gpu_addr; 206 + mes_status_pkt.api_status.api_completion_fence_value = 207 + ++ring->fence_drv.sync_seq; 208 + 209 + amdgpu_ring_write_multiple(ring, &mes_status_pkt, 210 + sizeof(mes_status_pkt) / 4); 211 + 202 212 amdgpu_ring_commit(ring); 203 213 spin_unlock_irqrestore(&mes->ring_lock, flags); 204 214 ··· 216 206 misc_op_str = mes_v11_0_get_misc_op_string(x_pkt); 217 207 218 208 if (misc_op_str) 219 - dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str); 209 + dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, 210 + misc_op_str); 220 211 else if (op_str) 221 212 dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str); 222 213 else 223 - dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode); 214 + dev_dbg(adev->dev, "MES msg=%d was emitted\n", 215 + x_pkt->header.opcode); 224 216 225 - r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout); 226 - amdgpu_device_wb_free(adev, fence_offset); 227 - if (r < 1) { 217 + r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout); 218 + if (r < 1 || !*status_ptr) { 228 219 229 220 if (misc_op_str) 230 221 dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n", ··· 240 229 while (halt_if_hws_hang) 241 230 schedule(); 242 231 243 - return -ETIMEDOUT; 232 + r = -ETIMEDOUT; 233 + goto error_wb_free; 244 234 } 245 235 236 + amdgpu_device_wb_free(adev, status_offset); 246 237 return 0; 238 + 239 + 
error_unlock_free: 240 + spin_unlock_irqrestore(&mes->ring_lock, flags); 241 + 242 + error_wb_free: 243 + amdgpu_device_wb_free(adev, status_offset); 244 + return r; 247 245 } 248 246 249 247 static int convert_to_mes_queue_type(int queue_type)
+5
drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
··· 32 32 #include "mp/mp_14_0_2_sh_mask.h" 33 33 34 34 MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin"); 35 + MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin"); 35 36 MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin"); 37 + MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin"); 36 38 37 39 /* For large FW files the time to complete can be very long */ 38 40 #define USBC_PD_POLLING_LIMIT_S 240 ··· 66 64 case IP_VERSION(14, 0, 2): 67 65 case IP_VERSION(14, 0, 3): 68 66 err = psp_init_sos_microcode(psp, ucode_prefix); 67 + if (err) 68 + return err; 69 + err = psp_init_ta_microcode(psp, ucode_prefix); 69 70 if (err) 70 71 return err; 71 72 break;
+1 -1
drivers/gpu/drm/amd/display/Kconfig
··· 8 8 depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64 9 9 select SND_HDA_COMPONENT if SND_HDA_CORE 10 10 # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752 11 - select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && (!ARM64 || !CC_IS_CLANG) 11 + select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV)) 12 12 help 13 13 Choose this option if you want to use the new display engine 14 14 support for AMDGPU. This adds required support for Vega and
+10 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 9169 9169 9170 9170 trace_amdgpu_dm_atomic_commit_tail_begin(state); 9171 9171 9172 - if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed) 9173 - dc_allow_idle_optimizations(dm->dc, false); 9174 - 9175 9172 drm_atomic_helper_update_legacy_modeset_state(dev, state); 9176 9173 drm_dp_mst_atomic_wait_for_dependencies(state); 9177 9174 ··· 11437 11440 mutex_unlock(&adev->dm.dc_lock); 11438 11441 } 11439 11442 11443 + static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc) 11444 + { 11445 + if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter) 11446 + dc_exit_ips_for_hw_access(dc); 11447 + } 11448 + 11440 11449 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, 11441 11450 u32 value, const char *func_name) 11442 11451 { ··· 11453 11450 return; 11454 11451 } 11455 11452 #endif 11453 + 11454 + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); 11456 11455 cgs_write_register(ctx->cgs_device, address, value); 11457 11456 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); 11458 11457 } ··· 11477 11472 ASSERT(false); 11478 11473 return 0; 11479 11474 } 11475 + 11476 + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); 11480 11477 11481 11478 value = cgs_read_register(ctx->cgs_device, address); 11482 11479
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
··· 177 177 .urgent_latency_pixel_data_only_us = 4.0, 178 178 .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, 179 179 .urgent_latency_vm_data_only_us = 4.0, 180 - .dram_clock_change_latency_us = 11.72, 180 + .dram_clock_change_latency_us = 34.0, 181 181 .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, 182 182 .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, 183 183 .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
··· 215 215 .urgent_latency_pixel_data_only_us = 4.0, 216 216 .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, 217 217 .urgent_latency_vm_data_only_us = 4.0, 218 - .dram_clock_change_latency_us = 11.72, 218 + .dram_clock_change_latency_us = 34, 219 219 .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, 220 220 .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, 221 221 .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+72
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 1439 1439 } 1440 1440 } 1441 1441 } 1442 + 1443 + static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx) 1444 + { 1445 + /* Calculate average pixel count per TU, return false if under ~2.00 to 1446 + * avoid empty TUs. This is only required for DPIA tunneling as empty TUs 1447 + * are legal to generate for native DP links. Assume TU size 64 as there 1448 + * is currently no scenario where it's reprogrammed from HW default. 1449 + * MTPs have no such limitation, so this does not affect MST use cases. 1450 + */ 1451 + unsigned int pix_clk_mhz; 1452 + unsigned int symclk_mhz; 1453 + unsigned int avg_pix_per_tu_x1000; 1454 + unsigned int tu_size_bytes = 64; 1455 + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 1456 + struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings; 1457 + const struct dc *dc = pipe_ctx->stream->link->dc; 1458 + 1459 + if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) 1460 + return false; 1461 + 1462 + // Not necessary for MST configurations 1463 + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 1464 + return false; 1465 + 1466 + pix_clk_mhz = timing->pix_clk_100hz / 10000; 1467 + 1468 + // If this is true, can't block due to dynamic ODM 1469 + if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz) 1470 + return false; 1471 + 1472 + switch (link_settings->link_rate) { 1473 + case LINK_RATE_LOW: 1474 + symclk_mhz = 162; 1475 + break; 1476 + case LINK_RATE_HIGH: 1477 + symclk_mhz = 270; 1478 + break; 1479 + case LINK_RATE_HIGH2: 1480 + symclk_mhz = 540; 1481 + break; 1482 + case LINK_RATE_HIGH3: 1483 + symclk_mhz = 810; 1484 + break; 1485 + default: 1486 + // We shouldn't be tunneling any other rates, something is wrong 1487 + ASSERT(0); 1488 + return false; 1489 + } 1490 + 1491 + avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes) 1492 + / (symclk_mhz * link_settings->lane_count); 1493 + 1494 + // Add small empirically-decided margin to 
account for potential jitter 1495 + return (avg_pix_per_tu_x1000 < 2020); 1496 + } 1497 + 1498 + bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx) 1499 + { 1500 + struct dc *dc = pipe_ctx->stream->ctx->dc; 1501 + 1502 + if (!is_h_timing_divisible_by_2(pipe_ctx->stream)) 1503 + return false; 1504 + 1505 + if (should_avoid_empty_tu(pipe_ctx)) 1506 + return false; 1507 + 1508 + if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && 1509 + dc->debug.enable_dp_dig_pixel_rate_div_policy) 1510 + return true; 1511 + 1512 + return false; 1513 + }
+2
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
··· 95 95 void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, 96 96 int num_pipes, uint32_t v_total_min, uint32_t v_total_max); 97 97 98 + bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx); 99 + 98 100 #endif /* __DC_HWSS_DCN35_H__ */
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
··· 158 158 .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, 159 159 .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, 160 160 .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, 161 - .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, 161 + .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy, 162 162 .dsc_pg_control = dcn35_dsc_pg_control, 163 163 .dsc_pg_status = dcn32_dsc_pg_status, 164 164 .enable_plane = dcn35_enable_plane,
+2
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
··· 164 164 165 165 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 166 166 if (table[i].ulSupportedSCLK != 0) { 167 + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) 168 + continue; 167 169 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = 168 170 table[i].usVoltageID; 169 171 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+4
drivers/gpu/drm/i915/display/intel_dp.c
··· 442 442 struct intel_encoder *encoder = &intel_dig_port->base; 443 443 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 444 444 445 + /* eDP MSO is not compatible with joiner */ 446 + if (intel_dp->mso_link_count) 447 + return false; 448 + 445 449 return DISPLAY_VER(dev_priv) >= 12 || 446 450 (DISPLAY_VER(dev_priv) == 11 && 447 451 encoder->port != PORT_A);
+2
drivers/gpu/drm/radeon/sumo_dpm.c
··· 1619 1619 1620 1620 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 1621 1621 if (table[i].ulSupportedSCLK != 0) { 1622 + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) 1623 + continue; 1622 1624 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = 1623 1625 table[i].usVoltageID; 1624 1626 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+1 -1
drivers/gpu/drm/vmwgfx/Kconfig
··· 2 2 config DRM_VMWGFX 3 3 tristate "DRM driver for VMware Virtual GPU" 4 4 depends on DRM && PCI && MMU 5 - depends on X86 || ARM64 5 + depends on (X86 && HYPERVISOR_GUEST) || ARM64 6 6 select DRM_TTM 7 7 select DRM_TTM_HELPER 8 8 select MAPPING_DIRTY_HELPERS
+2 -2
drivers/gpu/drm/xe/xe_guc.c
··· 631 631 struct xe_device *xe = guc_to_xe(guc); 632 632 int err; 633 633 634 - guc_enable_irq(guc); 635 - 636 634 if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) { 637 635 struct xe_gt *gt = guc_to_gt(guc); 638 636 struct xe_tile *tile = gt_to_tile(gt); ··· 638 640 err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc); 639 641 if (err) 640 642 return err; 643 + } else { 644 + guc_enable_irq(guc); 641 645 } 642 646 643 647 xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
+18 -19
drivers/hv/hv.c
··· 45 45 * This involves a hypercall. 46 46 */ 47 47 int hv_post_message(union hv_connection_id connection_id, 48 - enum hv_message_type message_type, 49 - void *payload, size_t payload_size) 48 + enum hv_message_type message_type, 49 + void *payload, size_t payload_size) 50 50 { 51 51 struct hv_input_post_message *aligned_msg; 52 52 unsigned long flags; ··· 86 86 status = HV_STATUS_INVALID_PARAMETER; 87 87 } else { 88 88 status = hv_do_hypercall(HVCALL_POST_MESSAGE, 89 - aligned_msg, NULL); 89 + aligned_msg, NULL); 90 90 } 91 91 92 92 local_irq_restore(flags); ··· 111 111 112 112 hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask), 113 113 GFP_KERNEL); 114 - if (hv_context.hv_numa_map == NULL) { 114 + if (!hv_context.hv_numa_map) { 115 115 pr_err("Unable to allocate NUMA map\n"); 116 116 goto err; 117 117 } ··· 120 120 hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); 121 121 122 122 tasklet_init(&hv_cpu->msg_dpc, 123 - vmbus_on_msg_dpc, (unsigned long) hv_cpu); 123 + vmbus_on_msg_dpc, (unsigned long)hv_cpu); 124 124 125 125 if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) { 126 126 hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC); 127 - if (hv_cpu->post_msg_page == NULL) { 127 + if (!hv_cpu->post_msg_page) { 128 128 pr_err("Unable to allocate post msg page\n"); 129 129 goto err; 130 130 } ··· 147 147 if (!ms_hyperv.paravisor_present && !hv_root_partition) { 148 148 hv_cpu->synic_message_page = 149 149 (void *)get_zeroed_page(GFP_ATOMIC); 150 - if (hv_cpu->synic_message_page == NULL) { 150 + if (!hv_cpu->synic_message_page) { 151 151 pr_err("Unable to allocate SYNIC message page\n"); 152 152 goto err; 153 153 } 154 154 155 155 hv_cpu->synic_event_page = 156 156 (void *)get_zeroed_page(GFP_ATOMIC); 157 - if (hv_cpu->synic_event_page == NULL) { 157 + if (!hv_cpu->synic_event_page) { 158 158 pr_err("Unable to allocate SYNIC event page\n"); 159 159 160 160 free_page((unsigned long)hv_cpu->synic_message_page); ··· 203 203 return 
ret; 204 204 } 205 205 206 - 207 206 void hv_synic_free(void) 208 207 { 209 208 int cpu, ret; 210 209 211 210 for_each_present_cpu(cpu) { 212 - struct hv_per_cpu_context *hv_cpu 213 - = per_cpu_ptr(hv_context.cpu_context, cpu); 211 + struct hv_per_cpu_context *hv_cpu = 212 + per_cpu_ptr(hv_context.cpu_context, cpu); 214 213 215 214 /* It's better to leak the page if the encryption fails. */ 216 215 if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) { ··· 261 262 */ 262 263 void hv_synic_enable_regs(unsigned int cpu) 263 264 { 264 - struct hv_per_cpu_context *hv_cpu 265 - = per_cpu_ptr(hv_context.cpu_context, cpu); 265 + struct hv_per_cpu_context *hv_cpu = 266 + per_cpu_ptr(hv_context.cpu_context, cpu); 266 267 union hv_synic_simp simp; 267 268 union hv_synic_siefp siefp; 268 269 union hv_synic_sint shared_sint; ··· 276 277 /* Mask out vTOM bit. ioremap_cache() maps decrypted */ 277 278 u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) & 278 279 ~ms_hyperv.shared_gpa_boundary; 279 - hv_cpu->synic_message_page 280 - = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 280 + hv_cpu->synic_message_page = 281 + (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 281 282 if (!hv_cpu->synic_message_page) 282 283 pr_err("Fail to map synic message page.\n"); 283 284 } else { ··· 295 296 /* Mask out vTOM bit. 
ioremap_cache() maps decrypted */ 296 297 u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) & 297 298 ~ms_hyperv.shared_gpa_boundary; 298 - hv_cpu->synic_event_page 299 - = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 299 + hv_cpu->synic_event_page = 300 + (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 300 301 if (!hv_cpu->synic_event_page) 301 302 pr_err("Fail to map synic event page.\n"); 302 303 } else { ··· 347 348 */ 348 349 void hv_synic_disable_regs(unsigned int cpu) 349 350 { 350 - struct hv_per_cpu_context *hv_cpu 351 - = per_cpu_ptr(hv_context.cpu_context, cpu); 351 + struct hv_per_cpu_context *hv_cpu = 352 + per_cpu_ptr(hv_context.cpu_context, cpu); 352 353 union hv_synic_sint shared_sint; 353 354 union hv_synic_simp simp; 354 355 union hv_synic_siefp siefp;
+80 -110
drivers/hv/hv_balloon.c
··· 25 25 #include <linux/notifier.h> 26 26 #include <linux/percpu_counter.h> 27 27 #include <linux/page_reporting.h> 28 + #include <linux/sizes.h> 28 29 29 30 #include <linux/hyperv.h> 30 31 #include <asm/hyperv-tlfs.h> ··· 41 40 * 42 41 * Begin protocol definitions. 43 42 */ 44 - 45 - 46 43 47 44 /* 48 45 * Protocol versions. The low word is the minor version, the high word the major ··· 70 71 DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10 71 72 }; 72 73 73 - 74 - 75 74 /* 76 75 * Message Types 77 76 */ ··· 98 101 DM_VERSION_1_MAX = 12 99 102 }; 100 103 101 - 102 104 /* 103 105 * Structures defining the dynamic memory management 104 106 * protocol. ··· 110 114 }; 111 115 __u32 version; 112 116 } __packed; 113 - 114 117 115 118 union dm_caps { 116 119 struct { ··· 143 148 __u64 page_range; 144 149 } __packed; 145 150 146 - 147 - 148 151 /* 149 152 * The header for all dynamic memory messages: 150 153 * ··· 166 173 struct dm_header hdr; 167 174 __u8 data[]; /* enclosed message */ 168 175 } __packed; 169 - 170 176 171 177 /* 172 178 * Specific message types supporting the dynamic memory protocol. ··· 263 271 __u32 io_diff; 264 272 } __packed; 265 273 266 - 267 274 /* 268 275 * Message to ask the guest to allocate memory - balloon up message. 269 276 * This message is sent from the host to the guest. The guest may not be ··· 277 286 __u32 reservedz; 278 287 } __packed; 279 288 280 - 281 289 /* 282 290 * Balloon response message; this message is sent from the guest 283 291 * to the host in response to the balloon message. 284 292 * 285 293 * reservedz: Reserved; must be set to zero. 286 294 * more_pages: If FALSE, this is the last message of the transaction. 287 - * if TRUE there will atleast one more message from the guest. 295 + * if TRUE there will be at least one more message from the guest. 288 296 * 289 297 * range_count: The number of ranges in the range array. 290 298 * ··· 304 314 * to the guest to give guest more memory. 
305 315 * 306 316 * more_pages: If FALSE, this is the last message of the transaction. 307 - * if TRUE there will atleast one more message from the guest. 317 + * if TRUE there will be at least one more message from the guest. 308 318 * 309 319 * reservedz: Reserved; must be set to zero. 310 320 * ··· 331 341 struct dm_unballoon_response { 332 342 struct dm_header hdr; 333 343 } __packed; 334 - 335 344 336 345 /* 337 346 * Hot add request message. Message sent from the host to the guest. ··· 379 390 MAX_INFO_TYPE 380 391 }; 381 392 382 - 383 393 /* 384 394 * Header for the information message. 385 395 */ ··· 413 425 * The range start_pfn : end_pfn specifies the range 414 426 * that the host has asked us to hot add. The range 415 427 * start_pfn : ha_end_pfn specifies the range that we have 416 - * currently hot added. We hot add in multiples of 128M 417 - * chunks; it is possible that we may not be able to bring 418 - * online all the pages in the region. The range 428 + * currently hot added. We hot add in chunks equal to the 429 + * memory block size; it is possible that we may not be able 430 + * to bring online all the pages in the region. The range 419 431 * covered_start_pfn:covered_end_pfn defines the pages that can 420 - * be brough online. 432 + * be brought online. 
421 433 */ 422 434 423 435 struct hv_hotadd_state { ··· 468 480 469 481 static int hv_hypercall_multi_failure; 470 482 471 - module_param(hot_add, bool, (S_IRUGO | S_IWUSR)); 483 + module_param(hot_add, bool, 0644); 472 484 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); 473 485 474 - module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR)); 486 + module_param(pressure_report_delay, uint, 0644); 475 487 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure"); 476 488 static atomic_t trans_id = ATOMIC_INIT(0); 477 489 ··· 490 502 DM_INIT_ERROR 491 503 }; 492 504 493 - 494 505 static __u8 recv_buffer[HV_HYP_PAGE_SIZE]; 495 506 static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE]; 507 + 508 + static unsigned long ha_pages_in_chunk; 509 + #define HA_BYTES_IN_CHUNK (ha_pages_in_chunk << PAGE_SHIFT) 510 + 496 511 #define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE) 497 - #define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE) 498 512 499 513 struct hv_dynmem_device { 500 514 struct hv_device *dev; ··· 585 595 struct hv_hotadd_gap *gap; 586 596 587 597 /* The page is not backed. */ 588 - if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn)) 598 + if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn) 589 599 return false; 590 600 591 601 /* Check for gaps. 
*/ 592 602 list_for_each_entry(gap, &has->gap_list, list) { 593 - if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn)) 603 + if (pfn >= gap->start_pfn && pfn < gap->end_pfn) 594 604 return false; 595 605 } 596 606 ··· 714 724 unsigned long processed_pfn; 715 725 unsigned long total_pfn = pfn_count; 716 726 717 - for (i = 0; i < (size/HA_CHUNK); i++) { 718 - start_pfn = start + (i * HA_CHUNK); 727 + for (i = 0; i < (size/ha_pages_in_chunk); i++) { 728 + start_pfn = start + (i * ha_pages_in_chunk); 719 729 720 730 scoped_guard(spinlock_irqsave, &dm_device.ha_lock) { 721 - has->ha_end_pfn += HA_CHUNK; 722 - 723 - if (total_pfn > HA_CHUNK) { 724 - processed_pfn = HA_CHUNK; 725 - total_pfn -= HA_CHUNK; 726 - } else { 727 - processed_pfn = total_pfn; 728 - total_pfn = 0; 729 - } 730 - 731 - has->covered_end_pfn += processed_pfn; 731 + has->ha_end_pfn += ha_pages_in_chunk; 732 + processed_pfn = umin(total_pfn, ha_pages_in_chunk); 733 + total_pfn -= processed_pfn; 734 + has->covered_end_pfn += processed_pfn; 732 735 } 733 736 734 737 reinit_completion(&dm_device.ol_waitevent); 735 738 736 739 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); 737 740 ret = add_memory(nid, PFN_PHYS((start_pfn)), 738 - (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE); 741 + HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE); 739 742 740 743 if (ret) { 741 744 pr_err("hot_add memory failed error is %d\n", ret); ··· 743 760 do_hot_add = false; 744 761 } 745 762 scoped_guard(spinlock_irqsave, &dm_device.ha_lock) { 746 - has->ha_end_pfn -= HA_CHUNK; 763 + has->ha_end_pfn -= ha_pages_in_chunk; 747 764 has->covered_end_pfn -= processed_pfn; 748 765 } 749 766 break; ··· 770 787 guard(spinlock_irqsave)(&dm_device.ha_lock); 771 788 list_for_each_entry(has, &dm_device.ha_region_list, list) { 772 789 /* The page belongs to a different HAS. 
*/ 773 - if ((pfn < has->start_pfn) || 774 - (pfn + (1UL << order) > has->end_pfn)) 790 + if (pfn < has->start_pfn || 791 + (pfn + (1UL << order) > has->end_pfn)) 775 792 continue; 776 793 777 794 hv_bring_pgs_online(has, pfn, 1UL << order); ··· 783 800 { 784 801 struct hv_hotadd_state *has; 785 802 struct hv_hotadd_gap *gap; 786 - unsigned long residual, new_inc; 803 + unsigned long residual; 787 804 int ret = 0; 788 805 789 806 guard(spinlock_irqsave)(&dm_device.ha_lock); ··· 819 836 * our current limit; extend it. 820 837 */ 821 838 if ((start_pfn + pfn_cnt) > has->end_pfn) { 839 + /* Extend the region by multiples of ha_pages_in_chunk */ 822 840 residual = (start_pfn + pfn_cnt - has->end_pfn); 823 - /* 824 - * Extend the region by multiples of HA_CHUNK. 825 - */ 826 - new_inc = (residual / HA_CHUNK) * HA_CHUNK; 827 - if (residual % HA_CHUNK) 828 - new_inc += HA_CHUNK; 829 - 830 - has->end_pfn += new_inc; 841 + has->end_pfn += ALIGN(residual, ha_pages_in_chunk); 831 842 } 832 843 833 844 ret = 1; ··· 832 855 } 833 856 834 857 static unsigned long handle_pg_range(unsigned long pg_start, 835 - unsigned long pg_count) 858 + unsigned long pg_count) 836 859 { 837 860 unsigned long start_pfn = pg_start; 838 861 unsigned long pfn_cnt = pg_count; ··· 843 866 unsigned long res = 0, flags; 844 867 845 868 pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count, 846 - pg_start); 869 + pg_start); 847 870 848 871 spin_lock_irqsave(&dm_device.ha_lock, flags); 849 872 list_for_each_entry(has, &dm_device.ha_region_list, list) { ··· 879 902 if (start_pfn > has->start_pfn && 880 903 online_section_nr(pfn_to_section_nr(start_pfn))) 881 904 hv_bring_pgs_online(has, start_pfn, pgs_ol); 882 - 883 905 } 884 906 885 - if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) { 907 + if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) { 886 908 /* 887 909 * We have some residual hot add range 888 910 * that needs to be hot added; hot add 889 911 * it now. 
Hot add a multiple of 890 - * HA_CHUNK that fully covers the pages 912 + * ha_pages_in_chunk that fully covers the pages 891 913 * we have. 892 914 */ 893 915 size = (has->end_pfn - has->ha_end_pfn); 894 916 if (pfn_cnt <= size) { 895 - size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK); 896 - if (pfn_cnt % HA_CHUNK) 897 - size += HA_CHUNK; 917 + size = ALIGN(pfn_cnt, ha_pages_in_chunk); 898 918 } else { 899 919 pfn_cnt = size; 900 920 } ··· 984 1010 rg_start = dm->ha_wrk.ha_region_range.finfo.start_page; 985 1011 rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt; 986 1012 987 - if ((rg_start == 0) && (!dm->host_specified_ha_region)) { 988 - unsigned long region_size; 989 - unsigned long region_start; 990 - 1013 + if (rg_start == 0 && !dm->host_specified_ha_region) { 991 1014 /* 992 1015 * The host has not specified the hot-add region. 993 1016 * Based on the hot-add page range being specified, ··· 992 1021 * that need to be hot-added while ensuring the alignment 993 1022 * and size requirements of Linux as it relates to hot-add. 
994 1023 */ 995 - region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK; 996 - if (pfn_cnt % HA_CHUNK) 997 - region_size += HA_CHUNK; 998 - 999 - region_start = (pg_start / HA_CHUNK) * HA_CHUNK; 1000 - 1001 - rg_start = region_start; 1002 - rg_sz = region_size; 1024 + rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk); 1025 + rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk); 1003 1026 } 1004 1027 1005 1028 if (do_hot_add) 1006 1029 resp.page_count = process_hot_add(pg_start, pfn_cnt, 1007 - rg_start, rg_sz); 1030 + rg_start, rg_sz); 1008 1031 1009 1032 dm->num_pages_added += resp.page_count; 1010 1033 #endif ··· 1176 1211 sizeof(struct dm_status), 1177 1212 (unsigned long)NULL, 1178 1213 VM_PKT_DATA_INBAND, 0); 1179 - 1180 1214 } 1181 1215 1182 1216 static void free_balloon_pages(struct hv_dynmem_device *dm, 1183 - union dm_mem_page_range *range_array) 1217 + union dm_mem_page_range *range_array) 1184 1218 { 1185 1219 int num_pages = range_array->finfo.page_cnt; 1186 1220 __u64 start_frame = range_array->finfo.start_page; ··· 1194 1230 adjust_managed_page_count(pg, 1); 1195 1231 } 1196 1232 } 1197 - 1198 - 1199 1233 1200 1234 static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, 1201 1235 unsigned int num_pages, ··· 1240 1278 page_to_pfn(pg); 1241 1279 bl_resp->range_array[i].finfo.page_cnt = alloc_unit; 1242 1280 bl_resp->hdr.size += sizeof(union dm_mem_page_range); 1243 - 1244 1281 } 1245 1282 1246 1283 return i * alloc_unit; ··· 1293 1332 1294 1333 if (num_ballooned == 0 || num_ballooned == num_pages) { 1295 1334 pr_debug("Ballooned %u out of %u requested pages.\n", 1296 - num_pages, dm_device.balloon_wrk.num_pages); 1335 + num_pages, dm_device.balloon_wrk.num_pages); 1297 1336 1298 1337 bl_resp->more_pages = 0; 1299 1338 done = true; ··· 1327 1366 1328 1367 for (i = 0; i < bl_resp->range_count; i++) 1329 1368 free_balloon_pages(&dm_device, 1330 - &bl_resp->range_array[i]); 1369 + &bl_resp->range_array[i]); 1331 1370 1332 1371 done = true; 1333 1372 } 1334 1373 
} 1335 - 1336 1374 } 1337 1375 1338 1376 static void balloon_down(struct hv_dynmem_device *dm, 1339 - struct dm_unballoon_request *req) 1377 + struct dm_unballoon_request *req) 1340 1378 { 1341 1379 union dm_mem_page_range *range_array = req->range_array; 1342 1380 int range_count = req->range_count; ··· 1349 1389 } 1350 1390 1351 1391 pr_debug("Freed %u ballooned pages.\n", 1352 - prev_pages_ballooned - dm->num_pages_ballooned); 1392 + prev_pages_ballooned - dm->num_pages_ballooned); 1353 1393 1354 1394 if (req->more_pages == 1) 1355 1395 return; ··· 1374 1414 struct hv_dynmem_device *dm = dm_dev; 1375 1415 1376 1416 while (!kthread_should_stop()) { 1377 - wait_for_completion_interruptible_timeout( 1378 - &dm_device.config_event, 1*HZ); 1417 + wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ); 1379 1418 /* 1380 1419 * The host expects us to post information on the memory 1381 1420 * pressure every second. ··· 1398 1439 return 0; 1399 1440 } 1400 1441 1401 - 1402 1442 static void version_resp(struct hv_dynmem_device *dm, 1403 - struct dm_version_response *vresp) 1443 + struct dm_version_response *vresp) 1404 1444 { 1405 1445 struct dm_version_request version_req; 1406 1446 int ret; ··· 1460 1502 } 1461 1503 1462 1504 static void cap_resp(struct hv_dynmem_device *dm, 1463 - struct dm_capabilities_resp_msg *cap_resp) 1505 + struct dm_capabilities_resp_msg *cap_resp) 1464 1506 { 1465 1507 if (!cap_resp->is_accepted) { 1466 1508 pr_err("Capabilities not accepted by host\n"); ··· 1493 1535 switch (dm_hdr->type) { 1494 1536 case DM_VERSION_RESPONSE: 1495 1537 version_resp(dm, 1496 - (struct dm_version_response *)dm_msg); 1538 + (struct dm_version_response *)dm_msg); 1497 1539 break; 1498 1540 1499 1541 case DM_CAPABILITIES_RESPONSE: ··· 1523 1565 1524 1566 dm->state = DM_BALLOON_DOWN; 1525 1567 balloon_down(dm, 1526 - (struct dm_unballoon_request *)recv_buffer); 1568 + (struct dm_unballoon_request *)recv_buffer); 1527 1569 break; 1528 1570 1529 
1571 case DM_MEM_HOT_ADD_REQUEST: ··· 1561 1603 1562 1604 default: 1563 1605 pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type); 1564 - 1565 1606 } 1566 1607 } 1567 - 1568 1608 } 1569 1609 1570 1610 #define HV_LARGE_REPORTING_ORDER 9 1571 1611 #define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \ 1572 1612 HV_LARGE_REPORTING_ORDER) 1573 1613 static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info, 1574 - struct scatterlist *sgl, unsigned int nents) 1614 + struct scatterlist *sgl, unsigned int nents) 1575 1615 { 1576 1616 unsigned long flags; 1577 1617 struct hv_memory_hint *hint; ··· 1604 1648 */ 1605 1649 1606 1650 /* page reporting for pages 2MB or higher */ 1607 - if (order >= HV_LARGE_REPORTING_ORDER ) { 1651 + if (order >= HV_LARGE_REPORTING_ORDER) { 1608 1652 range->page.largepage = 1; 1609 1653 range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB; 1610 1654 range->base_large_pfn = page_to_hvpfn( ··· 1618 1662 range->page.additional_pages = 1619 1663 (sg->length / HV_HYP_PAGE_SIZE) - 1; 1620 1664 } 1621 - 1622 1665 } 1623 1666 1624 1667 status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0, 1625 1668 hint, NULL); 1626 1669 local_irq_restore(flags); 1627 1670 if (!hv_result_success(status)) { 1628 - 1629 1671 pr_err("Cold memory discard hypercall failed with status %llx\n", 1630 - status); 1672 + status); 1631 1673 if (hv_hypercall_multi_failure > 0) 1632 1674 hv_hypercall_multi_failure++; 1633 1675 1634 1676 if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) { 1635 1677 pr_err("Underlying Hyper-V does not support order less than 9. 
Hypercall failed\n"); 1636 1678 pr_err("Defaulting to page_reporting_order %d\n", 1637 - pageblock_order); 1679 + pageblock_order); 1638 1680 page_reporting_order = pageblock_order; 1639 1681 hv_hypercall_multi_failure++; 1640 1682 return -EINVAL; ··· 1666 1712 pr_err("Failed to enable cold memory discard: %d\n", ret); 1667 1713 } else { 1668 1714 pr_info("Cold memory discard hint enabled with order %d\n", 1669 - page_reporting_order); 1715 + page_reporting_order); 1670 1716 } 1671 1717 } 1672 1718 ··· 1749 1795 if (ret) 1750 1796 goto out; 1751 1797 1752 - t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 1798 + t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ); 1753 1799 if (t == 0) { 1754 1800 ret = -ETIMEDOUT; 1755 1801 goto out; ··· 1785 1831 cap_msg.caps.cap_bits.hot_add = hot_add_enabled(); 1786 1832 1787 1833 /* 1788 - * Specify our alignment requirements as it relates 1789 - * memory hot-add. Specify 128MB alignment. 1834 + * Specify our alignment requirements for memory hot-add. The value is 1835 + * the log base 2 of the number of megabytes in a chunk. For example, 1836 + * with 256 MiB chunks, the value is 8. The number of MiB in a chunk 1837 + * must be a power of 2. 
1790 1838 */ 1791 - cap_msg.caps.cap_bits.hot_add_alignment = 7; 1839 + cap_msg.caps.cap_bits.hot_add_alignment = 1840 + ilog2(HA_BYTES_IN_CHUNK / SZ_1M); 1792 1841 1793 1842 /* 1794 1843 * Currently the host does not use these ··· 1807 1850 if (ret) 1808 1851 goto out; 1809 1852 1810 - t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 1853 + t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ); 1811 1854 if (t == 0) { 1812 1855 ret = -ETIMEDOUT; 1813 1856 goto out; ··· 1848 1891 char *sname; 1849 1892 1850 1893 seq_printf(f, "%-22s: %u.%u\n", "host_version", 1851 - DYNMEM_MAJOR_VERSION(dm->version), 1852 - DYNMEM_MINOR_VERSION(dm->version)); 1894 + DYNMEM_MAJOR_VERSION(dm->version), 1895 + DYNMEM_MINOR_VERSION(dm->version)); 1853 1896 1854 1897 seq_printf(f, "%-22s:", "capabilities"); 1855 1898 if (ballooning_enabled()) ··· 1898 1941 seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned); 1899 1942 1900 1943 seq_printf(f, "%-22s: %lu\n", "total_pages_committed", 1901 - get_pages_committed(dm)); 1944 + get_pages_committed(dm)); 1902 1945 1903 1946 seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count", 1904 - dm->max_dynamic_page_count); 1947 + dm->max_dynamic_page_count); 1905 1948 1906 1949 return 0; 1907 1950 } ··· 1911 1954 static void hv_balloon_debugfs_init(struct hv_dynmem_device *b) 1912 1955 { 1913 1956 debugfs_create_file("hv-balloon", 0444, NULL, b, 1914 - &hv_balloon_debug_fops); 1957 + &hv_balloon_debug_fops); 1915 1958 } 1916 1959 1917 1960 static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b) ··· 1941 1984 hot_add = false; 1942 1985 1943 1986 #ifdef CONFIG_MEMORY_HOTPLUG 1987 + /* 1988 + * Hot-add must operate in chunks that are of size equal to the 1989 + * memory block size because that's what the core add_memory() 1990 + * interface requires. The Hyper-V interface requires that the memory 1991 + * block size be a power of 2, which is guaranteed by the check in 1992 + * memory_dev_init(). 
1993 + */ 1994 + ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE; 1944 1995 do_hot_add = hot_add; 1945 1996 #else 1997 + /* 1998 + * Without MEMORY_HOTPLUG, the guest returns a failure status for all 1999 + * hot add requests from Hyper-V, and the chunk size is used only to 2000 + * specify alignment to Hyper-V as required by the host/guest protocol. 2001 + * Somewhat arbitrarily, use 128 MiB. 2002 + */ 2003 + ha_pages_in_chunk = SZ_128M / PAGE_SIZE; 1946 2004 do_hot_add = false; 1947 2005 #endif 1948 2006 dm_device.dev = dev; ··· 2069 2097 tasklet_enable(&hv_dev->channel->callback_event); 2070 2098 2071 2099 return 0; 2072 - 2073 2100 } 2074 2101 2075 2102 static int balloon_resume(struct hv_device *dev) ··· 2127 2156 2128 2157 static int __init init_balloon_drv(void) 2129 2158 { 2130 - 2131 2159 return vmbus_driver_register(&balloon_drv); 2132 2160 } 2133 2161
+1 -1
drivers/i2c/busses/i2c-ocores.c
··· 431 431 oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8); 432 432 433 433 /* Init the device */ 434 - oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); 435 434 oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN); 435 + oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); 436 436 437 437 return 0; 438 438 }
+1 -3
drivers/infiniband/hw/bnxt_re/bnxt_re.h
··· 107 107 struct bnxt_re_sqp_entries *sqp_tbl; 108 108 }; 109 109 110 - #define BNXT_RE_MIN_MSIX 2 111 - #define BNXT_RE_MAX_MSIX 9 112 110 #define BNXT_RE_AEQ_IDX 0 113 111 #define BNXT_RE_NQ_IDX 1 114 112 #define BNXT_RE_GEN_P5_MAX_VF 64 ··· 166 168 struct bnxt_qplib_rcfw rcfw; 167 169 168 170 /* NQ */ 169 - struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX]; 171 + struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX]; 170 172 171 173 /* Device Resources */ 172 174 struct bnxt_qplib_dev_attr dev_attr;
+1
drivers/infiniband/hw/mana/mr.c
··· 112 112 "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x", 113 113 start, iova, length, access_flags); 114 114 115 + access_flags &= ~IB_ACCESS_OPTIONAL; 115 116 if (access_flags & ~VALID_MR_FLAGS) 116 117 return ERR_PTR(-EINVAL); 117 118
+2 -2
drivers/infiniband/hw/mlx5/main.c
··· 3759 3759 spin_lock_init(&dev->dm.lock); 3760 3760 dev->dm.dev = mdev; 3761 3761 return 0; 3762 - err: 3763 - mlx5r_macsec_dealloc_gids(dev); 3764 3762 err_mp: 3765 3763 mlx5_ib_cleanup_multiport_master(dev); 3764 + err: 3765 + mlx5r_macsec_dealloc_gids(dev); 3766 3766 return err; 3767 3767 } 3768 3768
+4 -4
drivers/infiniband/hw/mlx5/mr.c
··· 246 246 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); 247 247 MLX5_SET(mkc, mkc, access_mode_4_2, 248 248 (ent->rb_key.access_mode >> 2) & 0x7); 249 + MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); 249 250 250 251 MLX5_SET(mkc, mkc, translations_octword_size, 251 252 get_mkc_octo_size(ent->rb_key.access_mode, ··· 642 641 new = &((*new)->rb_left); 643 642 if (cmp < 0) 644 643 new = &((*new)->rb_right); 645 - if (cmp == 0) { 646 - mutex_unlock(&cache->rb_lock); 644 + if (cmp == 0) 647 645 return -EEXIST; 648 - } 649 646 } 650 647 651 648 /* Add new node and rebalance tree. */ ··· 718 719 } 719 720 mr->mmkey.cache_ent = ent; 720 721 mr->mmkey.type = MLX5_MKEY_MR; 722 + mr->mmkey.rb_key = ent->rb_key; 723 + mr->mmkey.cacheable = true; 721 724 init_waitqueue_head(&mr->mmkey.wait); 722 725 return mr; 723 726 } ··· 1170 1169 mr->ibmr.pd = pd; 1171 1170 mr->umem = umem; 1172 1171 mr->page_shift = order_base_2(page_size); 1173 - mr->mmkey.cacheable = true; 1174 1172 set_mr_fields(dev, mr, umem->length, access_flags, iova); 1175 1173 1176 1174 return mr;
+8 -5
drivers/infiniband/hw/mlx5/srq.c
··· 199 199 int err; 200 200 struct mlx5_srq_attr in = {}; 201 201 __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); 202 + __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) / 203 + sizeof(struct mlx5_wqe_data_seg); 202 204 203 205 if (init_attr->srq_type != IB_SRQT_BASIC && 204 206 init_attr->srq_type != IB_SRQT_XRC && 205 207 init_attr->srq_type != IB_SRQT_TM) 206 208 return -EOPNOTSUPP; 207 209 208 - /* Sanity check SRQ size before proceeding */ 209 - if (init_attr->attr.max_wr >= max_srq_wqes) { 210 - mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", 211 - init_attr->attr.max_wr, 212 - max_srq_wqes); 210 + /* Sanity check SRQ and sge size before proceeding */ 211 + if (init_attr->attr.max_wr >= max_srq_wqes || 212 + init_attr->attr.max_sge > max_sge_sz) { 213 + mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n", 214 + init_attr->attr.max_wr, max_srq_wqes, 215 + init_attr->attr.max_sge, max_sge_sz); 213 216 return -EINVAL; 214 217 } 215 218
+13
drivers/infiniband/sw/rxe/rxe_resp.c
··· 344 344 * receive buffer later. For rmda operations additional 345 345 * length checks are performed in check_rkey. 346 346 */ 347 + if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) { 348 + unsigned int payload = payload_size(pkt); 349 + unsigned int recv_buffer_len = 0; 350 + int i; 351 + 352 + for (i = 0; i < qp->resp.wqe->dma.num_sge; i++) 353 + recv_buffer_len += qp->resp.wqe->dma.sge[i].length; 354 + if (payload + 40 > recv_buffer_len) { 355 + rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n"); 356 + return RESPST_ERR_LENGTH; 357 + } 358 + } 359 + 347 360 if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) || 348 361 (qp_type(qp) == IB_QPT_UC))) { 349 362 unsigned int mtu = qp->mtu;
+1 -1
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 812 812 int i; 813 813 814 814 for (i = 0; i < ibwr->num_sge; i++, sge++) { 815 - memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length); 815 + memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length); 816 816 p += sge->length; 817 817 } 818 818 }
+1
drivers/mfd/axp20x.c
··· 210 210 211 211 static const struct regmap_range axp717_writeable_ranges[] = { 212 212 regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN), 213 + regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE), 213 214 regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL), 214 215 }; 215 216
+5 -3
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 732 732 return NETDEV_TX_OK; 733 733 734 734 tx_dma_error: 735 - if (BNXT_TX_PTP_IS_SET(lflags)) 736 - atomic_inc(&bp->ptp_cfg->tx_avail); 737 - 738 735 last_frag = i; 739 736 740 737 /* start back at beginning and unmap skb */ ··· 753 756 tx_free: 754 757 dev_kfree_skb_any(skb); 755 758 tx_kick_pending: 759 + if (BNXT_TX_PTP_IS_SET(lflags)) 760 + atomic_inc(&bp->ptp_cfg->tx_avail); 756 761 if (txr->kick_pending) 757 762 bnxt_txr_db_kick(bp, txr, txr->tx_prod); 758 763 txr->tx_buf_ring[txr->tx_prod].skb = NULL; ··· 8995 8996 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 8996 8997 #endif 8997 8998 } 8999 + bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs); 8998 9000 8999 9001 hwrm_func_qcaps_exit: 9000 9002 hwrm_req_drop(bp, req); ··· 15363 15363 dev->priv_flags |= IFF_UNICAST_FLT; 15364 15364 15365 15365 netif_set_tso_max_size(dev, GSO_MAX_SIZE); 15366 + if (bp->tso_max_segs) 15367 + netif_set_tso_max_segs(dev, bp->tso_max_segs); 15366 15368 15367 15369 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 15368 15370 NETDEV_XDP_ACT_RX_SG;
+1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 2318 2318 u8 rss_hash_key_updated:1; 2319 2319 2320 2320 u16 max_mtu; 2321 + u16 tso_max_segs; 2321 2322 u8 max_tc; 2322 2323 u8 max_lltc; /* lossless TCs */ 2323 2324 struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
+178 -133
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
··· 2 2 * 3 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 4 * Copyright (c) 2014-2018 Broadcom Limited 5 - * Copyright (c) 2018-2023 Broadcom Inc. 5 + * Copyright (c) 2018-2024 Broadcom Inc. 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify 8 8 * it under the terms of the GNU General Public License as published by ··· 500 500 #define HWRM_TFC_IF_TBL_GET 0x399UL 501 501 #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL 502 502 #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL 503 + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL 504 + #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL 505 + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL 503 506 #define HWRM_SV 0x400UL 507 + #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL 504 508 #define HWRM_DBG_READ_DIRECT 0xff10UL 505 509 #define HWRM_DBG_READ_INDIRECT 0xff11UL 506 510 #define HWRM_DBG_WRITE_DIRECT 0xff12UL ··· 613 609 #define HWRM_VERSION_MAJOR 1 614 610 #define HWRM_VERSION_MINOR 10 615 611 #define HWRM_VERSION_UPDATE 3 616 - #define HWRM_VERSION_RSVD 39 617 - #define HWRM_VERSION_STR "1.10.3.39" 612 + #define HWRM_VERSION_RSVD 44 613 + #define HWRM_VERSION_STR "1.10.3.44" 618 614 619 615 /* hwrm_ver_get_input (size:192b/24B) */ 620 616 struct hwrm_ver_get_input { ··· 668 664 #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL 669 665 #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL 670 666 #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL 667 + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL 671 668 u8 roce_fw_maj_8b; 672 669 u8 roce_fw_min_8b; 673 670 u8 roce_fw_bld_8b; ··· 848 843 #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL 849 844 #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL 850 845 #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL 851 - #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL 846 + #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL 847 + #define 
ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL 848 + #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL 852 849 #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL 853 850 #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL 854 851 #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR ··· 1333 1326 u8 timestamp_lo; 1334 1327 __le16 timestamp_hi; 1335 1328 __le32 event_data1; 1336 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL 1337 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0 1338 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL 1339 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL 1340 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL 1341 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL 1342 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL 1329 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL 1330 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0 1331 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL 1332 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL 1333 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL 1334 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL 1335 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL 1343 1336 #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL 1344 1337 #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL 1345 1338 #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST 
ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED ··· 1821 1814 #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL 1822 1815 #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL 1823 1816 #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL 1817 + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL 1818 + #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL 1819 + #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL 1824 1820 __le16 tunnel_disable_flag; 1825 1821 #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL 1826 1822 #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL ··· 1838 1828 #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL 1839 1829 u8 device_serial_number[8]; 1840 1830 __le16 ctxs_per_partition; 1841 - u8 unused_2[2]; 1831 + __le16 max_tso_segs; 1842 1832 __le32 roce_vf_max_av; 1843 1833 __le32 roce_vf_max_cq; 1844 1834 __le32 roce_vf_max_mrw; ··· 2459 2449 #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL 2460 2450 #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL 2461 2451 #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL 2452 + #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL 2462 2453 __le32 enables; 2463 2454 #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL 2464 2455 #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL ··· 3671 3660 __le16 target_id; 3672 3661 __le64 resp_addr; 3673 3662 __le16 type; 3674 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL 3675 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL 3676 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL 3677 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL 3678 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL 3679 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL 3680 - #define 
FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL 3681 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL 3682 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL 3683 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL 3684 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL 3685 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL 3686 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL 3687 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL 3688 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL 3689 - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL 3663 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL 3664 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL 3665 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL 3666 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL 3667 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL 3668 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL 3669 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL 3670 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL 3671 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL 3672 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL 3673 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL 3674 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL 3675 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL 3676 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL 3677 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL 3678 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL 3679 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL 3680 + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL 3690 3681 #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL 3691 3682 #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL 3692 3683 #define 
FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL ··· 3785 3772 __le16 seq_id; 3786 3773 __le16 resp_len; 3787 3774 __le16 type; 3788 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL 3789 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL 3790 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL 3791 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL 3792 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL 3793 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL 3794 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL 3795 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL 3796 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL 3797 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL 3798 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL 3799 - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL 3775 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL 3776 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL 3777 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL 3778 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL 3779 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL 3780 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL 3781 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL 3782 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL 3783 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL 3784 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL 3785 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL 3786 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL 3787 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL 3788 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL 3800 3789 #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL 3801 3790 #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL 3802 3791 #define 
FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL ··· 3891 3876 __le16 target_id; 3892 3877 __le64 resp_addr; 3893 3878 __le16 type; 3894 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL 3895 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL 3896 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL 3897 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL 3898 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL 3899 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL 3900 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL 3901 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL 3902 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL 3903 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL 3904 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL 3905 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL 3906 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL 3907 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL 3908 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL 3909 - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL 3879 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL 3880 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL 3881 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL 3882 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL 3883 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL 3884 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL 3885 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL 3886 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL 3887 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL 3888 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL 3889 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL 3890 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL 3891 + 
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL 3892 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL 3893 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL 3894 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL 3895 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL 3896 + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL 3910 3897 #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL 3911 3898 #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL 3912 3899 #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL ··· 3928 3911 __le16 seq_id; 3929 3912 __le16 resp_len; 3930 3913 __le16 type; 3931 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL 3932 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL 3933 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL 3934 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL 3935 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL 3936 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL 3937 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL 3938 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL 3939 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL 3940 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL 3941 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL 3942 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL 3943 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL 3944 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL 3945 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL 3946 - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL 3914 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL 3915 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL 3916 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL 
3917 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL 3918 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL 3919 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL 3920 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL 3921 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL 3922 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL 3923 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL 3924 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL 3925 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL 3926 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL 3927 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL 3928 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL 3929 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL 3930 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL 3931 + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL 3947 3932 #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL 3948 3933 #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL 3949 3934 #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL ··· 4221 4202 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL 4222 4203 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL 4223 4204 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL 4224 - #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 4205 + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL 4206 + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 4225 4207 __le16 auto_link_speeds2_mask; 4226 4208 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL 4227 4209 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL ··· 4237 4217 #define 
PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL 4238 4218 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL 4239 4219 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL 4220 + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL 4240 4221 u8 unused_2[6]; 4241 4222 }; 4242 4223 ··· 4313 4292 #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL 4314 4293 #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL 4315 4294 #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL 4295 + #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL 4316 4296 #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL 4317 4297 #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 4318 4298 u8 duplex_cfg; ··· 4473 4451 #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL 4474 4452 #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL 4475 4453 #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL 4476 - #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 4454 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL 4455 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL 4456 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL 4457 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL 4458 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL 4459 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL 4460 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 4477 4461 u8 media_type; 4478 4462 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL 4479 4463 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL ··· 5077 5049 u8 valid; 5078 5050 }; 5079 5051 5080 - /* hwrm_port_lpbk_qstats_input (size:128b/16B) */ 5052 + /* hwrm_port_lpbk_qstats_input (size:256b/32B) */ 5081 5053 struct hwrm_port_lpbk_qstats_input { 5082 5054 __le16 req_type; 5083 5055 __le16 cmpl_ring; 5084 5056 __le16 seq_id; 5085 5057 __le16 target_id; 
5086 5058 __le64 resp_addr; 5059 + __le16 lpbk_stat_size; 5060 + u8 flags; 5061 + #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL 5062 + u8 unused_0[5]; 5063 + __le64 lpbk_stat_host_addr; 5087 5064 }; 5088 5065 5089 - /* hwrm_port_lpbk_qstats_output (size:768b/96B) */ 5066 + /* hwrm_port_lpbk_qstats_output (size:128b/16B) */ 5090 5067 struct hwrm_port_lpbk_qstats_output { 5091 5068 __le16 error_code; 5092 5069 __le16 req_type; 5093 5070 __le16 seq_id; 5094 5071 __le16 resp_len; 5072 + __le16 lpbk_stat_size; 5073 + u8 unused_0[5]; 5074 + u8 valid; 5075 + }; 5076 + 5077 + /* port_lpbk_stats (size:640b/80B) */ 5078 + struct port_lpbk_stats { 5095 5079 __le64 lpbk_ucast_frames; 5096 5080 __le64 lpbk_mcast_frames; 5097 5081 __le64 lpbk_bcast_frames; 5098 5082 __le64 lpbk_ucast_bytes; 5099 5083 __le64 lpbk_mcast_bytes; 5100 5084 __le64 lpbk_bcast_bytes; 5101 - __le64 tx_stat_discard; 5102 - __le64 tx_stat_error; 5103 - __le64 rx_stat_discard; 5104 - __le64 rx_stat_error; 5105 - u8 unused_0[7]; 5106 - u8 valid; 5085 + __le64 lpbk_tx_discards; 5086 + __le64 lpbk_tx_errors; 5087 + __le64 lpbk_rx_discards; 5088 + __le64 lpbk_rx_errors; 5107 5089 }; 5108 5090 5109 5091 /* hwrm_port_ecn_qstats_input (size:256b/32B) */ ··· 5178 5140 u8 valid; 5179 5141 }; 5180 5142 5181 - /* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */ 5143 + /* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */ 5182 5144 struct hwrm_port_lpbk_clr_stats_input { 5183 5145 __le16 req_type; 5184 5146 __le16 cmpl_ring; 5185 5147 __le16 seq_id; 5186 5148 __le16 target_id; 5187 5149 __le64 resp_addr; 5150 + __le16 port_id; 5151 + u8 unused_0[6]; 5188 5152 }; 5189 5153 5190 5154 /* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */ ··· 5327 5287 #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL 5328 5288 #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL 5329 5289 __le16 flags2; 5330 - #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL 5331 - #define 
PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL 5332 - #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL 5333 - #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL 5290 + #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL 5291 + #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL 5292 + #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL 5293 + #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL 5294 + #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL 5334 5295 u8 internal_port_cnt; 5335 5296 u8 unused_0; 5336 5297 __le16 supported_speeds2_force_mode; ··· 7484 7443 __le16 target_id; 7485 7444 __le64 resp_addr; 7486 7445 __le32 flags; 7487 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL 7488 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL 7489 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL 7490 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 7491 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL 7492 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL 7493 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 7494 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) 7495 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) 7496 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) 7497 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE 7446 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL 7447 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL 7448 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL 7449 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 7450 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL 7451 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL 7452 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 7453 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) 7454 + #define 
CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) 7455 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) 7456 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE 7498 7457 #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL 7499 7458 #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4 7500 7459 #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4) ··· 8561 8520 __le16 target_id; 8562 8521 __le64 resp_addr; 8563 8522 u8 tunnel_type; 8564 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL 8565 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL 8566 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL 8567 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL 8568 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL 8569 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL 8570 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL 8571 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL 8572 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL 8573 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL 8574 - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL 8523 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL 8524 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL 8525 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL 8526 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL 8527 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL 8528 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL 8529 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL 8530 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL 8531 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL 8532 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL 8533 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL 8575 8534 
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL 8576 8535 #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL 8577 8536 #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL ··· 8617 8576 __le16 target_id; 8618 8577 __le64 resp_addr; 8619 8578 u8 tunnel_type; 8620 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL 8621 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL 8622 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL 8623 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL 8624 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL 8625 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL 8626 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL 8627 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL 8628 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL 8629 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL 8630 - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL 8579 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL 8580 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL 8581 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL 8582 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL 8583 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL 8584 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL 8585 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL 8586 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL 8587 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL 8588 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL 8589 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL 8631 8590 #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL 8632 8591 #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL 8633 8592 #define 
TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL ··· 8676 8635 __le16 target_id; 8677 8636 __le64 resp_addr; 8678 8637 u8 tunnel_type; 8679 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL 8680 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL 8681 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL 8682 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL 8683 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL 8684 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL 8685 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL 8686 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL 8687 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL 8688 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL 8689 - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL 8638 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL 8639 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL 8640 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL 8641 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL 8642 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL 8643 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL 8644 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL 8645 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL 8646 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL 8647 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL 8648 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL 8690 8649 #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL 8691 8650 #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL 8692 8651 #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL ··· 9150 9109 #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL 9151 9110 #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL 9152 9111 #define 
STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL 9112 + #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL 9153 9113 #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL 9154 9114 #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL 9155 9115 #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL ··· 9800 9758 __le16 instance; 9801 9759 __le16 unused_0; 9802 9760 u8 seg_flags; 9761 + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL 9762 + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL 9763 + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL 9803 9764 u8 unused_1[7]; 9804 9765 }; 9805 9766 ··· 10478 10433 10479 10434 /* dbc_dbc (size:64b/8B) */ 10480 10435 struct dbc_dbc { 10481 - u32 index; 10436 + __le32 index; 10482 10437 #define DBC_DBC_INDEX_MASK 0xffffffUL 10483 10438 #define DBC_DBC_INDEX_SFT 0 10484 10439 #define DBC_DBC_EPOCH 0x1000000UL 10485 10440 #define DBC_DBC_TOGGLE_MASK 0x6000000UL 10486 10441 #define DBC_DBC_TOGGLE_SFT 25 10487 - u32 type_path_xid; 10442 + __le32 type_path_xid; 10488 10443 #define DBC_DBC_XID_MASK 0xfffffUL 10489 10444 #define DBC_DBC_XID_SFT 0 10490 10445 #define DBC_DBC_PATH_MASK 0x3000000UL
+21 -2
drivers/net/ethernet/intel/ice/ice_ddp.c
··· 1339 1339 1340 1340 for (i = 0; i < count; i++) { 1341 1341 bool last = false; 1342 + int try_cnt = 0; 1342 1343 int status; 1343 1344 1344 1345 bh = (struct ice_buf_hdr *)(bufs + start + i); ··· 1347 1346 if (indicate_last) 1348 1347 last = ice_is_last_download_buffer(bh, i, count); 1349 1348 1350 - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, 1351 - &offset, &info, NULL); 1349 + while (1) { 1350 + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, 1351 + last, &offset, &info, 1352 + NULL); 1353 + if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC && 1354 + hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG) 1355 + break; 1356 + 1357 + try_cnt++; 1358 + 1359 + if (try_cnt == 5) 1360 + break; 1361 + 1362 + msleep(20); 1363 + } 1364 + 1365 + if (try_cnt) 1366 + dev_dbg(ice_hw_to_dev(hw), 1367 + "ice_aq_download_pkg number of retries: %d\n", 1368 + try_cnt); 1352 1369 1353 1370 /* Save AQ status from download package */ 1354 1371 if (status) {
+9 -1
drivers/net/ethernet/intel/ice/ice_main.c
··· 805 805 } 806 806 807 807 switch (vsi->port_info->phy.link_info.link_speed) { 808 + case ICE_AQ_LINK_SPEED_200GB: 809 + speed = "200 G"; 810 + break; 808 811 case ICE_AQ_LINK_SPEED_100GB: 809 812 speed = "100 G"; 810 813 break; ··· 5567 5564 */ 5568 5565 disabled = ice_service_task_stop(pf); 5569 5566 5570 - ice_unplug_aux_dev(pf); 5567 + ice_deinit_rdma(pf); 5571 5568 5572 5569 /* Already suspended?, then there is nothing to do */ 5573 5570 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { ··· 5646 5643 ret = ice_reinit_interrupt_scheme(pf); 5647 5644 if (ret) 5648 5645 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); 5646 + 5647 + ret = ice_init_rdma(pf); 5648 + if (ret) 5649 + dev_err(dev, "Reinitialize RDMA during resume failed: %d\n", 5650 + ret); 5649 5651 5650 5652 clear_bit(ICE_DOWN, pf->state); 5651 5653 /* Now perform PF reset and rebuild */
+4 -2
drivers/net/ethernet/intel/ice/ice_switch.c
··· 1899 1899 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 1900 1900 lkup_type == ICE_SW_LKUP_PROMISC || 1901 1901 lkup_type == ICE_SW_LKUP_PROMISC_VLAN || 1902 - lkup_type == ICE_SW_LKUP_DFLT) { 1902 + lkup_type == ICE_SW_LKUP_DFLT || 1903 + lkup_type == ICE_SW_LKUP_LAST) { 1903 1904 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP); 1904 1905 } else if (lkup_type == ICE_SW_LKUP_VLAN) { 1905 1906 if (opc == ice_aqc_opc_alloc_res) ··· 2923 2922 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 2924 2923 lkup_type == ICE_SW_LKUP_PROMISC || 2925 2924 lkup_type == ICE_SW_LKUP_PROMISC_VLAN || 2926 - lkup_type == ICE_SW_LKUP_DFLT) 2925 + lkup_type == ICE_SW_LKUP_DFLT || 2926 + lkup_type == ICE_SW_LKUP_LAST) 2927 2927 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : 2928 2928 ICE_AQC_SW_RULES_T_VSI_LIST_SET; 2929 2929 else if (lkup_type == ICE_SW_LKUP_VLAN)
+4 -1
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 4014 4014 } 4015 4015 } 4016 4016 4017 - skb = build_skb(data, frag_size); 4017 + if (frag_size) 4018 + skb = build_skb(data, frag_size); 4019 + else 4020 + skb = slab_build_skb(data); 4018 4021 if (!skb) { 4019 4022 netdev_warn(port->dev, "skb build failed\n"); 4020 4023 goto err_drop_frame;
+1 -2
drivers/net/ethernet/marvell/octeontx2/nic/Makefile
··· 9 9 rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ 10 10 otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ 11 11 otx2_devlink.o qos_sq.o qos.o 12 - rvu_nicvf-y := otx2_vf.o otx2_devlink.o 12 + rvu_nicvf-y := otx2_vf.o 13 13 14 14 rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o 15 - rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o 16 15 rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o 17 16 18 17 ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+7
drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
··· 54 54 55 55 return 0; 56 56 } 57 + EXPORT_SYMBOL(otx2_pfc_txschq_config); 57 58 58 59 static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio) 59 60 { ··· 123 122 124 123 return 0; 125 124 } 125 + EXPORT_SYMBOL(otx2_pfc_txschq_alloc); 126 126 127 127 static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio) 128 128 { ··· 262 260 263 261 return 0; 264 262 } 263 + EXPORT_SYMBOL(otx2_pfc_txschq_update); 265 264 266 265 int otx2_pfc_txschq_stop(struct otx2_nic *pfvf) 267 266 { ··· 285 282 286 283 return 0; 287 284 } 285 + EXPORT_SYMBOL(otx2_pfc_txschq_stop); 288 286 289 287 int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf) 290 288 { ··· 325 321 mutex_unlock(&pfvf->mbox.lock); 326 322 return err; 327 323 } 324 + EXPORT_SYMBOL(otx2_config_priority_flow_ctrl); 328 325 329 326 void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, 330 327 bool pfc_enable) ··· 390 385 "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n", 391 386 qidx, err); 392 387 } 388 + EXPORT_SYMBOL(otx2_update_bpid_in_rqctx); 393 389 394 390 static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) 395 391 { ··· 478 472 479 473 return 0; 480 474 } 475 + EXPORT_SYMBOL(otx2_dcbnl_set_ops);
+2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
··· 113 113 devlink_free(dl); 114 114 return err; 115 115 } 116 + EXPORT_SYMBOL(otx2_register_dl); 116 117 117 118 void otx2_unregister_dl(struct otx2_nic *pfvf) 118 119 { ··· 125 124 ARRAY_SIZE(otx2_dl_params)); 126 125 devlink_free(dl); 127 126 } 127 + EXPORT_SYMBOL(otx2_unregister_dl);
+4 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
··· 1174 1174 1175 1175 if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { 1176 1176 /* Insert vlan tag before giving pkt to tso */ 1177 - if (skb_vlan_tag_present(skb)) 1177 + if (skb_vlan_tag_present(skb)) { 1178 1178 skb = __vlan_hwaccel_push_inside(skb); 1179 + if (!skb) 1180 + return true; 1181 + } 1179 1182 otx2_sq_append_tso(pfvf, sq, skb, qidx); 1180 1183 return true; 1181 1184 }
+40 -4
drivers/net/ethernet/microchip/lan743x_ethtool.c
··· 1127 1127 if (netdev->phydev) 1128 1128 phy_ethtool_get_wol(netdev->phydev, wol); 1129 1129 1130 - wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | 1131 - WAKE_MAGIC | WAKE_PHY | WAKE_ARP; 1130 + if (wol->supported != adapter->phy_wol_supported) 1131 + netif_warn(adapter, drv, adapter->netdev, 1132 + "PHY changed its supported WOL! old=%x, new=%x\n", 1133 + adapter->phy_wol_supported, wol->supported); 1134 + 1135 + wol->supported |= MAC_SUPPORTED_WAKES; 1132 1136 1133 1137 if (adapter->is_pci11x1x) 1134 1138 wol->supported |= WAKE_MAGICSECURE; ··· 1147 1143 { 1148 1144 struct lan743x_adapter *adapter = netdev_priv(netdev); 1149 1145 1146 + /* WAKE_MAGICSEGURE is a modifier of and only valid together with 1147 + * WAKE_MAGIC 1148 + */ 1149 + if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC)) 1150 + return -EINVAL; 1151 + 1152 + if (netdev->phydev) { 1153 + struct ethtool_wolinfo phy_wol; 1154 + int ret; 1155 + 1156 + phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported; 1157 + 1158 + /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC 1159 + * for PHYs that do not support WAKE_MAGICSECURE 1160 + */ 1161 + if (wol->wolopts & WAKE_MAGICSECURE && 1162 + !(adapter->phy_wol_supported & WAKE_MAGICSECURE)) 1163 + phy_wol.wolopts &= ~WAKE_MAGIC; 1164 + 1165 + ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol); 1166 + if (ret && (ret != -EOPNOTSUPP)) 1167 + return ret; 1168 + 1169 + if (ret == -EOPNOTSUPP) 1170 + adapter->phy_wolopts = 0; 1171 + else 1172 + adapter->phy_wolopts = phy_wol.wolopts; 1173 + } else { 1174 + adapter->phy_wolopts = 0; 1175 + } 1176 + 1150 1177 adapter->wolopts = 0; 1178 + wol->wolopts &= ~adapter->phy_wolopts; 1151 1179 if (wol->wolopts & WAKE_UCAST) 1152 1180 adapter->wolopts |= WAKE_UCAST; 1153 1181 if (wol->wolopts & WAKE_MCAST) ··· 1200 1164 memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX); 1201 1165 } 1202 1166 1167 + wol->wolopts = adapter->wolopts | adapter->phy_wolopts; 1203 1168 
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts); 1204 1169 1205 - return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol) 1206 - : -ENETDOWN; 1170 + return 0; 1207 1171 } 1208 1172 #endif /* CONFIG_PM */ 1209 1173
+40 -8
drivers/net/ethernet/microchip/lan743x_main.c
··· 3118 3118 if (ret) 3119 3119 goto close_tx; 3120 3120 } 3121 + 3122 + #ifdef CONFIG_PM 3123 + if (adapter->netdev->phydev) { 3124 + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 3125 + 3126 + phy_ethtool_get_wol(netdev->phydev, &wol); 3127 + adapter->phy_wol_supported = wol.supported; 3128 + adapter->phy_wolopts = wol.wolopts; 3129 + } 3130 + #endif 3131 + 3121 3132 return 0; 3122 3133 3123 3134 close_tx: ··· 3586 3575 3587 3576 /* clear wake settings */ 3588 3577 pmtctl = lan743x_csr_read(adapter, PMT_CTL); 3589 - pmtctl |= PMT_CTL_WUPS_MASK_; 3578 + pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_; 3590 3579 pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ | 3591 3580 PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ | 3592 3581 PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_); ··· 3598 3587 3599 3588 pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_; 3600 3589 3601 - if (adapter->wolopts & WAKE_PHY) { 3602 - pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_; 3590 + if (adapter->phy_wolopts) 3603 3591 pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_; 3604 - } 3592 + 3605 3593 if (adapter->wolopts & WAKE_MAGIC) { 3606 3594 wucsr |= MAC_WUCSR_MPEN_; 3607 3595 macrx |= MAC_RX_RXEN_; ··· 3696 3686 lan743x_csr_write(adapter, MAC_WUCSR2, 0); 3697 3687 lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF); 3698 3688 3699 - if (adapter->wolopts) 3689 + if (adapter->wolopts || adapter->phy_wolopts) 3700 3690 lan743x_pm_set_wol(adapter); 3701 3691 3702 3692 if (adapter->is_pci11x1x) { ··· 3720 3710 struct pci_dev *pdev = to_pci_dev(dev); 3721 3711 struct net_device *netdev = pci_get_drvdata(pdev); 3722 3712 struct lan743x_adapter *adapter = netdev_priv(netdev); 3713 + u32 data; 3723 3714 int ret; 3724 3715 3725 3716 pci_set_power_state(pdev, PCI_D0); ··· 3739 3728 return ret; 3740 3729 } 3741 3730 3731 + ret = lan743x_csr_read(adapter, MAC_WK_SRC); 3732 + netif_dbg(adapter, drv, adapter->netdev, 3733 + "Wakeup source : 0x%08X\n", ret); 3734 + 3735 
+ /* Clear the wol configuration and status bits. Note that 3736 + * the status bits are "Write One to Clear (W1C)" 3737 + */ 3738 + data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ | 3739 + MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ | 3740 + MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_; 3741 + lan743x_csr_write(adapter, MAC_WUCSR, data); 3742 + 3743 + data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ | 3744 + MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_; 3745 + lan743x_csr_write(adapter, MAC_WUCSR2, data); 3746 + 3747 + data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ | 3748 + MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ | 3749 + MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ | 3750 + MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ | 3751 + MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ | 3752 + MAC_WK_SRC_WK_FR_SAVED_; 3753 + lan743x_csr_write(adapter, MAC_WK_SRC, data); 3754 + 3742 3755 /* open netdev when netdev is at running state while resume. 3743 3756 * For instance, it is true when system wakesup after pm-suspend 3744 3757 * However, it is false when system wakes up after suspend GUI menu ··· 3771 3736 lan743x_netdev_open(netdev); 3772 3737 3773 3738 netif_device_attach(netdev); 3774 - ret = lan743x_csr_read(adapter, MAC_WK_SRC); 3775 - netif_info(adapter, drv, adapter->netdev, 3776 - "Wakeup source : 0x%08X\n", ret); 3777 3739 3778 3740 return 0; 3779 3741 }
+28
drivers/net/ethernet/microchip/lan743x_main.h
··· 61 61 #define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18) 62 62 #define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15) 63 63 #define PMT_CTL_EEE_WAKEUP_EN_ BIT(13) 64 + #define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8) 64 65 #define PMT_CTL_READY_ BIT(7) 65 66 #define PMT_CTL_ETH_PHY_RST_ BIT(4) 66 67 #define PMT_CTL_WOL_EN_ BIT(3) ··· 228 227 #define MAC_WUCSR (0x140) 229 228 #define MAC_MP_SO_EN_ BIT(21) 230 229 #define MAC_WUCSR_RFE_WAKE_EN_ BIT(14) 230 + #define MAC_WUCSR_EEE_TX_WAKE_ BIT(13) 231 + #define MAC_WUCSR_EEE_RX_WAKE_ BIT(11) 232 + #define MAC_WUCSR_RFE_WAKE_FR_ BIT(9) 233 + #define MAC_WUCSR_PFDA_FR_ BIT(7) 234 + #define MAC_WUCSR_WUFR_ BIT(6) 235 + #define MAC_WUCSR_MPR_ BIT(5) 236 + #define MAC_WUCSR_BCAST_FR_ BIT(4) 231 237 #define MAC_WUCSR_PFDA_EN_ BIT(3) 232 238 #define MAC_WUCSR_WAKE_EN_ BIT(2) 233 239 #define MAC_WUCSR_MPEN_ BIT(1) 234 240 #define MAC_WUCSR_BCST_EN_ BIT(0) 235 241 236 242 #define MAC_WK_SRC (0x144) 243 + #define MAC_WK_SRC_ETH_PHY_WK_ BIT(17) 244 + #define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16) 245 + #define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15) 246 + #define MAC_WK_SRC_EEE_TX_WK_ BIT(14) 247 + #define MAC_WK_SRC_EEE_RX_WK_ BIT(13) 248 + #define MAC_WK_SRC_RFE_FR_WK_ BIT(12) 249 + #define MAC_WK_SRC_PFDA_FR_WK_ BIT(11) 250 + #define MAC_WK_SRC_MP_FR_WK_ BIT(10) 251 + #define MAC_WK_SRC_BCAST_FR_WK_ BIT(9) 252 + #define MAC_WK_SRC_WU_FR_WK_ BIT(8) 253 + #define MAC_WK_SRC_WK_FR_SAVED_ BIT(7) 254 + 237 255 #define MAC_MP_SO_HI (0x148) 238 256 #define MAC_MP_SO_LO (0x14C) 239 257 ··· 315 295 #define RFE_INDX(index) (0x580 + (index << 2)) 316 296 317 297 #define MAC_WUCSR2 (0x600) 298 + #define MAC_WUCSR2_NS_RCD_ BIT(7) 299 + #define MAC_WUCSR2_ARP_RCD_ BIT(6) 300 + #define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5) 301 + #define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4) 318 302 319 303 #define SGMII_ACC (0x720) 320 304 #define SGMII_ACC_SGMII_BZY_ BIT(31) ··· 1042 1018 LINK_2500_SLAVE 1043 1019 }; 1044 1020 1021 + #define MAC_SUPPORTED_WAKES (WAKE_BCAST | 
WAKE_UCAST | WAKE_MCAST | \ 1022 + WAKE_MAGIC | WAKE_ARP) 1045 1023 struct lan743x_adapter { 1046 1024 struct net_device *netdev; 1047 1025 struct mii_bus *mdiobus; ··· 1051 1025 #ifdef CONFIG_PM 1052 1026 u32 wolopts; 1053 1027 u8 sopass[SOPASS_MAX]; 1028 + u32 phy_wolopts; 1029 + u32 phy_wol_supported; 1054 1030 #endif 1055 1031 struct pci_dev *pdev; 1056 1032 struct lan743x_csr csr;
+2 -4
drivers/net/ethernet/qualcomm/qca_debug.c
··· 98 98 99 99 seq_printf(s, "IRQ : %d\n", 100 100 qca->spi_dev->irq); 101 - seq_printf(s, "INTR REQ : %u\n", 102 - qca->intr_req); 103 - seq_printf(s, "INTR SVC : %u\n", 104 - qca->intr_svc); 101 + seq_printf(s, "INTR : %lx\n", 102 + qca->intr); 105 103 106 104 seq_printf(s, "SPI max speed : %lu\n", 107 105 (unsigned long)qca->spi_dev->max_speed_hz);
+8 -8
drivers/net/ethernet/qualcomm/qca_spi.c
··· 35 35 36 36 #define MAX_DMA_BURST_LEN 5000 37 37 38 + #define SPI_INTR 0 39 + 38 40 /* Modules parameters */ 39 41 #define QCASPI_CLK_SPEED_MIN 1000000 40 42 #define QCASPI_CLK_SPEED_MAX 16000000 ··· 581 579 continue; 582 580 } 583 581 584 - if ((qca->intr_req == qca->intr_svc) && 582 + if (!test_bit(SPI_INTR, &qca->intr) && 585 583 !qca->txr.skb[qca->txr.head]) 586 584 schedule(); 587 585 588 586 set_current_state(TASK_RUNNING); 589 587 590 - netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n", 591 - qca->intr_req - qca->intr_svc, 588 + netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n", 589 + qca->intr, 592 590 qca->txr.skb[qca->txr.head]); 593 591 594 592 qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE); ··· 602 600 msleep(QCASPI_QCA7K_REBOOT_TIME_MS); 603 601 } 604 602 605 - if (qca->intr_svc != qca->intr_req) { 606 - qca->intr_svc = qca->intr_req; 603 + if (test_and_clear_bit(SPI_INTR, &qca->intr)) { 607 604 start_spi_intr_handling(qca, &intr_cause); 608 605 609 606 if (intr_cause & SPI_INT_CPU_ON) { ··· 664 663 { 665 664 struct qcaspi *qca = data; 666 665 667 - qca->intr_req++; 666 + set_bit(SPI_INTR, &qca->intr); 668 667 if (qca->spi_thread) 669 668 wake_up_process(qca->spi_thread); 670 669 ··· 680 679 if (!qca) 681 680 return -EINVAL; 682 681 683 - qca->intr_req = 1; 684 - qca->intr_svc = 0; 682 + set_bit(SPI_INTR, &qca->intr); 685 683 qca->sync = QCASPI_SYNC_UNKNOWN; 686 684 qcafrm_fsm_init_spi(&qca->frm_handle); 687 685
+1 -2
drivers/net/ethernet/qualcomm/qca_spi.h
··· 81 81 struct qcafrm_handle frm_handle; 82 82 struct sk_buff *rx_skb; 83 83 84 - unsigned int intr_req; 85 - unsigned int intr_svc; 84 + unsigned long intr; 86 85 u16 reset_count; 87 86 88 87 #ifdef CONFIG_DEBUG_FS
+5 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
··· 218 218 { 219 219 u32 num_snapshot, ts_status, tsync_int; 220 220 struct ptp_clock_event event; 221 + u32 acr_value, channel; 221 222 unsigned long flags; 222 223 u64 ptp_time; 223 224 int i; ··· 244 243 num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> 245 244 GMAC_TIMESTAMP_ATSNS_SHIFT; 246 245 246 + acr_value = readl(priv->ptpaddr + PTP_ACR); 247 + channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value)); 248 + 247 249 for (i = 0; i < num_snapshot; i++) { 248 250 read_lock_irqsave(&priv->ptp_lock, flags); 249 251 get_ptptime(priv->ptpaddr, &ptp_time); 250 252 read_unlock_irqrestore(&priv->ptp_lock, flags); 251 253 event.type = PTP_CLOCK_EXTTS; 252 - event.index = 0; 254 + event.index = channel; 253 255 event.timestamp = ptp_time; 254 256 ptp_clock_event(priv->ptp_clock, &event); 255 257 }
+22 -18
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
··· 358 358 359 359 port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope; 360 360 361 - /* Port Transmit Rate and Speed Divider */ 362 - switch (div_s64(port_transmit_rate_kbps, 1000)) { 363 - case SPEED_10000: 364 - case SPEED_5000: 365 - ptr = 32; 366 - break; 367 - case SPEED_2500: 368 - case SPEED_1000: 369 - ptr = 8; 370 - break; 371 - case SPEED_100: 372 - ptr = 4; 373 - break; 374 - default: 375 - netdev_err(priv->dev, 376 - "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n", 377 - port_transmit_rate_kbps); 378 - return -EINVAL; 361 + if (qopt->enable) { 362 + /* Port Transmit Rate and Speed Divider */ 363 + switch (div_s64(port_transmit_rate_kbps, 1000)) { 364 + case SPEED_10000: 365 + case SPEED_5000: 366 + ptr = 32; 367 + break; 368 + case SPEED_2500: 369 + case SPEED_1000: 370 + ptr = 8; 371 + break; 372 + case SPEED_100: 373 + ptr = 4; 374 + break; 375 + default: 376 + netdev_err(priv->dev, 377 + "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n", 378 + port_transmit_rate_kbps); 379 + return -EINVAL; 380 + } 381 + } else { 382 + ptr = 0; 379 383 } 380 384 381 385 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+34 -4
drivers/net/phy/dp83tg720.c
··· 17 17 #define DP83TG720S_PHY_RESET 0x1f 18 18 #define DP83TG720S_HW_RESET BIT(15) 19 19 20 + #define DP83TG720S_LPS_CFG3 0x18c 21 + /* Power modes are documented as bit fields but used as values */ 22 + /* Power Mode 0 is Normal mode */ 23 + #define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0) 24 + 20 25 #define DP83TG720S_RGMII_DELAY_CTRL 0x602 21 26 /* In RGMII mode, Enable or disable the internal delay for RXD */ 22 27 #define DP83TG720S_RGMII_RX_CLK_SEL BIT(1) ··· 36 31 37 32 static int dp83tg720_config_aneg(struct phy_device *phydev) 38 33 { 34 + int ret; 35 + 39 36 /* Autoneg is not supported and this PHY supports only one speed. 40 37 * We need to care only about master/slave configuration if it was 41 38 * changed by user. 42 39 */ 43 - return genphy_c45_pma_baset1_setup_master_slave(phydev); 40 + ret = genphy_c45_pma_baset1_setup_master_slave(phydev); 41 + if (ret) 42 + return ret; 43 + 44 + /* Re-read role configuration to make changes visible even if 45 + * the link is in administrative down state. 46 + */ 47 + return genphy_c45_pma_baset1_read_master_slave(phydev); 44 48 } 45 49 46 50 static int dp83tg720_read_status(struct phy_device *phydev) ··· 78 64 return ret; 79 65 80 66 /* After HW reset we need to restore master/slave configuration. 67 + * genphy_c45_pma_baset1_read_master_slave() call will be done 68 + * by the dp83tg720_config_aneg() function. 81 69 */ 82 70 ret = dp83tg720_config_aneg(phydev); 83 71 if (ret) ··· 170 154 */ 171 155 usleep_range(1000, 2000); 172 156 173 - if (phy_interface_is_rgmii(phydev)) 174 - return dp83tg720_config_rgmii_delay(phydev); 157 + if (phy_interface_is_rgmii(phydev)) { 158 + ret = dp83tg720_config_rgmii_delay(phydev); 159 + if (ret) 160 + return ret; 161 + } 175 162 176 - return 0; 163 + /* In case the PHY is bootstrapped in managed mode, we need to 164 + * wake it. 
165 + */ 166 + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LPS_CFG3, 167 + DP83TG720S_LPS_CFG3_PWR_MODE_0); 168 + if (ret) 169 + return ret; 170 + 171 + /* Make role configuration visible for ethtool on init and after 172 + * rest. 173 + */ 174 + return genphy_c45_pma_baset1_read_master_slave(phydev); 177 175 } 178 176 179 177 static struct phy_driver dp83tg720_driver[] = {
+38 -20
drivers/net/phy/mxl-gpy.c
··· 107 107 108 108 u8 fw_major; 109 109 u8 fw_minor; 110 + u32 wolopts; 110 111 111 112 /* It takes 3 seconds to fully switch out of loopback mode before 112 113 * it can safely re-enter loopback mode. Record the time when ··· 222 221 } 223 222 #endif 224 223 224 + static int gpy_ack_interrupt(struct phy_device *phydev) 225 + { 226 + int ret; 227 + 228 + /* Clear all pending interrupts */ 229 + ret = phy_read(phydev, PHY_ISTAT); 230 + return ret < 0 ? ret : 0; 231 + } 232 + 225 233 static int gpy_mbox_read(struct phy_device *phydev, u32 addr) 226 234 { 227 235 struct gpy_priv *priv = phydev->priv; ··· 272 262 273 263 static int gpy_config_init(struct phy_device *phydev) 274 264 { 275 - int ret; 276 - 277 - /* Mask all interrupts */ 278 - ret = phy_write(phydev, PHY_IMASK, 0); 279 - if (ret) 280 - return ret; 281 - 282 - /* Clear all pending interrupts */ 283 - ret = phy_read(phydev, PHY_ISTAT); 284 - return ret < 0 ? ret : 0; 265 + /* Nothing to configure. Configuration Requirement Placeholder */ 266 + return 0; 285 267 } 286 268 287 269 static int gpy21x_config_init(struct phy_device *phydev) ··· 629 627 630 628 static int gpy_config_intr(struct phy_device *phydev) 631 629 { 630 + struct gpy_priv *priv = phydev->priv; 632 631 u16 mask = 0; 632 + int ret; 633 + 634 + ret = gpy_ack_interrupt(phydev); 635 + if (ret) 636 + return ret; 633 637 634 638 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 635 639 mask = PHY_IMASK_MASK; 640 + 641 + if (priv->wolopts & WAKE_MAGIC) 642 + mask |= PHY_IMASK_WOL; 643 + 644 + if (priv->wolopts & WAKE_PHY) 645 + mask |= PHY_IMASK_LSTC; 636 646 637 647 return phy_write(phydev, PHY_IMASK, mask); 638 648 } ··· 692 678 struct ethtool_wolinfo *wol) 693 679 { 694 680 struct net_device *attach_dev = phydev->attached_dev; 681 + struct gpy_priv *priv = phydev->priv; 695 682 int ret; 696 683 697 684 if (wol->wolopts & WAKE_MAGIC) { ··· 740 725 ret = phy_read(phydev, PHY_ISTAT); 741 726 if (ret < 0) 742 727 return ret; 728 + 729 + 
priv->wolopts |= WAKE_MAGIC; 743 730 } else { 744 731 /* Disable magic packet matching */ 745 732 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, ··· 749 732 WOL_EN); 750 733 if (ret < 0) 751 734 return ret; 735 + 736 + /* Disable the WOL interrupt */ 737 + ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL); 738 + if (ret < 0) 739 + return ret; 740 + 741 + priv->wolopts &= ~WAKE_MAGIC; 752 742 } 753 743 754 744 if (wol->wolopts & WAKE_PHY) { ··· 772 748 if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC)) 773 749 phy_trigger_machine(phydev); 774 750 751 + priv->wolopts |= WAKE_PHY; 775 752 return 0; 776 753 } 777 754 755 + priv->wolopts &= ~WAKE_PHY; 778 756 /* Disable the link state change interrupt */ 779 757 return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC); 780 758 } ··· 784 758 static void gpy_get_wol(struct phy_device *phydev, 785 759 struct ethtool_wolinfo *wol) 786 760 { 787 - int ret; 761 + struct gpy_priv *priv = phydev->priv; 788 762 789 763 wol->supported = WAKE_MAGIC | WAKE_PHY; 790 - wol->wolopts = 0; 791 - 792 - ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL); 793 - if (ret & WOL_EN) 794 - wol->wolopts |= WAKE_MAGIC; 795 - 796 - ret = phy_read(phydev, PHY_IMASK); 797 - if (ret & PHY_IMASK_LSTC) 798 - wol->wolopts |= WAKE_PHY; 764 + wol->wolopts = priv->wolopts; 799 765 } 800 766 801 767 static int gpy_loopback(struct phy_device *phydev, bool enable)
+13 -5
drivers/net/usb/ax88179_178a.c
··· 174 174 u32 wol_supported; 175 175 u32 wolopts; 176 176 u8 disconnecting; 177 - u8 initialized; 178 177 }; 179 178 180 179 struct ax88179_int_data { ··· 1677 1678 1678 1679 static int ax88179_net_reset(struct usbnet *dev) 1679 1680 { 1680 - struct ax88179_data *ax179_data = dev->driver_priv; 1681 + u16 tmp16; 1681 1682 1682 - if (ax179_data->initialized) 1683 + ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR, 1684 + 2, &tmp16); 1685 + if (tmp16) { 1686 + ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE, 1687 + 2, 2, &tmp16); 1688 + if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) { 1689 + tmp16 |= AX_MEDIUM_RECEIVE_EN; 1690 + ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE, 1691 + 2, 2, &tmp16); 1692 + } 1693 + } else { 1683 1694 ax88179_reset(dev); 1684 - else 1685 - ax179_data->initialized = 1; 1695 + } 1686 1696 1687 1697 return 0; 1688 1698 }
+2 -1
drivers/net/usb/rtl8150.c
··· 778 778 struct ethtool_link_ksettings *ecmd) 779 779 { 780 780 rtl8150_t *dev = netdev_priv(netdev); 781 - short lpa, bmcr; 781 + short lpa = 0; 782 + short bmcr = 0; 782 783 u32 supported; 783 784 784 785 supported = (SUPPORTED_10baseT_Half |
+29 -3
drivers/net/virtio_net.c
··· 1360 1360 if (unlikely(hdr->hdr.gso_type)) 1361 1361 goto err_xdp; 1362 1362 1363 + /* Partially checksummed packets must be dropped. */ 1364 + if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) 1365 + goto err_xdp; 1366 + 1363 1367 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 1364 1368 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1365 1369 ··· 1681 1677 if (unlikely(hdr->hdr.gso_type)) 1682 1678 return NULL; 1683 1679 1680 + /* Partially checksummed packets must be dropped. */ 1681 + if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) 1682 + return NULL; 1683 + 1684 1684 /* Now XDP core assumes frag size is PAGE_SIZE, but buffers 1685 1685 * with headroom may add hole in truesize, which 1686 1686 * make their length exceed PAGE_SIZE. So we disabled the ··· 1951 1943 struct net_device *dev = vi->dev; 1952 1944 struct sk_buff *skb; 1953 1945 struct virtio_net_common_hdr *hdr; 1946 + u8 flags; 1954 1947 1955 1948 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { 1956 1949 pr_debug("%s: short packet %i\n", dev->name, len); ··· 1959 1950 virtnet_rq_free_buf(vi, rq, buf); 1960 1951 return; 1961 1952 } 1953 + 1954 + /* 1. Save the flags early, as the XDP program might overwrite them. 1955 + * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID 1956 + * stay valid after XDP processing. 1957 + * 2. XDP doesn't work with partially checksummed packets (refer to 1958 + * virtnet_xdp_set()), so packets marked as 1959 + * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing. 
1960 + */ 1961 + flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; 1962 1962 1963 1963 if (vi->mergeable_rx_bufs) 1964 1964 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, ··· 1984 1966 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) 1985 1967 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); 1986 1968 1987 - if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) 1969 + if (flags & VIRTIO_NET_HDR_F_DATA_VALID) 1988 1970 skb->ip_summed = CHECKSUM_UNNECESSARY; 1989 1971 1990 1972 if (virtio_net_hdr_to_skb(skb, &hdr->hdr, ··· 5684 5666 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 5685 5667 /* (!csum && gso) case will be fixed by register_netdev() */ 5686 5668 } 5687 - if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 5688 - dev->features |= NETIF_F_RXCSUM; 5669 + 5670 + /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't 5671 + * need to calculate checksums for partially checksummed packets, 5672 + * as they're considered valid by the upper layer. 5673 + * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only 5674 + * receives fully checksummed packets. The device may assist in 5675 + * validating these packets' checksums, so the driver won't have to. 5676 + */ 5677 + dev->features |= NETIF_F_RXCSUM; 5678 + 5689 5679 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 5690 5680 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) 5691 5681 dev->features |= NETIF_F_GRO_HW;
+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 4795 4795 4796 4796 if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) { 4797 4797 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); 4798 - } else if (fw_ver == 3) { 4798 + } else if (fw_ver >= 3) { 4799 4799 ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, 4800 4800 ROC_ACTIVITY_HOTSPOT); 4801 4801 } else {
+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
··· 1830 1830 */ 1831 1831 if (!iwl_mvm_is_scan_fragmented(params->type)) { 1832 1832 if (!cfg80211_channel_is_psc(params->channels[i]) || 1833 - flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) { 1833 + psc_no_listen) { 1834 1834 if (unsolicited_probe_on_chan) { 1835 1835 max_s_ssids = 2; 1836 1836 max_bssids = 6;
+2
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
··· 1238 1238 if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) { 1239 1239 IWL_DEBUG_TE(mvm, 1240 1240 "No remain on channel event\n"); 1241 + mutex_unlock(&mvm->mutex); 1241 1242 return; 1242 1243 } 1243 1244 ··· 1254 1253 te_data = iwl_mvm_get_roc_te(mvm); 1255 1254 if (!te_data) { 1256 1255 IWL_WARN(mvm, "No remain on channel event\n"); 1256 + mutex_unlock(&mvm->mutex); 1257 1257 return; 1258 1258 } 1259 1259
+160 -29
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
··· 187 187 [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN, 188 188 }; 189 189 190 + static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { 191 + [QPHY_SW_RESET] = QPHY_V6_N4_PCS_SW_RESET, 192 + [QPHY_START_CTRL] = QPHY_V6_N4_PCS_START_CONTROL, 193 + [QPHY_PCS_STATUS] = QPHY_V6_N4_PCS_PCS_STATUS1, 194 + [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_N4_PCS_POWER_DOWN_CONTROL, 195 + 196 + /* In PCS_USB */ 197 + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL, 198 + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR, 199 + 200 + [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL, 201 + [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS, 202 + [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS, 203 + [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 204 + 205 + [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS, 206 + [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV, 207 + 208 + [QPHY_TX_TX_POL_INV] = QSERDES_V6_N4_TX_TX_POL_INV, 209 + [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_N4_TX_TX_DRV_LVL, 210 + [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_N4_TX_TX_EMP_POST1_LVL, 211 + [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_N4_TX_HIGHZ_DRVR_EN, 212 + [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN, 213 + }; 214 + 190 215 static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = { 191 216 QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), 192 217 QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), ··· 1022 997 QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f), 1023 998 }; 1024 999 1000 + static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl[] = { 1001 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15), 1002 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b), 1003 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02), 1004 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c), 1005 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 
0x06), 1006 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30), 1007 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07), 1008 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36), 1009 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16), 1010 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06), 1011 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), 1012 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00), 1013 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0), 1014 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12), 1015 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), 1016 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00), 1017 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00), 1018 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a), 1019 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14), 1020 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00), 1021 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17), 1022 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f), 1023 + }; 1024 + 1025 1025 static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = { 1026 1026 QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40), 1027 1027 QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30), ··· 1059 1009 QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x0c), 1060 1010 QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX, 0x0c), 1061 1011 QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4), 1012 + }; 1013 + 1014 + static const struct qmp_phy_init_tbl qmp_v6_n4_dp_tx_tbl[] = { 1015 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_VMODE_CTRL1, 0x40), 1016 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN, 0x00), 1017 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_INTERFACE_SELECT, 0xff), 1018 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_CLKBUF_ENABLE, 0x0f), 1019 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RESET_TSYNC_EN, 0x03), 1020 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN, 0x0f), 1021 + 
QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00), 1022 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x11), 1023 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x11), 1024 + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TX_BAND, 0x1), 1062 1025 }; 1063 1026 1064 1027 static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = { ··· 1120 1057 QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), 1121 1058 QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), 1122 1059 QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), 1060 + }; 1061 + 1062 + static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_rbr[] = { 1063 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05), 1064 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), 1065 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04), 1066 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), 1067 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37), 1068 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04), 1069 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), 1070 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), 1071 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), 1072 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), 1073 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), 1074 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), 1075 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), 1076 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), 1077 + }; 1078 + 1079 + static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr[] = { 1080 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03), 1081 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), 1082 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), 1083 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), 1084 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07), 1085 + 
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07), 1086 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), 1087 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), 1088 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), 1089 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), 1090 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), 1091 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), 1092 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), 1093 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), 1094 + }; 1095 + 1096 + static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr2[] = { 1097 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01), 1098 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46), 1099 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), 1100 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05), 1101 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f), 1102 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e), 1103 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97), 1104 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10), 1105 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), 1106 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), 1107 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), 1108 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), 1109 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x18), 1110 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x02), 1111 + }; 1112 + 1113 + static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr3[] = { 1114 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00), 1115 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), 1116 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), 1117 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), 1118 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17), 1119 + 
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15), 1120 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), 1121 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), 1122 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), 1123 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), 1124 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), 1125 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), 1126 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), 1127 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), 1123 1128 }; 1124 1129 1125 1130 static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = { ··· 1404 1273 }; 1405 1274 1406 1275 static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = { 1407 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), 1408 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), 1409 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4), 1410 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89), 1411 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20), 1412 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13), 1413 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21), 1414 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55), 1415 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a), 1416 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4), 1417 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30), 1418 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c), 1419 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b), 1420 - QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10), 1276 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), 1277 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), 1278 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1, 0xc4), 1279 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2, 0x89), 1280 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3, 0x20), 1281 + 
QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6, 0x13), 1282 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1, 0x21), 1283 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_SIGDET_LVL, 0x55), 1284 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_CONFIG, 0x0a), 1285 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1, 0xd4), 1286 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2, 0x30), 1287 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG, 0x0c), 1288 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG1, 0x4b), 1289 + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10), 1421 1290 }; 1422 1291 1423 1292 static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = { ··· 1925 1794 .pcs_usb_tbl = x1e80100_usb43dp_pcs_usb_tbl, 1926 1795 .pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl), 1927 1796 1928 - .dp_serdes_tbl = qmp_v6_dp_serdes_tbl, 1929 - .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl), 1930 - .dp_tx_tbl = qmp_v6_dp_tx_tbl, 1931 - .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl), 1797 + .dp_serdes_tbl = qmp_v6_n4_dp_serdes_tbl, 1798 + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl), 1799 + .dp_tx_tbl = qmp_v6_n4_dp_tx_tbl, 1800 + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_tx_tbl), 1932 1801 1933 - .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr, 1934 - .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr), 1935 - .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr, 1936 - .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr), 1937 - .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2, 1938 - .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2), 1939 - .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3, 1940 - .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3), 1802 + .serdes_tbl_rbr = qmp_v6_n4_dp_serdes_tbl_rbr, 1803 + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_rbr), 1804 + .serdes_tbl_hbr = qmp_v6_n4_dp_serdes_tbl_hbr, 1805 + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr), 1806 + .serdes_tbl_hbr2 = 
qmp_v6_n4_dp_serdes_tbl_hbr2, 1807 + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr2), 1808 + .serdes_tbl_hbr3 = qmp_v6_n4_dp_serdes_tbl_hbr3, 1809 + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr3), 1941 1810 1942 - .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr, 1943 - .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr, 1811 + .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr, 1812 + .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr, 1944 1813 .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2, 1945 1814 .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2, 1946 1815 ··· 1953 1822 .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), 1954 1823 .vreg_list = qmp_phy_vreg_l, 1955 1824 .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), 1956 - .regs = qmp_v45_usb3phy_regs_layout, 1825 + .regs = qmp_v6_n4_usb3phy_regs_layout, 1957 1826 }; 1958 1827 1959 1828 static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
+32
drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2023, Linaro Limited 4 + */ 5 + 6 + #ifndef QCOM_PHY_QMP_PCS_V6_N4_H_ 7 + #define QCOM_PHY_QMP_PCS_V6_N4_H_ 8 + 9 + /* Only for QMP V6 N4 PHY - USB/PCIe PCS registers */ 10 + #define QPHY_V6_N4_PCS_SW_RESET 0x000 11 + #define QPHY_V6_N4_PCS_PCS_STATUS1 0x014 12 + #define QPHY_V6_N4_PCS_POWER_DOWN_CONTROL 0x040 13 + #define QPHY_V6_N4_PCS_START_CONTROL 0x044 14 + #define QPHY_V6_N4_PCS_POWER_STATE_CONFIG1 0x090 15 + #define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1 0x0c4 16 + #define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2 0x0c8 17 + #define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3 0x0cc 18 + #define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6 0x0d8 19 + #define QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1 0x0dc 20 + #define QPHY_V6_N4_PCS_RX_SIGDET_LVL 0x188 21 + #define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190 22 + #define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194 23 + #define QPHY_V6_N4_PCS_RATE_SLEW_CNTRL1 0x198 24 + #define QPHY_V6_N4_PCS_RX_CONFIG 0x1b0 25 + #define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1 0x1c0 26 + #define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2 0x1c4 27 + #define QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG 0x1d0 28 + #define QPHY_V6_N4_PCS_EQ_CONFIG1 0x1dc 29 + #define QPHY_V6_N4_PCS_EQ_CONFIG2 0x1e0 30 + #define QPHY_V6_N4_PCS_EQ_CONFIG5 0x1ec 31 + 32 + #endif
+13
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
··· 6 6 #ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_ 7 7 #define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_ 8 8 9 + #define QSERDES_V6_N4_TX_CLKBUF_ENABLE 0x08 10 + #define QSERDES_V6_N4_TX_TX_EMP_POST1_LVL 0x0c 11 + #define QSERDES_V6_N4_TX_TX_DRV_LVL 0x14 12 + #define QSERDES_V6_N4_TX_RESET_TSYNC_EN 0x1c 13 + #define QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN 0x20 9 14 #define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX 0x30 10 15 #define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX 0x34 16 + #define QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN 0x48 17 + #define QSERDES_V6_N4_TX_HIGHZ_DRVR_EN 0x4c 18 + #define QSERDES_V6_N4_TX_TX_POL_INV 0x50 19 + #define QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN 0x54 11 20 #define QSERDES_V6_N4_TX_LANE_MODE_1 0x78 12 21 #define QSERDES_V6_N4_TX_LANE_MODE_2 0x7c 13 22 #define QSERDES_V6_N4_TX_LANE_MODE_3 0x80 23 + #define QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN 0xac 24 + #define QSERDES_V6_N4_TX_TX_BAND 0xd8 25 + #define QSERDES_V6_N4_TX_INTERFACE_SELECT 0xe4 26 + #define QSERDES_V6_N4_TX_VMODE_CTRL1 0xb0 14 27 15 28 #define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2 0x8 16 29 #define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2 0x18
+2
drivers/phy/qualcomm/phy-qcom-qmp.h
··· 46 46 47 47 #include "phy-qcom-qmp-pcs-v6.h" 48 48 49 + #include "phy-qcom-qmp-pcs-v6-n4.h" 50 + 49 51 #include "phy-qcom-qmp-pcs-v6_20.h" 50 52 51 53 #include "phy-qcom-qmp-pcs-v7.h"
+1 -2
drivers/ptp/ptp_sysfs.c
··· 296 296 if (max < ptp->n_vclocks) 297 297 goto out; 298 298 299 - size = sizeof(int) * max; 300 - vclock_index = kzalloc(size, GFP_KERNEL); 299 + vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL); 301 300 if (!vclock_index) { 302 301 err = -ENOMEM; 303 302 goto out;
+16 -7
drivers/pwm/pwm-stm32.c
··· 321 321 * First we need to find the minimal value for prescaler such that 322 322 * 323 323 * period_ns * clkrate 324 - * ------------------------------ 324 + * ------------------------------ < max_arr + 1 325 325 * NSEC_PER_SEC * (prescaler + 1) 326 326 * 327 - * isn't bigger than max_arr. 327 + * This equation is equivalent to 328 + * 329 + * period_ns * clkrate 330 + * ---------------------------- < prescaler + 1 331 + * NSEC_PER_SEC * (max_arr + 1) 332 + * 333 + * Using integer division and knowing that the right hand side is 334 + * integer, this is further equivalent to 335 + * 336 + * (period_ns * clkrate) // (NSEC_PER_SEC * (max_arr + 1)) ≤ prescaler 328 337 */ 329 338 330 339 prescaler = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk), 331 - (u64)NSEC_PER_SEC * priv->max_arr); 332 - if (prescaler > 0) 333 - prescaler -= 1; 334 - 340 + (u64)NSEC_PER_SEC * ((u64)priv->max_arr + 1)); 335 341 if (prescaler > MAX_TIM_PSC) 336 342 return -EINVAL; 337 343 338 344 prd = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk), 339 345 (u64)NSEC_PER_SEC * (prescaler + 1)); 346 + if (!prd) 347 + return -EINVAL; 340 348 341 349 /* 342 350 * All channels share the same prescaler and counter so when two ··· 681 673 * .apply() won't overflow. 682 674 */ 683 675 if (clk_get_rate(priv->clk) > 1000000000) 684 - return dev_err_probe(dev, -EINVAL, "Failed to lock clock\n"); 676 + return dev_err_probe(dev, -EINVAL, "Clock freq too high (%lu)\n", 677 + clk_get_rate(priv->clk)); 685 678 686 679 chip->ops = &stm32pwm_ops; 687 680
+19 -14
drivers/regulator/axp20x-regulator.c
··· 140 140 141 141 #define AXP717_DCDC1_NUM_VOLTAGES 88 142 142 #define AXP717_DCDC2_NUM_VOLTAGES 107 143 - #define AXP717_DCDC3_NUM_VOLTAGES 104 143 + #define AXP717_DCDC3_NUM_VOLTAGES 103 144 144 #define AXP717_DCDC_V_OUT_MASK GENMASK(6, 0) 145 145 #define AXP717_LDO_V_OUT_MASK GENMASK(4, 0) 146 146 ··· 763 763 REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000), 764 764 }; 765 765 766 + /* 767 + * The manual says that the last voltage is 3.4V, encoded as 0b1101011 (107), 768 + * but every other method proves that this is wrong, so it's really 106 that 769 + * programs the final 3.4V. 770 + */ 766 771 static const struct linear_range axp717_dcdc2_ranges[] = { 767 772 REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000), 768 773 REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000), 769 - REGULATOR_LINEAR_RANGE(1600000, 88, 107, 100000), 774 + REGULATOR_LINEAR_RANGE(1600000, 88, 106, 100000), 770 775 }; 771 776 772 777 static const struct linear_range axp717_dcdc3_ranges[] = { ··· 795 790 AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100, 796 791 AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK, 797 792 AXP717_DCDC_OUTPUT_CONTROL, BIT(3)), 798 - AXP_DESC(AXP717, ALDO1, "aldo1", "vin1", 500, 3500, 100, 793 + AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100, 799 794 AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK, 800 795 AXP717_LDO0_OUTPUT_CONTROL, BIT(0)), 801 - AXP_DESC(AXP717, ALDO2, "aldo2", "vin1", 500, 3500, 100, 796 + AXP_DESC(AXP717, ALDO2, "aldo2", "aldoin", 500, 3500, 100, 802 797 AXP717_ALDO2_CONTROL, AXP717_LDO_V_OUT_MASK, 803 798 AXP717_LDO0_OUTPUT_CONTROL, BIT(1)), 804 - AXP_DESC(AXP717, ALDO3, "aldo3", "vin1", 500, 3500, 100, 799 + AXP_DESC(AXP717, ALDO3, "aldo3", "aldoin", 500, 3500, 100, 805 800 AXP717_ALDO3_CONTROL, AXP717_LDO_V_OUT_MASK, 806 801 AXP717_LDO0_OUTPUT_CONTROL, BIT(2)), 807 - AXP_DESC(AXP717, ALDO4, "aldo4", "vin1", 500, 3500, 100, 802 + AXP_DESC(AXP717, ALDO4, "aldo4", "aldoin", 500, 3500, 100, 808 803 AXP717_ALDO4_CONTROL, 
AXP717_LDO_V_OUT_MASK, 809 804 AXP717_LDO0_OUTPUT_CONTROL, BIT(3)), 810 - AXP_DESC(AXP717, BLDO1, "bldo1", "vin1", 500, 3500, 100, 805 + AXP_DESC(AXP717, BLDO1, "bldo1", "bldoin", 500, 3500, 100, 811 806 AXP717_BLDO1_CONTROL, AXP717_LDO_V_OUT_MASK, 812 807 AXP717_LDO0_OUTPUT_CONTROL, BIT(4)), 813 - AXP_DESC(AXP717, BLDO2, "bldo2", "vin1", 500, 3500, 100, 808 + AXP_DESC(AXP717, BLDO2, "bldo2", "bldoin", 500, 3500, 100, 814 809 AXP717_BLDO2_CONTROL, AXP717_LDO_V_OUT_MASK, 815 810 AXP717_LDO0_OUTPUT_CONTROL, BIT(5)), 816 - AXP_DESC(AXP717, BLDO3, "bldo3", "vin1", 500, 3500, 100, 811 + AXP_DESC(AXP717, BLDO3, "bldo3", "bldoin", 500, 3500, 100, 817 812 AXP717_BLDO3_CONTROL, AXP717_LDO_V_OUT_MASK, 818 813 AXP717_LDO0_OUTPUT_CONTROL, BIT(6)), 819 - AXP_DESC(AXP717, BLDO4, "bldo4", "vin1", 500, 3500, 100, 814 + AXP_DESC(AXP717, BLDO4, "bldo4", "bldoin", 500, 3500, 100, 820 815 AXP717_BLDO4_CONTROL, AXP717_LDO_V_OUT_MASK, 821 816 AXP717_LDO0_OUTPUT_CONTROL, BIT(7)), 822 - AXP_DESC(AXP717, CLDO1, "cldo1", "vin1", 500, 3500, 100, 817 + AXP_DESC(AXP717, CLDO1, "cldo1", "cldoin", 500, 3500, 100, 823 818 AXP717_CLDO1_CONTROL, AXP717_LDO_V_OUT_MASK, 824 819 AXP717_LDO1_OUTPUT_CONTROL, BIT(0)), 825 - AXP_DESC(AXP717, CLDO2, "cldo2", "vin1", 500, 3500, 100, 820 + AXP_DESC(AXP717, CLDO2, "cldo2", "cldoin", 500, 3500, 100, 826 821 AXP717_CLDO2_CONTROL, AXP717_LDO_V_OUT_MASK, 827 822 AXP717_LDO1_OUTPUT_CONTROL, BIT(1)), 828 - AXP_DESC(AXP717, CLDO3, "cldo3", "vin1", 500, 3500, 100, 823 + AXP_DESC(AXP717, CLDO3, "cldo3", "cldoin", 500, 3500, 100, 829 824 AXP717_CLDO3_CONTROL, AXP717_LDO_V_OUT_MASK, 830 825 AXP717_LDO1_OUTPUT_CONTROL, BIT(2)), 831 - AXP_DESC(AXP717, CLDO4, "cldo4", "vin1", 500, 3500, 100, 826 + AXP_DESC(AXP717, CLDO4, "cldo4", "cldoin", 500, 3500, 100, 832 827 AXP717_CLDO4_CONTROL, AXP717_LDO_V_OUT_MASK, 833 828 AXP717_LDO1_OUTPUT_CONTROL, BIT(3)), 834 829 AXP_DESC(AXP717, CPUSLDO, "cpusldo", "vin1", 500, 1400, 50,
+1 -1
drivers/regulator/bd71815-regulator.c
··· 256 256 * 10: 2.50mV/usec 10mV 4uS 257 257 * 11: 1.25mV/usec 10mV 8uS 258 258 */ 259 - static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 }; 259 + static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 }; 260 260 261 261 static int bd7181x_led_set_current_limit(struct regulator_dev *rdev, 262 262 int min_uA, int max_uA)
+1
drivers/regulator/core.c
··· 3347 3347 3348 3348 return map ? map : ERR_PTR(-EOPNOTSUPP); 3349 3349 } 3350 + EXPORT_SYMBOL_GPL(regulator_get_regmap); 3350 3351 3351 3352 /** 3352 3353 * regulator_get_hardware_vsel_register - get the HW voltage selector register
+4 -8
drivers/regulator/tps6594-regulator.c
··· 653 653 } 654 654 } 655 655 656 - if (tps->chip_id == LP8764) { 657 - nr_buck = ARRAY_SIZE(buck_regs); 658 - nr_ldo = 0; 659 - nr_types = REGS_INT_NB; 660 - } else if (tps->chip_id == TPS65224) { 656 + if (tps->chip_id == TPS65224) { 661 657 nr_buck = ARRAY_SIZE(tps65224_buck_regs); 662 658 nr_ldo = ARRAY_SIZE(tps65224_ldo_regs); 663 - nr_types = REGS_INT_NB; 659 + nr_types = TPS65224_REGS_INT_NB; 664 660 } else { 665 661 nr_buck = ARRAY_SIZE(buck_regs); 666 - nr_ldo = ARRAY_SIZE(tps6594_ldo_regs); 667 - nr_types = TPS65224_REGS_INT_NB; 662 + nr_ldo = (tps->chip_id == LP8764) ? 0 : ARRAY_SIZE(tps6594_ldo_regs); 663 + nr_types = REGS_INT_NB; 668 664 } 669 665 670 666 reg_irq_nb = nr_types * (nr_buck + nr_ldo);
+4
drivers/scsi/sd.c
··· 63 63 #include <scsi/scsi_cmnd.h> 64 64 #include <scsi/scsi_dbg.h> 65 65 #include <scsi/scsi_device.h> 66 + #include <scsi/scsi_devinfo.h> 66 67 #include <scsi/scsi_driver.h> 67 68 #include <scsi/scsi_eh.h> 68 69 #include <scsi/scsi_host.h> ··· 3118 3117 struct scsi_sense_hdr sshdr; 3119 3118 struct scsi_mode_data data; 3120 3119 int res; 3120 + 3121 + if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS) 3122 + return; 3121 3123 3122 3124 res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a, 3123 3125 /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
+2 -2
drivers/soc/tegra/fuse/fuse-tegra.c
··· 127 127 128 128 static int tegra_fuse_add_lookups(struct tegra_fuse *fuse) 129 129 { 130 - fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups), 131 - fuse->soc->num_lookups, GFP_KERNEL); 130 + fuse->lookups = kmemdup_array(fuse->soc->lookups, fuse->soc->num_lookups, 131 + sizeof(*fuse->lookups), GFP_KERNEL); 132 132 if (!fuse->lookups) 133 133 return -ENOMEM; 134 134
+3
drivers/soundwire/amd_manager.c
··· 571 571 amd_manager->wake_en_mask = wake_en_mask; 572 572 fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask); 573 573 amd_manager->power_mode_mask = power_mode_mask; 574 + 575 + fwnode_handle_put(link); 576 + 574 577 return 0; 575 578 } 576 579
+5 -1
drivers/soundwire/intel_auxdevice.c
··· 155 155 SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY; 156 156 157 157 intel_prop = devm_kzalloc(bus->dev, sizeof(*intel_prop), GFP_KERNEL); 158 - if (!intel_prop) 158 + if (!intel_prop) { 159 + fwnode_handle_put(link); 159 160 return -ENOMEM; 161 + } 160 162 161 163 /* initialize with hardware defaults, in case the properties are not found */ 162 164 intel_prop->doaise = 0x1; ··· 185 183 intel_prop->doais, 186 184 intel_prop->dodse, 187 185 intel_prop->dods); 186 + 187 + fwnode_handle_put(link); 188 188 189 189 return 0; 190 190 }
+24 -6
drivers/soundwire/mipi_disco.c
··· 66 66 prop->clk_freq = devm_kcalloc(bus->dev, prop->num_clk_freq, 67 67 sizeof(*prop->clk_freq), 68 68 GFP_KERNEL); 69 - if (!prop->clk_freq) 69 + if (!prop->clk_freq) { 70 + fwnode_handle_put(link); 70 71 return -ENOMEM; 72 + } 71 73 72 74 fwnode_property_read_u32_array(link, 73 75 "mipi-sdw-clock-frequencies-supported", ··· 94 92 prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears, 95 93 sizeof(*prop->clk_gears), 96 94 GFP_KERNEL); 97 - if (!prop->clk_gears) 95 + if (!prop->clk_gears) { 96 + fwnode_handle_put(link); 98 97 return -ENOMEM; 98 + } 99 99 100 100 fwnode_property_read_u32_array(link, 101 101 "mipi-sdw-supported-clock-gears", ··· 119 115 120 116 fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold", 121 117 &prop->err_threshold); 118 + 119 + fwnode_handle_put(link); 122 120 123 121 return 0; 124 122 } ··· 203 197 dpn[i].num_words, 204 198 sizeof(*dpn[i].words), 205 199 GFP_KERNEL); 206 - if (!dpn[i].words) 200 + if (!dpn[i].words) { 201 + fwnode_handle_put(node); 207 202 return -ENOMEM; 203 + } 208 204 209 205 fwnode_property_read_u32_array(node, 210 206 "mipi-sdw-port-wordlength-configs", ··· 244 236 dpn[i].num_channels, 245 237 sizeof(*dpn[i].channels), 246 238 GFP_KERNEL); 247 - if (!dpn[i].channels) 239 + if (!dpn[i].channels) { 240 + fwnode_handle_put(node); 248 241 return -ENOMEM; 242 + } 249 243 250 244 fwnode_property_read_u32_array(node, 251 245 "mipi-sdw-channel-number-list", ··· 261 251 dpn[i].num_ch_combinations, 262 252 sizeof(*dpn[i].ch_combinations), 263 253 GFP_KERNEL); 264 - if (!dpn[i].ch_combinations) 254 + if (!dpn[i].ch_combinations) { 255 + fwnode_handle_put(node); 265 256 return -ENOMEM; 257 + } 266 258 267 259 fwnode_property_read_u32_array(node, 268 260 "mipi-sdw-channel-combination-list", ··· 285 273 &dpn[i].port_encoding); 286 274 287 275 /* TODO: Read audio mode */ 276 + 277 + fwnode_handle_put(node); 288 278 289 279 i++; 290 280 } ··· 362 348 prop->dp0_prop = devm_kzalloc(&slave->dev, 363 349 
sizeof(*prop->dp0_prop), 364 350 GFP_KERNEL); 365 - if (!prop->dp0_prop) 351 + if (!prop->dp0_prop) { 352 + fwnode_handle_put(port); 366 353 return -ENOMEM; 354 + } 367 355 368 356 sdw_slave_read_dp0(slave, port, prop->dp0_prop); 357 + 358 + fwnode_handle_put(port); 369 359 } 370 360 371 361 /*
+3 -3
drivers/spi/spi-cs42l43.c
··· 26 26 #include <linux/units.h> 27 27 28 28 #define CS42L43_FIFO_SIZE 16 29 - #define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ) 29 + #define CS42L43_SPI_ROOT_HZ 49152000 30 30 #define CS42L43_SPI_MAX_LENGTH 65532 31 31 32 32 enum cs42l43_spi_cmd { ··· 54 54 55 55 static struct spi_board_info ampl_info = { 56 56 .modalias = "cs35l56", 57 - .max_speed_hz = 20 * HZ_PER_MHZ, 57 + .max_speed_hz = 11 * HZ_PER_MHZ, 58 58 .chip_select = 0, 59 59 .mode = SPI_MODE_0, 60 60 .swnode = &ampl, ··· 62 62 63 63 static struct spi_board_info ampr_info = { 64 64 .modalias = "cs35l56", 65 - .max_speed_hz = 20 * HZ_PER_MHZ, 65 + .max_speed_hz = 11 * HZ_PER_MHZ, 66 66 .chip_select = 1, 67 67 .mode = SPI_MODE_0, 68 68 .swnode = &ampr,
+2 -12
drivers/spi/spi-imx.c
··· 660 660 ctrl |= (spi_imx->target_burst * 8 - 1) 661 661 << MX51_ECSPI_CTRL_BL_OFFSET; 662 662 else { 663 - if (spi_imx->usedma) { 664 - ctrl |= (spi_imx->bits_per_word - 1) 665 - << MX51_ECSPI_CTRL_BL_OFFSET; 666 - } else { 667 - if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST) 668 - ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1) 669 - << MX51_ECSPI_CTRL_BL_OFFSET; 670 - else 671 - ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word, 672 - BITS_PER_BYTE) * spi_imx->bits_per_word - 1) 673 - << MX51_ECSPI_CTRL_BL_OFFSET; 674 - } 663 + ctrl |= (spi_imx->bits_per_word - 1) 664 + << MX51_ECSPI_CTRL_BL_OFFSET; 675 665 } 676 666 677 667 /* set clock speed */
+5 -7
drivers/spi/spi-stm32-qspi.c
··· 349 349 350 350 static int stm32_qspi_get_mode(u8 buswidth) 351 351 { 352 - if (buswidth == 4) 352 + if (buswidth >= 4) 353 353 return CCR_BUSWIDTH_4; 354 354 355 355 return buswidth; ··· 653 653 return -EINVAL; 654 654 655 655 mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL); 656 - if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) || 657 - ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) && 658 - gpiod_count(qspi->dev, "cs") == -ENOENT)) { 656 + if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) { 659 657 dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n"); 660 658 dev_err(qspi->dev, "configuration not supported\n"); 661 659 ··· 674 676 qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN; 675 677 676 678 /* 677 - * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL 678 - * are both set in spi->mode and "cs-gpios" properties is found in DT 679 + * Dual flash mode is only enable in case SPI_TX_OCTAL or SPI_RX_OCTAL 680 + * is set in spi->mode and "cs-gpios" properties is found in DT 679 681 */ 680 - if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) { 682 + if (mode) { 681 683 qspi->cr_reg |= CR_DFM; 682 684 dev_dbg(qspi->dev, "Dual flash mode enable"); 683 685 }
+10 -6
drivers/spi/spi.c
··· 689 689 * Make sure that multiple logical CS doesn't map to the same physical CS. 690 690 * For example, spi->chip_select[0] != spi->chip_select[1] and so on. 691 691 */ 692 - for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { 693 - status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); 694 - if (status) 695 - return status; 692 + if (!spi_controller_is_target(ctlr)) { 693 + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { 694 + status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); 695 + if (status) 696 + return status; 697 + } 696 698 } 697 699 698 700 /* Set the bus ID string */ ··· 4158 4156 return -EINVAL; 4159 4157 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 4160 4158 xfer->tx_nbits != SPI_NBITS_DUAL && 4161 - xfer->tx_nbits != SPI_NBITS_QUAD) 4159 + xfer->tx_nbits != SPI_NBITS_QUAD && 4160 + xfer->tx_nbits != SPI_NBITS_OCTAL) 4162 4161 return -EINVAL; 4163 4162 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 4164 4163 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ··· 4174 4171 return -EINVAL; 4175 4172 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 4176 4173 xfer->rx_nbits != SPI_NBITS_DUAL && 4177 - xfer->rx_nbits != SPI_NBITS_QUAD) 4174 + xfer->rx_nbits != SPI_NBITS_QUAD && 4175 + xfer->rx_nbits != SPI_NBITS_OCTAL) 4178 4176 return -EINVAL; 4179 4177 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 4180 4178 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+2 -1
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
··· 150 150 { 151 151 struct proc_thermal_pci *pci_info = devid; 152 152 struct proc_thermal_device *proc_priv; 153 - int ret = IRQ_HANDLED; 153 + int ret = IRQ_NONE; 154 154 u32 status; 155 155 156 156 proc_priv = pci_info->proc_priv; ··· 175 175 /* Disable enable interrupt flag */ 176 176 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); 177 177 pkg_thermal_schedule_work(&pci_info->work); 178 + ret = IRQ_HANDLED; 178 179 } 179 180 180 181 pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);
+5 -7
drivers/thermal/mediatek/lvts_thermal.c
··· 769 769 */ 770 770 gt = (((u32 *)calib)[0] >> lvts_data->gt_calib_bit_offset) & 0xff; 771 771 772 - if (gt && gt < LVTS_GOLDEN_TEMP_MAX) 772 + /* A zero value for gt means that device has invalid efuse data */ 773 + if (!gt) 774 + return -ENODATA; 775 + 776 + if (gt < LVTS_GOLDEN_TEMP_MAX) 773 777 golden_temp = gt; 774 778 775 779 golden_temp_offset = golden_temp * 500 + lvts_data->temp_offset; ··· 1462 1458 }, 1463 1459 VALID_SENSOR_MAP(1, 1, 1, 1), 1464 1460 .offset = 0x0, 1465 - .mode = LVTS_MSR_FILTERED_MODE, 1466 1461 }, 1467 1462 { 1468 1463 .lvts_sensor = { ··· 1472 1469 }, 1473 1470 VALID_SENSOR_MAP(1, 1, 0, 0), 1474 1471 .offset = 0x100, 1475 - .mode = LVTS_MSR_FILTERED_MODE, 1476 1472 } 1477 1473 }; 1478 1474 ··· 1485 1483 }, 1486 1484 VALID_SENSOR_MAP(0, 1, 0, 0), 1487 1485 .offset = 0x0, 1488 - .mode = LVTS_MSR_FILTERED_MODE, 1489 1486 }, 1490 1487 { 1491 1488 .lvts_sensor = { ··· 1497 1496 }, 1498 1497 VALID_SENSOR_MAP(1, 1, 1, 0), 1499 1498 .offset = 0x100, 1500 - .mode = LVTS_MSR_FILTERED_MODE, 1501 1499 }, 1502 1500 { 1503 1501 .lvts_sensor = { ··· 1507 1507 }, 1508 1508 VALID_SENSOR_MAP(1, 1, 0, 0), 1509 1509 .offset = 0x200, 1510 - .mode = LVTS_MSR_FILTERED_MODE, 1511 1510 }, 1512 1511 { 1513 1512 .lvts_sensor = { ··· 1517 1518 }, 1518 1519 VALID_SENSOR_MAP(1, 1, 0, 0), 1519 1520 .offset = 0x300, 1520 - .mode = LVTS_MSR_FILTERED_MODE, 1521 1521 } 1522 1522 }; 1523 1523
+27
drivers/thermal/thermal_core.c
··· 1406 1406 ida_init(&tz->ida); 1407 1407 mutex_init(&tz->lock); 1408 1408 init_completion(&tz->removal); 1409 + init_completion(&tz->resume); 1409 1410 id = ida_alloc(&thermal_tz_ida, GFP_KERNEL); 1410 1411 if (id < 0) { 1411 1412 result = id; ··· 1652 1651 thermal_zone_device_init(tz); 1653 1652 __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); 1654 1653 1654 + complete(&tz->resume); 1655 + tz->resuming = false; 1656 + 1655 1657 mutex_unlock(&tz->lock); 1656 1658 } 1657 1659 ··· 1672 1668 list_for_each_entry(tz, &thermal_tz_list, node) { 1673 1669 mutex_lock(&tz->lock); 1674 1670 1671 + if (tz->resuming) { 1672 + /* 1673 + * thermal_zone_device_resume() queued up for 1674 + * this zone has not acquired the lock yet, so 1675 + * release it to let the function run and wait 1676 + * util it has done the work. 1677 + */ 1678 + mutex_unlock(&tz->lock); 1679 + 1680 + wait_for_completion(&tz->resume); 1681 + 1682 + mutex_lock(&tz->lock); 1683 + } 1684 + 1675 1685 tz->suspended = true; 1676 1686 1677 1687 mutex_unlock(&tz->lock); ··· 1702 1684 mutex_lock(&tz->lock); 1703 1685 1704 1686 cancel_delayed_work(&tz->poll_queue); 1687 + 1688 + reinit_completion(&tz->resume); 1689 + tz->resuming = true; 1705 1690 1706 1691 /* 1707 1692 * Replace the work function with the resume one, which ··· 1730 1709 1731 1710 static struct notifier_block thermal_pm_nb = { 1732 1711 .notifier_call = thermal_pm_notify, 1712 + /* 1713 + * Run at the lowest priority to avoid interference between the thermal 1714 + * zone resume work items spawned by thermal_pm_notify() and the other 1715 + * PM notifiers. 1716 + */ 1717 + .priority = INT_MIN, 1733 1718 }; 1734 1719 1735 1720 static int __init thermal_init(void)
+4
drivers/thermal/thermal_core.h
··· 55 55 * @type: the thermal zone device type 56 56 * @device: &struct device for this thermal zone 57 57 * @removal: removal completion 58 + * @resume: resume completion 58 59 * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature 59 60 * @trip_type_attrs: attributes for trip points for sysfs: trip type 60 61 * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis ··· 90 89 * @poll_queue: delayed work for polling 91 90 * @notify_event: Last notification event 92 91 * @suspended: thermal zone suspend indicator 92 + * @resuming: indicates whether or not thermal zone resume is in progress 93 93 * @trips: array of struct thermal_trip objects 94 94 */ 95 95 struct thermal_zone_device { ··· 98 96 char type[THERMAL_NAME_LENGTH]; 99 97 struct device device; 100 98 struct completion removal; 99 + struct completion resume; 101 100 struct attribute_group trips_attribute_group; 102 101 struct thermal_attr *trip_temp_attrs; 103 102 struct thermal_attr *trip_type_attrs; ··· 126 123 struct delayed_work poll_queue; 127 124 enum thermal_notify_event notify_event; 128 125 bool suspended; 126 + bool resuming; 129 127 #ifdef CONFIG_THERMAL_DEBUGFS 130 128 struct thermal_debugfs *debugfs; 131 129 #endif
+1
drivers/ufs/core/ufshcd.c
··· 8787 8787 (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { 8788 8788 /* Reset the device and controller before doing reinit */ 8789 8789 ufshcd_device_reset(hba); 8790 + ufs_put_device_desc(hba); 8790 8791 ufshcd_hba_stop(hba); 8791 8792 ufshcd_vops_reinit_notify(hba); 8792 8793 ret = ufshcd_hba_enable(hba);
+6
drivers/usb/storage/scsiglue.c
··· 79 79 if (us->protocol == USB_PR_BULK && us->max_lun > 0) 80 80 sdev->sdev_bflags |= BLIST_FORCELUN; 81 81 82 + /* 83 + * Some USB storage devices reset if the IO advice hints grouping mode 84 + * page is queried. Hence skip that mode page. 85 + */ 86 + sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS; 87 + 82 88 return 0; 83 89 } 84 90
+7
drivers/usb/storage/uas.c
··· 21 21 #include <scsi/scsi.h> 22 22 #include <scsi/scsi_eh.h> 23 23 #include <scsi/scsi_dbg.h> 24 + #include <scsi/scsi_devinfo.h> 24 25 #include <scsi/scsi_cmnd.h> 25 26 #include <scsi/scsi_device.h> 26 27 #include <scsi/scsi_host.h> ··· 820 819 { 821 820 struct uas_dev_info *devinfo = 822 821 (struct uas_dev_info *)sdev->host->hostdata; 822 + 823 + /* 824 + * Some USB storage devices reset if the IO advice hints grouping mode 825 + * page is queried. Hence skip that mode page. 826 + */ 827 + sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS; 823 828 824 829 sdev->hostdata = devinfo; 825 830 return 0;
+61 -15
fs/bcachefs/alloc_background.c
··· 259 259 "invalid data type (got %u should be %u)", 260 260 a.v->data_type, alloc_data_type(*a.v, a.v->data_type)); 261 261 262 + for (unsigned i = 0; i < 2; i++) 263 + bkey_fsck_err_on(a.v->io_time[i] > LRU_TIME_MAX, 264 + c, err, 265 + alloc_key_io_time_bad, 266 + "invalid io_time[%s]: %llu, max %llu", 267 + i == READ ? "read" : "write", 268 + a.v->io_time[i], LRU_TIME_MAX); 269 + 262 270 switch (a.v->data_type) { 263 271 case BCH_DATA_free: 264 272 case BCH_DATA_need_gc_gens: ··· 765 757 alloc_data_type_set(new_a, new_a->data_type); 766 758 767 759 if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) { 768 - new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now)); 769 - new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now)); 760 + new_a->io_time[READ] = bch2_current_io_time(c, READ); 761 + new_a->io_time[WRITE]= bch2_current_io_time(c, WRITE); 770 762 SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true); 771 763 SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true); 772 764 } ··· 776 768 !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) { 777 769 new_a->gen++; 778 770 SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false); 771 + alloc_data_type_set(new_a, new_a->data_type); 779 772 } 780 773 781 774 if (old_a->data_type != new_a->data_type || ··· 790 781 791 782 if (new_a->data_type == BCH_DATA_cached && 792 783 !new_a->io_time[READ]) 793 - new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now)); 784 + new_a->io_time[READ] = bch2_current_io_time(c, READ); 794 785 795 786 u64 old_lru = alloc_lru_idx_read(*old_a); 796 787 u64 new_lru = alloc_lru_idx_read(*new_a); ··· 891 882 closure_wake_up(&c->freelist_wait); 892 883 893 884 if (statechange(a->data_type == BCH_DATA_need_discard) && 894 - !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) && 885 + !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) && 895 886 bucket_flushed(new_a)) 896 887 bch2_discard_one_bucket_fast(c, new.k->p); 
897 888 ··· 1588 1579 if (ret) 1589 1580 goto err; 1590 1581 1591 - a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now); 1582 + a_mut->v.io_time[READ] = bch2_current_io_time(c, READ); 1592 1583 ret = bch2_trans_update(trans, alloc_iter, 1593 1584 &a_mut->k_i, BTREE_TRIGGER_norun); 1594 1585 if (ret) ··· 1643 1634 mutex_lock(&c->discard_buckets_in_flight_lock); 1644 1635 darray_for_each(c->discard_buckets_in_flight, i) 1645 1636 if (bkey_eq(*i, bucket)) { 1646 - ret = -EEXIST; 1637 + ret = -BCH_ERR_EEXIST_discard_in_flight_add; 1647 1638 goto out; 1648 1639 } 1649 1640 ··· 1797 1788 } 1798 1789 1799 1790 SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false); 1800 - alloc_data_type_set(&a->v, a->v.data_type); 1801 1791 write: 1792 + alloc_data_type_set(&a->v, a->v.data_type); 1793 + 1802 1794 ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?: 1803 1795 bch2_trans_commit(trans, NULL, NULL, 1804 1796 BCH_WATERMARK_btree| ··· 1985 1975 a->v.data_type = 0; 1986 1976 a->v.dirty_sectors = 0; 1987 1977 a->v.cached_sectors = 0; 1988 - a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now); 1989 - a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now); 1978 + a->v.io_time[READ] = bch2_current_io_time(c, READ); 1979 + a->v.io_time[WRITE] = bch2_current_io_time(c, WRITE); 1990 1980 1991 1981 ret = bch2_trans_commit(trans, NULL, NULL, 1992 1982 BCH_WATERMARK_btree| ··· 2021 2011 goto out; 2022 2012 } 2023 2013 2014 + static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter, 2015 + struct bch_dev *ca, bool *wrapped) 2016 + { 2017 + struct bkey_s_c k; 2018 + again: 2019 + k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); 2020 + if (!k.k && !*wrapped) { 2021 + bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0)); 2022 + *wrapped = true; 2023 + goto again; 2024 + } 2025 + 2026 + return k; 2027 + } 2028 + 2024 2029 static void bch2_do_invalidates_work(struct work_struct *work) 2025 2030 { 2026 2031 struct 
bch_fs *c = container_of(work, struct bch_fs, invalidate_work); ··· 2049 2024 for_each_member_device(c, ca) { 2050 2025 s64 nr_to_invalidate = 2051 2026 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)); 2027 + struct btree_iter iter; 2028 + bool wrapped = false; 2052 2029 2053 - ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru, 2054 - lru_pos(ca->dev_idx, 0, 0), 2055 - lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX), 2056 - BTREE_ITER_intent, k, 2057 - invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate)); 2030 + bch2_trans_iter_init(trans, &iter, BTREE_ID_lru, 2031 + lru_pos(ca->dev_idx, 0, 2032 + ((bch2_current_io_time(c, READ) + U32_MAX) & 2033 + LRU_TIME_MAX)), 0); 2034 + 2035 + while (true) { 2036 + bch2_trans_begin(trans); 2037 + 2038 + struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped); 2039 + ret = bkey_err(k); 2040 + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 2041 + continue; 2042 + if (ret) 2043 + break; 2044 + if (!k.k) 2045 + break; 2046 + 2047 + ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate); 2048 + if (ret) 2049 + break; 2050 + 2051 + bch2_btree_iter_advance(&iter); 2052 + } 2053 + bch2_trans_iter_exit(trans, &iter); 2058 2054 2059 2055 if (ret < 0) { 2060 2056 bch2_dev_put(ca); ··· 2250 2204 if (ret) 2251 2205 return ret; 2252 2206 2253 - now = atomic64_read(&c->io_clock[rw].now); 2207 + now = bch2_current_io_time(c, rw); 2254 2208 if (a->v.io_time[rw] == now) 2255 2209 goto out; 2256 2210
+7 -1
fs/bcachefs/alloc_background.h
··· 141 141 !bch2_bucket_sectors_fragmented(ca, a)) 142 142 return 0; 143 143 144 - u64 d = bch2_bucket_sectors_dirty(a); 144 + /* 145 + * avoid overflowing LRU_TIME_BITS on a corrupted fs, when 146 + * bucket_sectors_dirty is (much) bigger than bucket_size 147 + */ 148 + u64 d = min(bch2_bucket_sectors_dirty(a), 149 + ca->mi.bucket_size); 150 + 145 151 return div_u64(d * (1ULL << 31), ca->mi.bucket_size); 146 152 } 147 153
+5
fs/bcachefs/bcachefs.h
··· 1214 1214 return timespec_to_bch2_time(c, now); 1215 1215 } 1216 1216 1217 + static inline u64 bch2_current_io_time(const struct bch_fs *c, int rw) 1218 + { 1219 + return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX); 1220 + } 1221 + 1217 1222 static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c) 1218 1223 { 1219 1224 struct stdio_redirect *stdio = c->stdio;
+9 -4
fs/bcachefs/bcachefs_format.h
··· 476 476 477 477 #define LRU_ID_STRIPES (1U << 16) 478 478 479 + #define LRU_TIME_BITS 48 480 + #define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1) 481 + 479 482 /* Optional/variable size superblock sections: */ 480 483 481 484 struct bch_sb_field { ··· 990 987 991 988 #define BCH_ERROR_ACTIONS() \ 992 989 x(continue, 0) \ 993 - x(ro, 1) \ 994 - x(panic, 2) 990 + x(fix_safe, 1) \ 991 + x(panic, 2) \ 992 + x(ro, 3) 995 993 996 994 enum bch_error_actions { 997 995 #define x(t, n) BCH_ON_ERROR_##t = n, ··· 1386 1382 1387 1383 /* 1388 1384 * Maximum number of btrees that we will _ever_ have under the current scheme, 1389 - * where we refer to them with bitfields 1385 + * where we refer to them with 64 bit bitfields - and we also need a bit for 1386 + * the interior btree node type: 1390 1387 */ 1391 - #define BTREE_ID_NR_MAX 64 1388 + #define BTREE_ID_NR_MAX 63 1392 1389 1393 1390 static inline bool btree_id_is_alloc(enum btree_id id) 1394 1391 {
+1 -1
fs/bcachefs/bkey.c
··· 1064 1064 { 1065 1065 const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current; 1066 1066 u8 *l = k->key_start; 1067 - u8 *h = (u8 *) (k->_data + f->key_u64s) - 1; 1067 + u8 *h = (u8 *) ((u64 *) k->_data + f->key_u64s) - 1; 1068 1068 1069 1069 while (l < h) { 1070 1070 swap(*l, *h);
+5 -1
fs/bcachefs/bkey_methods.c
··· 398 398 for (i = 0; i < nr_compat; i++) 399 399 switch (!write ? i : nr_compat - 1 - i) { 400 400 case 0: 401 - if (big_endian != CPU_BIG_ENDIAN) 401 + if (big_endian != CPU_BIG_ENDIAN) { 402 402 bch2_bkey_swab_key(f, k); 403 + } else if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { 404 + bch2_bkey_swab_key(f, k); 405 + bch2_bkey_swab_key(f, k); 406 + } 403 407 break; 404 408 case 1: 405 409 if (version < bcachefs_metadata_version_bkey_renumber)
+2 -1
fs/bcachefs/bkey_methods.h
··· 129 129 struct bkey_packed *k) 130 130 { 131 131 if (version < bcachefs_metadata_version_current || 132 - big_endian != CPU_BIG_ENDIAN) 132 + big_endian != CPU_BIG_ENDIAN || 133 + IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) 133 134 __bch2_bkey_compat(level, btree_id, version, 134 135 big_endian, write, f, k); 135 136
+8 -3
fs/bcachefs/btree_iter.c
··· 3161 3161 list_add_done: 3162 3162 seqmutex_unlock(&c->btree_trans_lock); 3163 3163 got_trans: 3164 + trans->ref.closure_get_happened = false; 3164 3165 trans->c = c; 3165 3166 trans->last_begin_time = local_clock(); 3166 3167 trans->fn_idx = fn_idx; ··· 3236 3235 trans_for_each_update(trans, i) 3237 3236 __btree_path_put(trans->paths + i->path, true); 3238 3237 trans->nr_updates = 0; 3239 - trans->locking_wait.task = NULL; 3240 3238 3241 3239 check_btree_paths_leaked(trans); 3242 3240 ··· 3256 3256 if (unlikely(trans->journal_replay_not_finished)) 3257 3257 bch2_journal_keys_put(c); 3258 3258 3259 + /* 3260 + * trans->ref protects trans->locking_wait.task, btree_paths arary; used 3261 + * by cycle detector 3262 + */ 3263 + closure_sync(&trans->ref); 3264 + trans->locking_wait.task = NULL; 3265 + 3259 3266 unsigned long *paths_allocated = trans->paths_allocated; 3260 3267 trans->paths_allocated = NULL; 3261 3268 trans->paths = NULL; ··· 3280 3273 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans); 3281 3274 3282 3275 if (trans) { 3283 - closure_sync(&trans->ref); 3284 - 3285 3276 seqmutex_lock(&c->btree_trans_lock); 3286 3277 list_del(&trans->list); 3287 3278 seqmutex_unlock(&c->btree_trans_lock);
+8 -8
fs/bcachefs/btree_types.h
··· 761 761 762 762 static inline bool btree_node_type_is_extents(enum btree_node_type type) 763 763 { 764 - const unsigned mask = 0 764 + const u64 mask = 0 765 765 #define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1)) 766 766 BCH_BTREE_IDS() 767 767 #undef x 768 768 ; 769 769 770 - return (1U << type) & mask; 770 + return BIT_ULL(type) & mask; 771 771 } 772 772 773 773 static inline bool btree_id_is_extents(enum btree_id btree) ··· 777 777 778 778 static inline bool btree_type_has_snapshots(enum btree_id id) 779 779 { 780 - const unsigned mask = 0 780 + const u64 mask = 0 781 781 #define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr) 782 782 BCH_BTREE_IDS() 783 783 #undef x 784 784 ; 785 785 786 - return (1U << id) & mask; 786 + return BIT_ULL(id) & mask; 787 787 } 788 788 789 789 static inline bool btree_type_has_snapshot_field(enum btree_id id) 790 790 { 791 - const unsigned mask = 0 791 + const u64 mask = 0 792 792 #define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr) 793 793 BCH_BTREE_IDS() 794 794 #undef x 795 795 ; 796 796 797 - return (1U << id) & mask; 797 + return BIT_ULL(id) & mask; 798 798 } 799 799 800 800 static inline bool btree_type_has_ptrs(enum btree_id id) 801 801 { 802 - const unsigned mask = 0 802 + const u64 mask = 0 803 803 #define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr) 804 804 BCH_BTREE_IDS() 805 805 #undef x 806 806 ; 807 807 808 - return (1U << id) & mask; 808 + return BIT_ULL(id) & mask; 809 809 } 810 810 811 811 struct btree_root {
+3
fs/bcachefs/errcode.h
··· 116 116 x(ENOENT, ENOENT_dev_idx_not_found) \ 117 117 x(ENOTEMPTY, ENOTEMPTY_dir_not_empty) \ 118 118 x(ENOTEMPTY, ENOTEMPTY_subvol_not_empty) \ 119 + x(EEXIST, EEXIST_str_hash_set) \ 120 + x(EEXIST, EEXIST_discard_in_flight_add) \ 121 + x(EEXIST, EEXIST_subvolume_create) \ 119 122 x(0, open_buckets_empty) \ 120 123 x(0, freelist_empty) \ 121 124 x(BCH_ERR_freelist_empty, no_buckets_found) \
+18 -1
fs/bcachefs/error.c
··· 15 15 switch (c->opts.errors) { 16 16 case BCH_ON_ERROR_continue: 17 17 return false; 18 + case BCH_ON_ERROR_fix_safe: 18 19 case BCH_ON_ERROR_ro: 19 20 if (bch2_fs_emergency_read_only(c)) 20 21 bch_err(c, "inconsistency detected - emergency read only at journal seq %llu", ··· 192 191 prt_str(out, "ing"); 193 192 } 194 193 194 + static const u8 fsck_flags_extra[] = { 195 + #define x(t, n, flags) [BCH_FSCK_ERR_##t] = flags, 196 + BCH_SB_ERRS() 197 + #undef x 198 + }; 199 + 195 200 int bch2_fsck_err(struct bch_fs *c, 196 201 enum bch_fsck_flags flags, 197 202 enum bch_sb_error_id err, ··· 209 202 struct printbuf buf = PRINTBUF, *out = &buf; 210 203 int ret = -BCH_ERR_fsck_ignore; 211 204 const char *action_orig = "fix?", *action = action_orig; 205 + 206 + if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra))) 207 + flags |= fsck_flags_extra[err]; 212 208 213 209 if ((flags & FSCK_CAN_FIX) && 214 210 test_bit(err, c->sb.errors_silent)) ··· 275 265 prt_printf(out, bch2_log_msg(c, "")); 276 266 #endif 277 267 278 - if (!test_bit(BCH_FS_fsck_running, &c->flags)) { 268 + if ((flags & FSCK_CAN_FIX) && 269 + (flags & FSCK_AUTOFIX) && 270 + (c->opts.errors == BCH_ON_ERROR_continue || 271 + c->opts.errors == BCH_ON_ERROR_fix_safe)) { 272 + prt_str(out, ", "); 273 + prt_actioning(out, action); 274 + ret = -BCH_ERR_fsck_fix; 275 + } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) { 279 276 if (c->opts.errors != BCH_ON_ERROR_continue || 280 277 !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) { 281 278 prt_str(out, ", shutting down");
-7
fs/bcachefs/error.h
··· 108 108 char *last_msg; 109 109 }; 110 110 111 - enum bch_fsck_flags { 112 - FSCK_CAN_FIX = 1 << 0, 113 - FSCK_CAN_IGNORE = 1 << 1, 114 - FSCK_NEED_FSCK = 1 << 2, 115 - FSCK_NO_RATELIMIT = 1 << 3, 116 - }; 117 - 118 111 #define fsck_err_count(_c, _err) bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err) 119 112 120 113 __printf(4, 5) __cold
+1 -1
fs/bcachefs/fs-ioctl.c
··· 373 373 } 374 374 375 375 if (dst_dentry->d_inode) { 376 - error = -EEXIST; 376 + error = -BCH_ERR_EEXIST_subvolume_create; 377 377 goto err3; 378 378 } 379 379
+14 -7
fs/bcachefs/fs.c
··· 188 188 BUG_ON(!old); 189 189 190 190 if (unlikely(old != inode)) { 191 + /* 192 + * bcachefs doesn't use I_NEW; we have no use for it since we 193 + * only insert fully created inodes in the inode hash table. But 194 + * discard_new_inode() expects it to be set... 195 + */ 196 + inode->v.i_flags |= I_NEW; 191 197 discard_new_inode(&inode->v); 192 198 inode = old; 193 199 } else { ··· 201 195 list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list); 202 196 mutex_unlock(&c->vfs_inodes_lock); 203 197 /* 204 - * we really don't want insert_inode_locked2() to be setting 205 - * I_NEW... 198 + * Again, I_NEW makes no sense for bcachefs. This is only needed 199 + * for clearing I_NEW, but since the inode was already fully 200 + * created and initialized we didn't actually want 201 + * inode_insert5() to set it for us. 206 202 */ 207 203 unlock_new_inode(&inode->v); 208 204 } ··· 1165 1157 .read_iter = bch2_read_iter, 1166 1158 .write_iter = bch2_write_iter, 1167 1159 .mmap = bch2_mmap, 1160 + .get_unmapped_area = thp_get_unmapped_area, 1168 1161 .fsync = bch2_fsync, 1169 1162 .splice_read = filemap_splice_read, 1170 1163 .splice_write = iter_file_splice_write, ··· 1497 1488 bch2_iget5_set(&inode->v, &inum); 1498 1489 bch2_inode_update_after_write(trans, inode, bi, ~0); 1499 1490 1500 - if (BCH_SUBVOLUME_SNAP(subvol)) 1501 - set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags); 1502 - else 1503 - clear_bit(EI_INODE_SNAPSHOT, &inode->ei_flags); 1504 - 1505 1491 inode->v.i_blocks = bi->bi_sectors; 1506 1492 inode->v.i_ino = bi->bi_inum; 1507 1493 inode->v.i_rdev = bi->bi_dev; ··· 1507 1503 inode->ei_quota_reserved = 0; 1508 1504 inode->ei_qid = bch_qid(bi); 1509 1505 inode->ei_subvol = inum.subvol; 1506 + 1507 + if (BCH_SUBVOLUME_SNAP(subvol)) 1508 + set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags); 1510 1509 1511 1510 inode->v.i_mapping->a_ops = &bch_address_space_operations; 1512 1511
+3
fs/bcachefs/journal.c
··· 1167 1167 1168 1168 void bch2_fs_journal_stop(struct journal *j) 1169 1169 { 1170 + if (!test_bit(JOURNAL_running, &j->flags)) 1171 + return; 1172 + 1170 1173 bch2_journal_reclaim_stop(j); 1171 1174 bch2_journal_flush_all_pins(j); 1172 1175
+8 -5
fs/bcachefs/journal_io.c
··· 1967 1967 struct journal *j = container_of(w, struct journal, buf[w->idx]); 1968 1968 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1969 1969 struct bch_replicas_padded replicas; 1970 - struct printbuf journal_debug_buf = PRINTBUF; 1971 1970 unsigned nr_rw_members = 0; 1972 1971 int ret; 1973 1972 ··· 2010 2011 } 2011 2012 2012 2013 if (ret) { 2013 - __bch2_journal_debug_to_text(&journal_debug_buf, j); 2014 + struct printbuf buf = PRINTBUF; 2015 + buf.atomic++; 2016 + 2017 + prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write: %s"), 2018 + bch2_err_str(ret)); 2019 + __bch2_journal_debug_to_text(&buf, j); 2014 2020 spin_unlock(&j->lock); 2015 - bch_err(c, "Unable to allocate journal write:\n%s", 2016 - journal_debug_buf.buf); 2017 - printbuf_exit(&journal_debug_buf); 2021 + bch2_print_string_as_lines(KERN_ERR, buf.buf); 2022 + printbuf_exit(&buf); 2018 2023 goto err; 2019 2024 } 2020 2025
-3
fs/bcachefs/lru.h
··· 2 2 #ifndef _BCACHEFS_LRU_H 3 3 #define _BCACHEFS_LRU_H 4 4 5 - #define LRU_TIME_BITS 48 6 - #define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1) 7 - 8 5 static inline u64 lru_pos_id(struct bpos pos) 9 6 { 10 7 return pos.inode >> LRU_TIME_BITS;
+1 -1
fs/bcachefs/opts.h
··· 137 137 x(errors, u8, \ 138 138 OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ 139 139 OPT_STR(bch2_error_actions), \ 140 - BCH_SB_ERROR_ACTION, BCH_ON_ERROR_ro, \ 140 + BCH_SB_ERROR_ACTION, BCH_ON_ERROR_fix_safe, \ 141 141 NULL, "Action to take on filesystem error") \ 142 142 x(metadata_replicas, u8, \ 143 143 OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+9 -3
fs/bcachefs/recovery.c
··· 326 326 case BCH_JSET_ENTRY_btree_root: { 327 327 struct btree_root *r; 328 328 329 + if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX, 330 + c, invalid_btree_id, 331 + "invalid btree id %u (max %u)", 332 + entry->btree_id, BTREE_ID_NR_MAX)) 333 + return 0; 334 + 329 335 while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) { 330 336 ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL }); 331 337 if (ret) ··· 421 415 atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time)); 422 416 } 423 417 } 424 - 418 + fsck_err: 425 419 return ret; 426 420 } 427 421 ··· 664 658 if (check_version_upgrade(c)) 665 659 write_sb = true; 666 660 661 + c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); 662 + 667 663 if (write_sb) 668 664 bch2_write_super(c); 669 - 670 - c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); 671 665 mutex_unlock(&c->sb_lock); 672 666 673 667 if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
+1 -1
fs/bcachefs/sb-downgrade.c
··· 228 228 229 229 dst = (void *) &darray_top(table); 230 230 dst->version = cpu_to_le16(src->version); 231 - dst->recovery_passes[0] = cpu_to_le64(src->recovery_passes); 231 + dst->recovery_passes[0] = cpu_to_le64(bch2_recovery_passes_to_stable(src->recovery_passes)); 232 232 dst->recovery_passes[1] = 0; 233 233 dst->nr_errors = cpu_to_le16(src->nr_errors); 234 234 for (unsigned i = 0; i < src->nr_errors; i++)
+286 -273
fs/bcachefs/sb-errors_format.h
··· 2 2 #ifndef _BCACHEFS_SB_ERRORS_FORMAT_H 3 3 #define _BCACHEFS_SB_ERRORS_FORMAT_H 4 4 5 - #define BCH_SB_ERRS() \ 6 - x(clean_but_journal_not_empty, 0) \ 7 - x(dirty_but_no_journal_entries, 1) \ 8 - x(dirty_but_no_journal_entries_post_drop_nonflushes, 2) \ 9 - x(sb_clean_journal_seq_mismatch, 3) \ 10 - x(sb_clean_btree_root_mismatch, 4) \ 11 - x(sb_clean_missing, 5) \ 12 - x(jset_unsupported_version, 6) \ 13 - x(jset_unknown_csum, 7) \ 14 - x(jset_last_seq_newer_than_seq, 8) \ 15 - x(jset_past_bucket_end, 9) \ 16 - x(jset_seq_blacklisted, 10) \ 17 - x(journal_entries_missing, 11) \ 18 - x(journal_entry_replicas_not_marked, 12) \ 19 - x(journal_entry_past_jset_end, 13) \ 20 - x(journal_entry_replicas_data_mismatch, 14) \ 21 - x(journal_entry_bkey_u64s_0, 15) \ 22 - x(journal_entry_bkey_past_end, 16) \ 23 - x(journal_entry_bkey_bad_format, 17) \ 24 - x(journal_entry_bkey_invalid, 18) \ 25 - x(journal_entry_btree_root_bad_size, 19) \ 26 - x(journal_entry_blacklist_bad_size, 20) \ 27 - x(journal_entry_blacklist_v2_bad_size, 21) \ 28 - x(journal_entry_blacklist_v2_start_past_end, 22) \ 29 - x(journal_entry_usage_bad_size, 23) \ 30 - x(journal_entry_data_usage_bad_size, 24) \ 31 - x(journal_entry_clock_bad_size, 25) \ 32 - x(journal_entry_clock_bad_rw, 26) \ 33 - x(journal_entry_dev_usage_bad_size, 27) \ 34 - x(journal_entry_dev_usage_bad_dev, 28) \ 35 - x(journal_entry_dev_usage_bad_pad, 29) \ 36 - x(btree_node_unreadable, 30) \ 37 - x(btree_node_fault_injected, 31) \ 38 - x(btree_node_bad_magic, 32) \ 39 - x(btree_node_bad_seq, 33) \ 40 - x(btree_node_unsupported_version, 34) \ 41 - x(btree_node_bset_older_than_sb_min, 35) \ 42 - x(btree_node_bset_newer_than_sb, 36) \ 43 - x(btree_node_data_missing, 37) \ 44 - x(btree_node_bset_after_end, 38) \ 45 - x(btree_node_replicas_sectors_written_mismatch, 39) \ 46 - x(btree_node_replicas_data_mismatch, 40) \ 47 - x(bset_unknown_csum, 41) \ 48 - x(bset_bad_csum, 42) \ 49 - x(bset_past_end_of_btree_node, 43) \ 50 - 
x(bset_wrong_sector_offset, 44) \ 51 - x(bset_empty, 45) \ 52 - x(bset_bad_seq, 46) \ 53 - x(bset_blacklisted_journal_seq, 47) \ 54 - x(first_bset_blacklisted_journal_seq, 48) \ 55 - x(btree_node_bad_btree, 49) \ 56 - x(btree_node_bad_level, 50) \ 57 - x(btree_node_bad_min_key, 51) \ 58 - x(btree_node_bad_max_key, 52) \ 59 - x(btree_node_bad_format, 53) \ 60 - x(btree_node_bkey_past_bset_end, 54) \ 61 - x(btree_node_bkey_bad_format, 55) \ 62 - x(btree_node_bad_bkey, 56) \ 63 - x(btree_node_bkey_out_of_order, 57) \ 64 - x(btree_root_bkey_invalid, 58) \ 65 - x(btree_root_read_error, 59) \ 66 - x(btree_root_bad_min_key, 60) \ 67 - x(btree_root_bad_max_key, 61) \ 68 - x(btree_node_read_error, 62) \ 69 - x(btree_node_topology_bad_min_key, 63) \ 70 - x(btree_node_topology_bad_max_key, 64) \ 71 - x(btree_node_topology_overwritten_by_prev_node, 65) \ 72 - x(btree_node_topology_overwritten_by_next_node, 66) \ 73 - x(btree_node_topology_interior_node_empty, 67) \ 74 - x(fs_usage_hidden_wrong, 68) \ 75 - x(fs_usage_btree_wrong, 69) \ 76 - x(fs_usage_data_wrong, 70) \ 77 - x(fs_usage_cached_wrong, 71) \ 78 - x(fs_usage_reserved_wrong, 72) \ 79 - x(fs_usage_persistent_reserved_wrong, 73) \ 80 - x(fs_usage_nr_inodes_wrong, 74) \ 81 - x(fs_usage_replicas_wrong, 75) \ 82 - x(dev_usage_buckets_wrong, 76) \ 83 - x(dev_usage_sectors_wrong, 77) \ 84 - x(dev_usage_fragmented_wrong, 78) \ 85 - x(dev_usage_buckets_ec_wrong, 79) \ 86 - x(bkey_version_in_future, 80) \ 87 - x(bkey_u64s_too_small, 81) \ 88 - x(bkey_invalid_type_for_btree, 82) \ 89 - x(bkey_extent_size_zero, 83) \ 90 - x(bkey_extent_size_greater_than_offset, 84) \ 91 - x(bkey_size_nonzero, 85) \ 92 - x(bkey_snapshot_nonzero, 86) \ 93 - x(bkey_snapshot_zero, 87) \ 94 - x(bkey_at_pos_max, 88) \ 95 - x(bkey_before_start_of_btree_node, 89) \ 96 - x(bkey_after_end_of_btree_node, 90) \ 97 - x(bkey_val_size_nonzero, 91) \ 98 - x(bkey_val_size_too_small, 92) \ 99 - x(alloc_v1_val_size_bad, 93) \ 100 - x(alloc_v2_unpack_error, 94) \ 
101 - x(alloc_v3_unpack_error, 95) \ 102 - x(alloc_v4_val_size_bad, 96) \ 103 - x(alloc_v4_backpointers_start_bad, 97) \ 104 - x(alloc_key_data_type_bad, 98) \ 105 - x(alloc_key_empty_but_have_data, 99) \ 106 - x(alloc_key_dirty_sectors_0, 100) \ 107 - x(alloc_key_data_type_inconsistency, 101) \ 108 - x(alloc_key_to_missing_dev_bucket, 102) \ 109 - x(alloc_key_cached_inconsistency, 103) \ 110 - x(alloc_key_cached_but_read_time_zero, 104) \ 111 - x(alloc_key_to_missing_lru_entry, 105) \ 112 - x(alloc_key_data_type_wrong, 106) \ 113 - x(alloc_key_gen_wrong, 107) \ 114 - x(alloc_key_dirty_sectors_wrong, 108) \ 115 - x(alloc_key_cached_sectors_wrong, 109) \ 116 - x(alloc_key_stripe_wrong, 110) \ 117 - x(alloc_key_stripe_redundancy_wrong, 111) \ 118 - x(bucket_sector_count_overflow, 112) \ 119 - x(bucket_metadata_type_mismatch, 113) \ 120 - x(need_discard_key_wrong, 114) \ 121 - x(freespace_key_wrong, 115) \ 122 - x(freespace_hole_missing, 116) \ 123 - x(bucket_gens_val_size_bad, 117) \ 124 - x(bucket_gens_key_wrong, 118) \ 125 - x(bucket_gens_hole_wrong, 119) \ 126 - x(bucket_gens_to_invalid_dev, 120) \ 127 - x(bucket_gens_to_invalid_buckets, 121) \ 128 - x(bucket_gens_nonzero_for_invalid_buckets, 122) \ 129 - x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \ 130 - x(need_discard_freespace_key_bad, 124) \ 131 - x(backpointer_bucket_offset_wrong, 125) \ 132 - x(backpointer_to_missing_device, 126) \ 133 - x(backpointer_to_missing_alloc, 127) \ 134 - x(backpointer_to_missing_ptr, 128) \ 135 - x(lru_entry_at_time_0, 129) \ 136 - x(lru_entry_to_invalid_bucket, 130) \ 137 - x(lru_entry_bad, 131) \ 138 - x(btree_ptr_val_too_big, 132) \ 139 - x(btree_ptr_v2_val_too_big, 133) \ 140 - x(btree_ptr_has_non_ptr, 134) \ 141 - x(extent_ptrs_invalid_entry, 135) \ 142 - x(extent_ptrs_no_ptrs, 136) \ 143 - x(extent_ptrs_too_many_ptrs, 137) \ 144 - x(extent_ptrs_redundant_crc, 138) \ 145 - x(extent_ptrs_redundant_stripe, 139) \ 146 - x(extent_ptrs_unwritten, 140) \ 147 - 
x(extent_ptrs_written_and_unwritten, 141) \ 148 - x(ptr_to_invalid_device, 142) \ 149 - x(ptr_to_duplicate_device, 143) \ 150 - x(ptr_after_last_bucket, 144) \ 151 - x(ptr_before_first_bucket, 145) \ 152 - x(ptr_spans_multiple_buckets, 146) \ 153 - x(ptr_to_missing_backpointer, 147) \ 154 - x(ptr_to_missing_alloc_key, 148) \ 155 - x(ptr_to_missing_replicas_entry, 149) \ 156 - x(ptr_to_missing_stripe, 150) \ 157 - x(ptr_to_incorrect_stripe, 151) \ 158 - x(ptr_gen_newer_than_bucket_gen, 152) \ 159 - x(ptr_too_stale, 153) \ 160 - x(stale_dirty_ptr, 154) \ 161 - x(ptr_bucket_data_type_mismatch, 155) \ 162 - x(ptr_cached_and_erasure_coded, 156) \ 163 - x(ptr_crc_uncompressed_size_too_small, 157) \ 164 - x(ptr_crc_csum_type_unknown, 158) \ 165 - x(ptr_crc_compression_type_unknown, 159) \ 166 - x(ptr_crc_redundant, 160) \ 167 - x(ptr_crc_uncompressed_size_too_big, 161) \ 168 - x(ptr_crc_nonce_mismatch, 162) \ 169 - x(ptr_stripe_redundant, 163) \ 170 - x(reservation_key_nr_replicas_invalid, 164) \ 171 - x(reflink_v_refcount_wrong, 165) \ 172 - x(reflink_p_to_missing_reflink_v, 166) \ 173 - x(stripe_pos_bad, 167) \ 174 - x(stripe_val_size_bad, 168) \ 175 - x(stripe_sector_count_wrong, 169) \ 176 - x(snapshot_tree_pos_bad, 170) \ 177 - x(snapshot_tree_to_missing_snapshot, 171) \ 178 - x(snapshot_tree_to_missing_subvol, 172) \ 179 - x(snapshot_tree_to_wrong_subvol, 173) \ 180 - x(snapshot_tree_to_snapshot_subvol, 174) \ 181 - x(snapshot_pos_bad, 175) \ 182 - x(snapshot_parent_bad, 176) \ 183 - x(snapshot_children_not_normalized, 177) \ 184 - x(snapshot_child_duplicate, 178) \ 185 - x(snapshot_child_bad, 179) \ 186 - x(snapshot_skiplist_not_normalized, 180) \ 187 - x(snapshot_skiplist_bad, 181) \ 188 - x(snapshot_should_not_have_subvol, 182) \ 189 - x(snapshot_to_bad_snapshot_tree, 183) \ 190 - x(snapshot_bad_depth, 184) \ 191 - x(snapshot_bad_skiplist, 185) \ 192 - x(subvol_pos_bad, 186) \ 193 - x(subvol_not_master_and_not_snapshot, 187) \ 194 - x(subvol_to_missing_root, 188) 
\ 195 - x(subvol_root_wrong_bi_subvol, 189) \ 196 - x(bkey_in_missing_snapshot, 190) \ 197 - x(inode_pos_inode_nonzero, 191) \ 198 - x(inode_pos_blockdev_range, 192) \ 199 - x(inode_unpack_error, 193) \ 200 - x(inode_str_hash_invalid, 194) \ 201 - x(inode_v3_fields_start_bad, 195) \ 202 - x(inode_snapshot_mismatch, 196) \ 203 - x(inode_unlinked_but_clean, 197) \ 204 - x(inode_unlinked_but_nlink_nonzero, 198) \ 205 - x(inode_checksum_type_invalid, 199) \ 206 - x(inode_compression_type_invalid, 200) \ 207 - x(inode_subvol_root_but_not_dir, 201) \ 208 - x(inode_i_size_dirty_but_clean, 202) \ 209 - x(inode_i_sectors_dirty_but_clean, 203) \ 210 - x(inode_i_sectors_wrong, 204) \ 211 - x(inode_dir_wrong_nlink, 205) \ 212 - x(inode_dir_multiple_links, 206) \ 213 - x(inode_multiple_links_but_nlink_0, 207) \ 214 - x(inode_wrong_backpointer, 208) \ 215 - x(inode_wrong_nlink, 209) \ 216 - x(inode_unreachable, 210) \ 217 - x(deleted_inode_but_clean, 211) \ 218 - x(deleted_inode_missing, 212) \ 219 - x(deleted_inode_is_dir, 213) \ 220 - x(deleted_inode_not_unlinked, 214) \ 221 - x(extent_overlapping, 215) \ 222 - x(extent_in_missing_inode, 216) \ 223 - x(extent_in_non_reg_inode, 217) \ 224 - x(extent_past_end_of_inode, 218) \ 225 - x(dirent_empty_name, 219) \ 226 - x(dirent_val_too_big, 220) \ 227 - x(dirent_name_too_long, 221) \ 228 - x(dirent_name_embedded_nul, 222) \ 229 - x(dirent_name_dot_or_dotdot, 223) \ 230 - x(dirent_name_has_slash, 224) \ 231 - x(dirent_d_type_wrong, 225) \ 232 - x(inode_bi_parent_wrong, 226) \ 233 - x(dirent_in_missing_dir_inode, 227) \ 234 - x(dirent_in_non_dir_inode, 228) \ 235 - x(dirent_to_missing_inode, 229) \ 236 - x(dirent_to_missing_subvol, 230) \ 237 - x(dirent_to_itself, 231) \ 238 - x(quota_type_invalid, 232) \ 239 - x(xattr_val_size_too_small, 233) \ 240 - x(xattr_val_size_too_big, 234) \ 241 - x(xattr_invalid_type, 235) \ 242 - x(xattr_name_invalid_chars, 236) \ 243 - x(xattr_in_missing_inode, 237) \ 244 - x(root_subvol_missing, 238) \ 
245 - x(root_dir_missing, 239) \ 246 - x(root_inode_not_dir, 240) \ 247 - x(dir_loop, 241) \ 248 - x(hash_table_key_duplicate, 242) \ 249 - x(hash_table_key_wrong_offset, 243) \ 250 - x(unlinked_inode_not_on_deleted_list, 244) \ 251 - x(reflink_p_front_pad_bad, 245) \ 252 - x(journal_entry_dup_same_device, 246) \ 253 - x(inode_bi_subvol_missing, 247) \ 254 - x(inode_bi_subvol_wrong, 248) \ 255 - x(inode_points_to_missing_dirent, 249) \ 256 - x(inode_points_to_wrong_dirent, 250) \ 257 - x(inode_bi_parent_nonzero, 251) \ 258 - x(dirent_to_missing_parent_subvol, 252) \ 259 - x(dirent_not_visible_in_parent_subvol, 253) \ 260 - x(subvol_fs_path_parent_wrong, 254) \ 261 - x(subvol_root_fs_path_parent_nonzero, 255) \ 262 - x(subvol_children_not_set, 256) \ 263 - x(subvol_children_bad, 257) \ 264 - x(subvol_loop, 258) \ 265 - x(subvol_unreachable, 259) \ 266 - x(btree_node_bkey_bad_u64s, 260) \ 267 - x(btree_node_topology_empty_interior_node, 261) \ 268 - x(btree_ptr_v2_min_key_bad, 262) \ 269 - x(btree_root_unreadable_and_scan_found_nothing, 263) \ 270 - x(snapshot_node_missing, 264) \ 271 - x(dup_backpointer_to_bad_csum_extent, 265) \ 272 - x(btree_bitmap_not_marked, 266) \ 273 - x(sb_clean_entry_overrun, 267) \ 274 - x(btree_ptr_v2_written_0, 268) \ 275 - x(subvol_snapshot_bad, 269) \ 276 - x(subvol_inode_bad, 270) 5 + enum bch_fsck_flags { 6 + FSCK_CAN_FIX = 1 << 0, 7 + FSCK_CAN_IGNORE = 1 << 1, 8 + FSCK_NEED_FSCK = 1 << 2, 9 + FSCK_NO_RATELIMIT = 1 << 3, 10 + FSCK_AUTOFIX = 1 << 4, 11 + }; 12 + 13 + #define BCH_SB_ERRS() \ 14 + x(clean_but_journal_not_empty, 0, 0) \ 15 + x(dirty_but_no_journal_entries, 1, 0) \ 16 + x(dirty_but_no_journal_entries_post_drop_nonflushes, 2, 0) \ 17 + x(sb_clean_journal_seq_mismatch, 3, 0) \ 18 + x(sb_clean_btree_root_mismatch, 4, 0) \ 19 + x(sb_clean_missing, 5, 0) \ 20 + x(jset_unsupported_version, 6, 0) \ 21 + x(jset_unknown_csum, 7, 0) \ 22 + x(jset_last_seq_newer_than_seq, 8, 0) \ 23 + x(jset_past_bucket_end, 9, 0) \ 24 + 
x(jset_seq_blacklisted, 10, 0) \ 25 + x(journal_entries_missing, 11, 0) \ 26 + x(journal_entry_replicas_not_marked, 12, 0) \ 27 + x(journal_entry_past_jset_end, 13, 0) \ 28 + x(journal_entry_replicas_data_mismatch, 14, 0) \ 29 + x(journal_entry_bkey_u64s_0, 15, 0) \ 30 + x(journal_entry_bkey_past_end, 16, 0) \ 31 + x(journal_entry_bkey_bad_format, 17, 0) \ 32 + x(journal_entry_bkey_invalid, 18, 0) \ 33 + x(journal_entry_btree_root_bad_size, 19, 0) \ 34 + x(journal_entry_blacklist_bad_size, 20, 0) \ 35 + x(journal_entry_blacklist_v2_bad_size, 21, 0) \ 36 + x(journal_entry_blacklist_v2_start_past_end, 22, 0) \ 37 + x(journal_entry_usage_bad_size, 23, 0) \ 38 + x(journal_entry_data_usage_bad_size, 24, 0) \ 39 + x(journal_entry_clock_bad_size, 25, 0) \ 40 + x(journal_entry_clock_bad_rw, 26, 0) \ 41 + x(journal_entry_dev_usage_bad_size, 27, 0) \ 42 + x(journal_entry_dev_usage_bad_dev, 28, 0) \ 43 + x(journal_entry_dev_usage_bad_pad, 29, 0) \ 44 + x(btree_node_unreadable, 30, 0) \ 45 + x(btree_node_fault_injected, 31, 0) \ 46 + x(btree_node_bad_magic, 32, 0) \ 47 + x(btree_node_bad_seq, 33, 0) \ 48 + x(btree_node_unsupported_version, 34, 0) \ 49 + x(btree_node_bset_older_than_sb_min, 35, 0) \ 50 + x(btree_node_bset_newer_than_sb, 36, 0) \ 51 + x(btree_node_data_missing, 37, 0) \ 52 + x(btree_node_bset_after_end, 38, 0) \ 53 + x(btree_node_replicas_sectors_written_mismatch, 39, 0) \ 54 + x(btree_node_replicas_data_mismatch, 40, 0) \ 55 + x(bset_unknown_csum, 41, 0) \ 56 + x(bset_bad_csum, 42, 0) \ 57 + x(bset_past_end_of_btree_node, 43, 0) \ 58 + x(bset_wrong_sector_offset, 44, 0) \ 59 + x(bset_empty, 45, 0) \ 60 + x(bset_bad_seq, 46, 0) \ 61 + x(bset_blacklisted_journal_seq, 47, 0) \ 62 + x(first_bset_blacklisted_journal_seq, 48, 0) \ 63 + x(btree_node_bad_btree, 49, 0) \ 64 + x(btree_node_bad_level, 50, 0) \ 65 + x(btree_node_bad_min_key, 51, 0) \ 66 + x(btree_node_bad_max_key, 52, 0) \ 67 + x(btree_node_bad_format, 53, 0) \ 68 + x(btree_node_bkey_past_bset_end, 54, 0) 
\ 69 + x(btree_node_bkey_bad_format, 55, 0) \ 70 + x(btree_node_bad_bkey, 56, 0) \ 71 + x(btree_node_bkey_out_of_order, 57, 0) \ 72 + x(btree_root_bkey_invalid, 58, 0) \ 73 + x(btree_root_read_error, 59, 0) \ 74 + x(btree_root_bad_min_key, 60, 0) \ 75 + x(btree_root_bad_max_key, 61, 0) \ 76 + x(btree_node_read_error, 62, 0) \ 77 + x(btree_node_topology_bad_min_key, 63, 0) \ 78 + x(btree_node_topology_bad_max_key, 64, 0) \ 79 + x(btree_node_topology_overwritten_by_prev_node, 65, 0) \ 80 + x(btree_node_topology_overwritten_by_next_node, 66, 0) \ 81 + x(btree_node_topology_interior_node_empty, 67, 0) \ 82 + x(fs_usage_hidden_wrong, 68, FSCK_AUTOFIX) \ 83 + x(fs_usage_btree_wrong, 69, FSCK_AUTOFIX) \ 84 + x(fs_usage_data_wrong, 70, FSCK_AUTOFIX) \ 85 + x(fs_usage_cached_wrong, 71, FSCK_AUTOFIX) \ 86 + x(fs_usage_reserved_wrong, 72, FSCK_AUTOFIX) \ 87 + x(fs_usage_persistent_reserved_wrong, 73, FSCK_AUTOFIX) \ 88 + x(fs_usage_nr_inodes_wrong, 74, FSCK_AUTOFIX) \ 89 + x(fs_usage_replicas_wrong, 75, FSCK_AUTOFIX) \ 90 + x(dev_usage_buckets_wrong, 76, FSCK_AUTOFIX) \ 91 + x(dev_usage_sectors_wrong, 77, FSCK_AUTOFIX) \ 92 + x(dev_usage_fragmented_wrong, 78, FSCK_AUTOFIX) \ 93 + x(dev_usage_buckets_ec_wrong, 79, FSCK_AUTOFIX) \ 94 + x(bkey_version_in_future, 80, 0) \ 95 + x(bkey_u64s_too_small, 81, 0) \ 96 + x(bkey_invalid_type_for_btree, 82, 0) \ 97 + x(bkey_extent_size_zero, 83, 0) \ 98 + x(bkey_extent_size_greater_than_offset, 84, 0) \ 99 + x(bkey_size_nonzero, 85, 0) \ 100 + x(bkey_snapshot_nonzero, 86, 0) \ 101 + x(bkey_snapshot_zero, 87, 0) \ 102 + x(bkey_at_pos_max, 88, 0) \ 103 + x(bkey_before_start_of_btree_node, 89, 0) \ 104 + x(bkey_after_end_of_btree_node, 90, 0) \ 105 + x(bkey_val_size_nonzero, 91, 0) \ 106 + x(bkey_val_size_too_small, 92, 0) \ 107 + x(alloc_v1_val_size_bad, 93, 0) \ 108 + x(alloc_v2_unpack_error, 94, 0) \ 109 + x(alloc_v3_unpack_error, 95, 0) \ 110 + x(alloc_v4_val_size_bad, 96, 0) \ 111 + x(alloc_v4_backpointers_start_bad, 97, 0) \ 112 + 
x(alloc_key_data_type_bad, 98, 0) \ 113 + x(alloc_key_empty_but_have_data, 99, 0) \ 114 + x(alloc_key_dirty_sectors_0, 100, 0) \ 115 + x(alloc_key_data_type_inconsistency, 101, 0) \ 116 + x(alloc_key_to_missing_dev_bucket, 102, 0) \ 117 + x(alloc_key_cached_inconsistency, 103, 0) \ 118 + x(alloc_key_cached_but_read_time_zero, 104, 0) \ 119 + x(alloc_key_to_missing_lru_entry, 105, 0) \ 120 + x(alloc_key_data_type_wrong, 106, FSCK_AUTOFIX) \ 121 + x(alloc_key_gen_wrong, 107, FSCK_AUTOFIX) \ 122 + x(alloc_key_dirty_sectors_wrong, 108, FSCK_AUTOFIX) \ 123 + x(alloc_key_cached_sectors_wrong, 109, FSCK_AUTOFIX) \ 124 + x(alloc_key_stripe_wrong, 110, FSCK_AUTOFIX) \ 125 + x(alloc_key_stripe_redundancy_wrong, 111, FSCK_AUTOFIX) \ 126 + x(bucket_sector_count_overflow, 112, 0) \ 127 + x(bucket_metadata_type_mismatch, 113, 0) \ 128 + x(need_discard_key_wrong, 114, 0) \ 129 + x(freespace_key_wrong, 115, 0) \ 130 + x(freespace_hole_missing, 116, 0) \ 131 + x(bucket_gens_val_size_bad, 117, 0) \ 132 + x(bucket_gens_key_wrong, 118, 0) \ 133 + x(bucket_gens_hole_wrong, 119, 0) \ 134 + x(bucket_gens_to_invalid_dev, 120, 0) \ 135 + x(bucket_gens_to_invalid_buckets, 121, 0) \ 136 + x(bucket_gens_nonzero_for_invalid_buckets, 122, 0) \ 137 + x(need_discard_freespace_key_to_invalid_dev_bucket, 123, 0) \ 138 + x(need_discard_freespace_key_bad, 124, 0) \ 139 + x(backpointer_bucket_offset_wrong, 125, 0) \ 140 + x(backpointer_to_missing_device, 126, 0) \ 141 + x(backpointer_to_missing_alloc, 127, 0) \ 142 + x(backpointer_to_missing_ptr, 128, 0) \ 143 + x(lru_entry_at_time_0, 129, 0) \ 144 + x(lru_entry_to_invalid_bucket, 130, 0) \ 145 + x(lru_entry_bad, 131, 0) \ 146 + x(btree_ptr_val_too_big, 132, 0) \ 147 + x(btree_ptr_v2_val_too_big, 133, 0) \ 148 + x(btree_ptr_has_non_ptr, 134, 0) \ 149 + x(extent_ptrs_invalid_entry, 135, 0) \ 150 + x(extent_ptrs_no_ptrs, 136, 0) \ 151 + x(extent_ptrs_too_many_ptrs, 137, 0) \ 152 + x(extent_ptrs_redundant_crc, 138, 0) \ 153 + 
x(extent_ptrs_redundant_stripe, 139, 0) \ 154 + x(extent_ptrs_unwritten, 140, 0) \ 155 + x(extent_ptrs_written_and_unwritten, 141, 0) \ 156 + x(ptr_to_invalid_device, 142, 0) \ 157 + x(ptr_to_duplicate_device, 143, 0) \ 158 + x(ptr_after_last_bucket, 144, 0) \ 159 + x(ptr_before_first_bucket, 145, 0) \ 160 + x(ptr_spans_multiple_buckets, 146, 0) \ 161 + x(ptr_to_missing_backpointer, 147, 0) \ 162 + x(ptr_to_missing_alloc_key, 148, 0) \ 163 + x(ptr_to_missing_replicas_entry, 149, 0) \ 164 + x(ptr_to_missing_stripe, 150, 0) \ 165 + x(ptr_to_incorrect_stripe, 151, 0) \ 166 + x(ptr_gen_newer_than_bucket_gen, 152, 0) \ 167 + x(ptr_too_stale, 153, 0) \ 168 + x(stale_dirty_ptr, 154, 0) \ 169 + x(ptr_bucket_data_type_mismatch, 155, 0) \ 170 + x(ptr_cached_and_erasure_coded, 156, 0) \ 171 + x(ptr_crc_uncompressed_size_too_small, 157, 0) \ 172 + x(ptr_crc_csum_type_unknown, 158, 0) \ 173 + x(ptr_crc_compression_type_unknown, 159, 0) \ 174 + x(ptr_crc_redundant, 160, 0) \ 175 + x(ptr_crc_uncompressed_size_too_big, 161, 0) \ 176 + x(ptr_crc_nonce_mismatch, 162, 0) \ 177 + x(ptr_stripe_redundant, 163, 0) \ 178 + x(reservation_key_nr_replicas_invalid, 164, 0) \ 179 + x(reflink_v_refcount_wrong, 165, 0) \ 180 + x(reflink_p_to_missing_reflink_v, 166, 0) \ 181 + x(stripe_pos_bad, 167, 0) \ 182 + x(stripe_val_size_bad, 168, 0) \ 183 + x(stripe_sector_count_wrong, 169, 0) \ 184 + x(snapshot_tree_pos_bad, 170, 0) \ 185 + x(snapshot_tree_to_missing_snapshot, 171, 0) \ 186 + x(snapshot_tree_to_missing_subvol, 172, 0) \ 187 + x(snapshot_tree_to_wrong_subvol, 173, 0) \ 188 + x(snapshot_tree_to_snapshot_subvol, 174, 0) \ 189 + x(snapshot_pos_bad, 175, 0) \ 190 + x(snapshot_parent_bad, 176, 0) \ 191 + x(snapshot_children_not_normalized, 177, 0) \ 192 + x(snapshot_child_duplicate, 178, 0) \ 193 + x(snapshot_child_bad, 179, 0) \ 194 + x(snapshot_skiplist_not_normalized, 180, 0) \ 195 + x(snapshot_skiplist_bad, 181, 0) \ 196 + x(snapshot_should_not_have_subvol, 182, 0) \ 197 + 
x(snapshot_to_bad_snapshot_tree, 183, 0) \ 198 + x(snapshot_bad_depth, 184, 0) \ 199 + x(snapshot_bad_skiplist, 185, 0) \ 200 + x(subvol_pos_bad, 186, 0) \ 201 + x(subvol_not_master_and_not_snapshot, 187, 0) \ 202 + x(subvol_to_missing_root, 188, 0) \ 203 + x(subvol_root_wrong_bi_subvol, 189, 0) \ 204 + x(bkey_in_missing_snapshot, 190, 0) \ 205 + x(inode_pos_inode_nonzero, 191, 0) \ 206 + x(inode_pos_blockdev_range, 192, 0) \ 207 + x(inode_unpack_error, 193, 0) \ 208 + x(inode_str_hash_invalid, 194, 0) \ 209 + x(inode_v3_fields_start_bad, 195, 0) \ 210 + x(inode_snapshot_mismatch, 196, 0) \ 211 + x(inode_unlinked_but_clean, 197, 0) \ 212 + x(inode_unlinked_but_nlink_nonzero, 198, 0) \ 213 + x(inode_checksum_type_invalid, 199, 0) \ 214 + x(inode_compression_type_invalid, 200, 0) \ 215 + x(inode_subvol_root_but_not_dir, 201, 0) \ 216 + x(inode_i_size_dirty_but_clean, 202, 0) \ 217 + x(inode_i_sectors_dirty_but_clean, 203, 0) \ 218 + x(inode_i_sectors_wrong, 204, 0) \ 219 + x(inode_dir_wrong_nlink, 205, 0) \ 220 + x(inode_dir_multiple_links, 206, 0) \ 221 + x(inode_multiple_links_but_nlink_0, 207, 0) \ 222 + x(inode_wrong_backpointer, 208, 0) \ 223 + x(inode_wrong_nlink, 209, 0) \ 224 + x(inode_unreachable, 210, 0) \ 225 + x(deleted_inode_but_clean, 211, 0) \ 226 + x(deleted_inode_missing, 212, 0) \ 227 + x(deleted_inode_is_dir, 213, 0) \ 228 + x(deleted_inode_not_unlinked, 214, 0) \ 229 + x(extent_overlapping, 215, 0) \ 230 + x(extent_in_missing_inode, 216, 0) \ 231 + x(extent_in_non_reg_inode, 217, 0) \ 232 + x(extent_past_end_of_inode, 218, 0) \ 233 + x(dirent_empty_name, 219, 0) \ 234 + x(dirent_val_too_big, 220, 0) \ 235 + x(dirent_name_too_long, 221, 0) \ 236 + x(dirent_name_embedded_nul, 222, 0) \ 237 + x(dirent_name_dot_or_dotdot, 223, 0) \ 238 + x(dirent_name_has_slash, 224, 0) \ 239 + x(dirent_d_type_wrong, 225, 0) \ 240 + x(inode_bi_parent_wrong, 226, 0) \ 241 + x(dirent_in_missing_dir_inode, 227, 0) \ 242 + x(dirent_in_non_dir_inode, 228, 0) \ 243 + 
x(dirent_to_missing_inode, 229, 0) \ 244 + x(dirent_to_missing_subvol, 230, 0) \ 245 + x(dirent_to_itself, 231, 0) \ 246 + x(quota_type_invalid, 232, 0) \ 247 + x(xattr_val_size_too_small, 233, 0) \ 248 + x(xattr_val_size_too_big, 234, 0) \ 249 + x(xattr_invalid_type, 235, 0) \ 250 + x(xattr_name_invalid_chars, 236, 0) \ 251 + x(xattr_in_missing_inode, 237, 0) \ 252 + x(root_subvol_missing, 238, 0) \ 253 + x(root_dir_missing, 239, 0) \ 254 + x(root_inode_not_dir, 240, 0) \ 255 + x(dir_loop, 241, 0) \ 256 + x(hash_table_key_duplicate, 242, 0) \ 257 + x(hash_table_key_wrong_offset, 243, 0) \ 258 + x(unlinked_inode_not_on_deleted_list, 244, 0) \ 259 + x(reflink_p_front_pad_bad, 245, 0) \ 260 + x(journal_entry_dup_same_device, 246, 0) \ 261 + x(inode_bi_subvol_missing, 247, 0) \ 262 + x(inode_bi_subvol_wrong, 248, 0) \ 263 + x(inode_points_to_missing_dirent, 249, 0) \ 264 + x(inode_points_to_wrong_dirent, 250, 0) \ 265 + x(inode_bi_parent_nonzero, 251, 0) \ 266 + x(dirent_to_missing_parent_subvol, 252, 0) \ 267 + x(dirent_not_visible_in_parent_subvol, 253, 0) \ 268 + x(subvol_fs_path_parent_wrong, 254, 0) \ 269 + x(subvol_root_fs_path_parent_nonzero, 255, 0) \ 270 + x(subvol_children_not_set, 256, 0) \ 271 + x(subvol_children_bad, 257, 0) \ 272 + x(subvol_loop, 258, 0) \ 273 + x(subvol_unreachable, 259, 0) \ 274 + x(btree_node_bkey_bad_u64s, 260, 0) \ 275 + x(btree_node_topology_empty_interior_node, 261, 0) \ 276 + x(btree_ptr_v2_min_key_bad, 262, 0) \ 277 + x(btree_root_unreadable_and_scan_found_nothing, 263, 0) \ 278 + x(snapshot_node_missing, 264, 0) \ 279 + x(dup_backpointer_to_bad_csum_extent, 265, 0) \ 280 + x(btree_bitmap_not_marked, 266, 0) \ 281 + x(sb_clean_entry_overrun, 267, 0) \ 282 + x(btree_ptr_v2_written_0, 268, 0) \ 283 + x(subvol_snapshot_bad, 269, 0) \ 284 + x(subvol_inode_bad, 270, 0) \ 285 + x(alloc_key_stripe_sectors_wrong, 271, 0) \ 286 + x(accounting_mismatch, 272, 0) \ 287 + x(accounting_replicas_not_marked, 273, 0) \ 288 + x(invalid_btree_id, 
274, 0) \ 289 + x(alloc_key_io_time_bad, 275, 0) 277 290 278 291 enum bch_sb_error_id { 279 - #define x(t, n) BCH_FSCK_ERR_##t = n, 292 + #define x(t, n, ...) BCH_FSCK_ERR_##t = n, 280 293 BCH_SB_ERRS() 281 294 #undef x 282 295 BCH_SB_ERR_MAX
+2 -7
fs/bcachefs/snapshot.c
··· 1565 1565 if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) 1566 1566 return 0; 1567 1567 1568 - if (!test_bit(BCH_FS_started, &c->flags)) { 1569 - ret = bch2_fs_read_write_early(c); 1570 - bch_err_msg(c, ret, "deleting dead snapshots: error going rw"); 1571 - if (ret) 1572 - return ret; 1573 - } 1574 - 1575 1568 trans = bch2_trans_get(c); 1576 1569 1577 1570 /* ··· 1679 1686 void bch2_delete_dead_snapshots_work(struct work_struct *work) 1680 1687 { 1681 1688 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work); 1689 + 1690 + set_worker_desc("bcachefs-delete-dead-snapshots/%s", c->name); 1682 1691 1683 1692 bch2_delete_dead_snapshots(c); 1684 1693 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
+1 -1
fs/bcachefs/str_hash.h
··· 300 300 if (!found && (flags & STR_HASH_must_replace)) { 301 301 ret = -BCH_ERR_ENOENT_str_hash_set_must_replace; 302 302 } else if (found && (flags & STR_HASH_must_create)) { 303 - ret = -EEXIST; 303 + ret = -BCH_ERR_EEXIST_str_hash_set; 304 304 } else { 305 305 if (!found && slot.path) 306 306 swap(iter, slot);
+4 -3
fs/bcachefs/super-io.c
··· 649 649 650 650 bytes = vstruct_bytes(sb->sb); 651 651 652 - if (bytes > 512ULL << min(BCH_SB_LAYOUT_SIZE_BITS_MAX, sb->sb->layout.sb_max_size_bits)) { 653 - prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %lu)", 654 - bytes, 512UL << sb->sb->layout.sb_max_size_bits); 652 + u64 sb_size = 512ULL << min(BCH_SB_LAYOUT_SIZE_BITS_MAX, sb->sb->layout.sb_max_size_bits); 653 + if (bytes > sb_size) { 654 + prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %llu)", 655 + bytes, sb_size); 655 656 return -BCH_ERR_invalid_sb_too_big; 656 657 } 657 658
+7 -6
fs/bcachefs/super.c
··· 912 912 bch2_io_clock_init(&c->io_clock[WRITE]) ?: 913 913 bch2_fs_journal_init(&c->journal) ?: 914 914 bch2_fs_replicas_init(c) ?: 915 + bch2_fs_btree_iter_init(c) ?: 915 916 bch2_fs_btree_cache_init(c) ?: 916 917 bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?: 917 - bch2_fs_btree_iter_init(c) ?: 918 918 bch2_fs_btree_interior_update_init(c) ?: 919 919 bch2_fs_buckets_waiting_for_journal_init(c) ?: 920 920 bch2_fs_btree_write_buffer_init(c) ?: ··· 931 931 if (ret) 932 932 goto err; 933 933 934 - for (i = 0; i < c->sb.nr_devices; i++) 935 - if (bch2_member_exists(c->disk_sb.sb, i) && 936 - bch2_dev_alloc(c, i)) { 937 - ret = -EEXIST; 934 + for (i = 0; i < c->sb.nr_devices; i++) { 935 + if (!bch2_member_exists(c->disk_sb.sb, i)) 936 + continue; 937 + ret = bch2_dev_alloc(c, i); 938 + if (ret) 938 939 goto err; 939 - } 940 + } 940 941 941 942 bch2_journal_entry_res_resize(&c->journal, 942 943 &c->btree_root_journal_res,
+3 -1
fs/btrfs/bio.c
··· 741 741 ret = btrfs_bio_csum(bbio); 742 742 if (ret) 743 743 goto fail_put_bio; 744 - } else if (use_append) { 744 + } else if (use_append || 745 + (btrfs_is_zoned(fs_info) && inode && 746 + inode->flags & BTRFS_INODE_NODATASUM)) { 745 747 ret = btrfs_alloc_dummy_sum(bbio); 746 748 if (ret) 747 749 goto fail_put_bio;
+9 -2
fs/btrfs/block-group.c
··· 1785 1785 container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 1786 1786 struct btrfs_block_group *bg; 1787 1787 struct btrfs_space_info *space_info; 1788 + LIST_HEAD(retry_list); 1788 1789 1789 1790 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1790 1791 return; ··· 1922 1921 } 1923 1922 1924 1923 next: 1925 - if (ret) 1926 - btrfs_mark_bg_to_reclaim(bg); 1924 + if (ret) { 1925 + /* Refcount held by the reclaim_bgs list after splice. */ 1926 + btrfs_get_block_group(bg); 1927 + list_add_tail(&bg->bg_list, &retry_list); 1928 + } 1927 1929 btrfs_put_block_group(bg); 1928 1930 1929 1931 mutex_unlock(&fs_info->reclaim_bgs_lock); ··· 1946 1942 spin_unlock(&fs_info->unused_bgs_lock); 1947 1943 mutex_unlock(&fs_info->reclaim_bgs_lock); 1948 1944 end: 1945 + spin_lock(&fs_info->unused_bgs_lock); 1946 + list_splice_tail(&retry_list, &fs_info->reclaim_bgs); 1947 + spin_unlock(&fs_info->unused_bgs_lock); 1949 1948 btrfs_exclop_finish(fs_info); 1950 1949 sb_end_write(fs_info->sb); 1951 1950 }
-2
fs/nfsd/netlink.c
··· 44 44 static const struct genl_split_ops nfsd_nl_ops[] = { 45 45 { 46 46 .cmd = NFSD_CMD_RPC_STATUS_GET, 47 - .start = nfsd_nl_rpc_status_get_start, 48 47 .dumpit = nfsd_nl_rpc_status_get_dumpit, 49 - .done = nfsd_nl_rpc_status_get_done, 50 48 .flags = GENL_CMD_CAP_DUMP, 51 49 }, 52 50 {
-3
fs/nfsd/netlink.h
··· 15 15 extern const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1]; 16 16 extern const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1]; 17 17 18 - int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb); 19 - int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb); 20 - 21 18 int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb, 22 19 struct netlink_callback *cb); 23 20 int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info);
+11 -37
fs/nfsd/nfsctl.c
··· 1460 1460 1461 1461 unsigned int nfsd_net_id; 1462 1462 1463 - /** 1464 - * nfsd_nl_rpc_status_get_start - Prepare rpc_status_get dumpit 1465 - * @cb: netlink metadata and command arguments 1466 - * 1467 - * Return values: 1468 - * %0: The rpc_status_get command may proceed 1469 - * %-ENODEV: There is no NFSD running in this namespace 1470 - */ 1471 - int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb) 1472 - { 1473 - struct nfsd_net *nn = net_generic(sock_net(cb->skb->sk), nfsd_net_id); 1474 - int ret = -ENODEV; 1475 - 1476 - mutex_lock(&nfsd_mutex); 1477 - if (nn->nfsd_serv) 1478 - ret = 0; 1479 - else 1480 - mutex_unlock(&nfsd_mutex); 1481 - 1482 - return ret; 1483 - } 1484 - 1485 1463 static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb, 1486 1464 struct netlink_callback *cb, 1487 1465 struct nfsd_genl_rqstp *rqstp) ··· 1536 1558 int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb, 1537 1559 struct netlink_callback *cb) 1538 1560 { 1539 - struct nfsd_net *nn = net_generic(sock_net(skb->sk), nfsd_net_id); 1540 1561 int i, ret, rqstp_index = 0; 1562 + struct nfsd_net *nn; 1563 + 1564 + mutex_lock(&nfsd_mutex); 1565 + 1566 + nn = net_generic(sock_net(skb->sk), nfsd_net_id); 1567 + if (!nn->nfsd_serv) { 1568 + ret = -ENODEV; 1569 + goto out_unlock; 1570 + } 1541 1571 1542 1572 rcu_read_lock(); 1543 1573 ··· 1622 1636 ret = skb->len; 1623 1637 out: 1624 1638 rcu_read_unlock(); 1625 - 1626 - return ret; 1627 - } 1628 - 1629 - /** 1630 - * nfsd_nl_rpc_status_get_done - rpc_status_get dumpit post-processing 1631 - * @cb: netlink metadata and command arguments 1632 - * 1633 - * Return values: 1634 - * %0: Success 1635 - */ 1636 - int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb) 1637 - { 1639 + out_unlock: 1638 1640 mutex_unlock(&nfsd_mutex); 1639 1641 1640 - return 0; 1642 + return ret; 1641 1643 } 1642 1644 1643 1645 /**
+107 -85
fs/ocfs2/journal.c
··· 479 479 return status; 480 480 } 481 481 482 - 483 - struct ocfs2_triggers { 484 - struct jbd2_buffer_trigger_type ot_triggers; 485 - int ot_offset; 486 - }; 487 - 488 482 static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers) 489 483 { 490 484 return container_of(triggers, struct ocfs2_triggers, ot_triggers); ··· 542 548 static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers, 543 549 struct buffer_head *bh) 544 550 { 551 + struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers); 552 + 545 553 mlog(ML_ERROR, 546 554 "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, " 547 555 "bh->b_blocknr = %llu\n", 548 556 (unsigned long)bh, 549 557 (unsigned long long)bh->b_blocknr); 550 558 551 - ocfs2_error(bh->b_assoc_map->host->i_sb, 559 + ocfs2_error(ot->sb, 552 560 "JBD2 has aborted our journal, ocfs2 cannot continue\n"); 553 561 } 554 562 555 - static struct ocfs2_triggers di_triggers = { 556 - .ot_triggers = { 557 - .t_frozen = ocfs2_frozen_trigger, 558 - .t_abort = ocfs2_abort_trigger, 559 - }, 560 - .ot_offset = offsetof(struct ocfs2_dinode, i_check), 561 - }; 563 + static void ocfs2_setup_csum_triggers(struct super_block *sb, 564 + enum ocfs2_journal_trigger_type type, 565 + struct ocfs2_triggers *ot) 566 + { 567 + BUG_ON(type >= OCFS2_JOURNAL_TRIGGER_COUNT); 562 568 563 - static struct ocfs2_triggers eb_triggers = { 564 - .ot_triggers = { 565 - .t_frozen = ocfs2_frozen_trigger, 566 - .t_abort = ocfs2_abort_trigger, 567 - }, 568 - .ot_offset = offsetof(struct ocfs2_extent_block, h_check), 569 - }; 569 + switch (type) { 570 + case OCFS2_JTR_DI: 571 + ot->ot_triggers.t_frozen = ocfs2_frozen_trigger; 572 + ot->ot_offset = offsetof(struct ocfs2_dinode, i_check); 573 + break; 574 + case OCFS2_JTR_EB: 575 + ot->ot_triggers.t_frozen = ocfs2_frozen_trigger; 576 + ot->ot_offset = offsetof(struct ocfs2_extent_block, h_check); 577 + break; 578 + case OCFS2_JTR_RB: 579 + ot->ot_triggers.t_frozen = 
ocfs2_frozen_trigger; 580 + ot->ot_offset = offsetof(struct ocfs2_refcount_block, rf_check); 581 + break; 582 + case OCFS2_JTR_GD: 583 + ot->ot_triggers.t_frozen = ocfs2_frozen_trigger; 584 + ot->ot_offset = offsetof(struct ocfs2_group_desc, bg_check); 585 + break; 586 + case OCFS2_JTR_DB: 587 + ot->ot_triggers.t_frozen = ocfs2_db_frozen_trigger; 588 + break; 589 + case OCFS2_JTR_XB: 590 + ot->ot_triggers.t_frozen = ocfs2_frozen_trigger; 591 + ot->ot_offset = offsetof(struct ocfs2_xattr_block, xb_check); 592 + break; 593 + case OCFS2_JTR_DQ: 594 + ot->ot_triggers.t_frozen = ocfs2_dq_frozen_trigger; 595 + break; 596 + case OCFS2_JTR_DR: 597 + ot->ot_triggers.t_frozen = ocfs2_frozen_trigger; 598 + ot->ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check); 599 + break; 600 + case OCFS2_JTR_DL: 601 + ot->ot_triggers.t_frozen = ocfs2_frozen_trigger; 602 + ot->ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check); 603 + break; 604 + case OCFS2_JTR_NONE: 605 + /* To make compiler happy... */ 606 + return; 607 + } 570 608 571 - static struct ocfs2_triggers rb_triggers = { 572 - .ot_triggers = { 573 - .t_frozen = ocfs2_frozen_trigger, 574 - .t_abort = ocfs2_abort_trigger, 575 - }, 576 - .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check), 577 - }; 609 + ot->ot_triggers.t_abort = ocfs2_abort_trigger; 610 + ot->sb = sb; 611 + } 578 612 579 - static struct ocfs2_triggers gd_triggers = { 580 - .ot_triggers = { 581 - .t_frozen = ocfs2_frozen_trigger, 582 - .t_abort = ocfs2_abort_trigger, 583 - }, 584 - .ot_offset = offsetof(struct ocfs2_group_desc, bg_check), 585 - }; 613 + void ocfs2_initialize_journal_triggers(struct super_block *sb, 614 + struct ocfs2_triggers triggers[]) 615 + { 616 + enum ocfs2_journal_trigger_type type; 586 617 587 - static struct ocfs2_triggers db_triggers = { 588 - .ot_triggers = { 589 - .t_frozen = ocfs2_db_frozen_trigger, 590 - .t_abort = ocfs2_abort_trigger, 591 - }, 592 - }; 593 - 594 - static struct ocfs2_triggers xb_triggers = { 595 - 
.ot_triggers = { 596 - .t_frozen = ocfs2_frozen_trigger, 597 - .t_abort = ocfs2_abort_trigger, 598 - }, 599 - .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check), 600 - }; 601 - 602 - static struct ocfs2_triggers dq_triggers = { 603 - .ot_triggers = { 604 - .t_frozen = ocfs2_dq_frozen_trigger, 605 - .t_abort = ocfs2_abort_trigger, 606 - }, 607 - }; 608 - 609 - static struct ocfs2_triggers dr_triggers = { 610 - .ot_triggers = { 611 - .t_frozen = ocfs2_frozen_trigger, 612 - .t_abort = ocfs2_abort_trigger, 613 - }, 614 - .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check), 615 - }; 616 - 617 - static struct ocfs2_triggers dl_triggers = { 618 - .ot_triggers = { 619 - .t_frozen = ocfs2_frozen_trigger, 620 - .t_abort = ocfs2_abort_trigger, 621 - }, 622 - .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check), 623 - }; 618 + for (type = OCFS2_JTR_DI; type < OCFS2_JOURNAL_TRIGGER_COUNT; type++) 619 + ocfs2_setup_csum_triggers(sb, type, &triggers[type]); 620 + } 624 621 625 622 static int __ocfs2_journal_access(handle_t *handle, 626 623 struct ocfs2_caching_info *ci, ··· 693 708 int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci, 694 709 struct buffer_head *bh, int type) 695 710 { 696 - return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type); 711 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 712 + 713 + return __ocfs2_journal_access(handle, ci, bh, 714 + &osb->s_journal_triggers[OCFS2_JTR_DI], 715 + type); 697 716 } 698 717 699 718 int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci, 700 719 struct buffer_head *bh, int type) 701 720 { 702 - return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type); 721 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 722 + 723 + return __ocfs2_journal_access(handle, ci, bh, 724 + &osb->s_journal_triggers[OCFS2_JTR_EB], 725 + type); 703 726 } 704 727 705 728 int ocfs2_journal_access_rb(handle_t *handle, struct 
ocfs2_caching_info *ci, 706 729 struct buffer_head *bh, int type) 707 730 { 708 - return __ocfs2_journal_access(handle, ci, bh, &rb_triggers, 731 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 732 + 733 + return __ocfs2_journal_access(handle, ci, bh, 734 + &osb->s_journal_triggers[OCFS2_JTR_RB], 709 735 type); 710 736 } 711 737 712 738 int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci, 713 739 struct buffer_head *bh, int type) 714 740 { 715 - return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type); 741 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 742 + 743 + return __ocfs2_journal_access(handle, ci, bh, 744 + &osb->s_journal_triggers[OCFS2_JTR_GD], 745 + type); 716 746 } 717 747 718 748 int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci, 719 749 struct buffer_head *bh, int type) 720 750 { 721 - return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type); 751 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 752 + 753 + return __ocfs2_journal_access(handle, ci, bh, 754 + &osb->s_journal_triggers[OCFS2_JTR_DB], 755 + type); 722 756 } 723 757 724 758 int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci, 725 759 struct buffer_head *bh, int type) 726 760 { 727 - return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type); 761 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 762 + 763 + return __ocfs2_journal_access(handle, ci, bh, 764 + &osb->s_journal_triggers[OCFS2_JTR_XB], 765 + type); 728 766 } 729 767 730 768 int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci, 731 769 struct buffer_head *bh, int type) 732 770 { 733 - return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type); 771 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 772 + 773 + return __ocfs2_journal_access(handle, ci, bh, 774 + 
&osb->s_journal_triggers[OCFS2_JTR_DQ], 775 + type); 734 776 } 735 777 736 778 int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci, 737 779 struct buffer_head *bh, int type) 738 780 { 739 - return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type); 781 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 782 + 783 + return __ocfs2_journal_access(handle, ci, bh, 784 + &osb->s_journal_triggers[OCFS2_JTR_DR], 785 + type); 740 786 } 741 787 742 788 int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci, 743 789 struct buffer_head *bh, int type) 744 790 { 745 - return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type); 791 + struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 792 + 793 + return __ocfs2_journal_access(handle, ci, bh, 794 + &osb->s_journal_triggers[OCFS2_JTR_DL], 795 + type); 746 796 } 747 797 748 798 int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci, ··· 798 778 if (!is_handle_aborted(handle)) { 799 779 journal_t *journal = handle->h_transaction->t_journal; 800 780 801 - mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. " 802 - "Aborting transaction and journal.\n"); 781 + mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed: " 782 + "handle type %u started at line %u, credits %u/%u " 783 + "errcode %d. Aborting transaction and journal.\n", 784 + handle->h_type, handle->h_line_no, 785 + handle->h_requested_credits, 786 + jbd2_handle_buffer_credits(handle), status); 803 787 handle->h_err = status; 804 788 jbd2_journal_abort_handle(handle); 805 789 jbd2_journal_abort(journal, status); 806 - ocfs2_abort(bh->b_assoc_map->host->i_sb, 807 - "Journal already aborted.\n"); 808 790 } 809 791 } 810 792 }
+27
fs/ocfs2/ocfs2.h
··· 284 284 #define OCFS2_OSB_ERROR_FS 0x0004 285 285 #define OCFS2_DEFAULT_ATIME_QUANTUM 60 286 286 287 + struct ocfs2_triggers { 288 + struct jbd2_buffer_trigger_type ot_triggers; 289 + int ot_offset; 290 + struct super_block *sb; 291 + }; 292 + 293 + enum ocfs2_journal_trigger_type { 294 + OCFS2_JTR_DI, 295 + OCFS2_JTR_EB, 296 + OCFS2_JTR_RB, 297 + OCFS2_JTR_GD, 298 + OCFS2_JTR_DB, 299 + OCFS2_JTR_XB, 300 + OCFS2_JTR_DQ, 301 + OCFS2_JTR_DR, 302 + OCFS2_JTR_DL, 303 + OCFS2_JTR_NONE /* This must be the last entry */ 304 + }; 305 + 306 + #define OCFS2_JOURNAL_TRIGGER_COUNT OCFS2_JTR_NONE 307 + 308 + void ocfs2_initialize_journal_triggers(struct super_block *sb, 309 + struct ocfs2_triggers triggers[]); 310 + 287 311 struct ocfs2_journal; 288 312 struct ocfs2_slot_info; 289 313 struct ocfs2_recovery_map; ··· 374 350 wait_queue_head_t checkpoint_event; 375 351 struct ocfs2_journal *journal; 376 352 unsigned long osb_commit_interval; 353 + 354 + /* Journal triggers for checksum */ 355 + struct ocfs2_triggers s_journal_triggers[OCFS2_JOURNAL_TRIGGER_COUNT]; 377 356 378 357 struct delayed_work la_enable_wq; 379 358
+3 -1
fs/ocfs2/super.c
··· 1075 1075 debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root, 1076 1076 osb, &ocfs2_osb_debug_fops); 1077 1077 1078 - if (ocfs2_meta_ecc(osb)) 1078 + if (ocfs2_meta_ecc(osb)) { 1079 + ocfs2_initialize_journal_triggers(sb, osb->s_journal_triggers); 1079 1080 ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats, 1080 1081 osb->osb_debug_root); 1082 + } 1081 1083 1082 1084 status = ocfs2_mount_volume(sb); 1083 1085 if (status < 0)
+4 -4
fs/overlayfs/dir.c
··· 1314 1314 int flags = file->f_flags | OVL_OPEN_FLAGS; 1315 1315 int err; 1316 1316 1317 - err = ovl_copy_up(dentry->d_parent); 1318 - if (err) 1319 - return err; 1320 - 1321 1317 old_cred = ovl_override_creds(dentry->d_sb); 1322 1318 err = ovl_setup_cred_for_create(dentry, inode, mode, old_cred); 1323 1319 if (err) ··· 1355 1359 1356 1360 if (!OVL_FS(dentry->d_sb)->tmpfile) 1357 1361 return -EOPNOTSUPP; 1362 + 1363 + err = ovl_copy_up(dentry->d_parent); 1364 + if (err) 1365 + return err; 1358 1366 1359 1367 err = ovl_want_write(dentry); 1360 1368 if (err)
+5 -1
fs/overlayfs/export.c
··· 181 181 struct ovl_fs *ofs = OVL_FS(dentry->d_sb); 182 182 bool decodable = ofs->config.nfs_export; 183 183 184 + /* No upper layer? */ 185 + if (!ovl_upper_mnt(ofs)) 186 + return 1; 187 + 184 188 /* Lower file handle for non-upper non-decodable */ 185 189 if (!ovl_dentry_upper(dentry) && !decodable) 186 190 return 1; ··· 213 209 * ovl_connect_layer() will try to make origin's layer "connected" by 214 210 * copying up a "connectable" ancestor. 215 211 */ 216 - if (d_is_dir(dentry) && ovl_upper_mnt(ofs) && decodable) 212 + if (d_is_dir(dentry) && decodable) 217 213 return ovl_connect_layer(dentry); 218 214 219 215 /* Lower file handle for indexed and non-upper dir/non-dir */
+1 -1
fs/smb/client/cifsfs.c
··· 134 134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1"); 135 135 136 136 module_param(enable_gcm_256, bool, 0644); 137 - MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0"); 137 + MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0"); 138 138 139 139 module_param(require_gcm_256, bool, 0644); 140 140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
+2 -1
fs/smb/client/cifsglob.h
··· 1494 1494 struct cifs_io_request { 1495 1495 struct netfs_io_request rreq; 1496 1496 struct cifsFileInfo *cfile; 1497 + struct TCP_Server_Info *server; 1498 + pid_t pid; 1497 1499 }; 1498 1500 1499 1501 /* asynchronous read support */ ··· 1506 1504 struct cifs_io_request *req; 1507 1505 }; 1508 1506 ssize_t got_bytes; 1509 - pid_t pid; 1510 1507 unsigned int xid; 1511 1508 int result; 1512 1509 bool have_xid;
+4 -4
fs/smb/client/cifssmb.c
··· 1345 1345 if (rc) 1346 1346 return rc; 1347 1347 1348 - smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid); 1349 - smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16)); 1348 + smb->hdr.Pid = cpu_to_le16((__u16)rdata->req->pid); 1349 + smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->req->pid >> 16)); 1350 1350 1351 1351 smb->AndXCommand = 0xFF; /* none */ 1352 1352 smb->Fid = rdata->req->cfile->fid.netfid; ··· 1689 1689 if (rc) 1690 1690 goto async_writev_out; 1691 1691 1692 - smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid); 1693 - smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16)); 1692 + smb->hdr.Pid = cpu_to_le16((__u16)wdata->req->pid); 1693 + smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->req->pid >> 16)); 1694 1694 1695 1695 smb->AndXCommand = 0xFF; /* none */ 1696 1696 smb->Fid = wdata->req->cfile->fid.netfid;
+6 -21
fs/smb/client/file.c
··· 134 134 static bool cifs_clamp_length(struct netfs_io_subrequest *subreq) 135 135 { 136 136 struct netfs_io_request *rreq = subreq->rreq; 137 - struct TCP_Server_Info *server; 138 137 struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); 139 138 struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); 139 + struct TCP_Server_Info *server = req->server; 140 140 struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 141 141 size_t rsize = 0; 142 142 int rc; 143 143 144 144 rdata->xid = get_xid(); 145 145 rdata->have_xid = true; 146 - 147 - server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses); 148 146 rdata->server = server; 149 147 150 148 if (cifs_sb->ctx->rsize == 0) ··· 177 179 struct netfs_io_request *rreq = subreq->rreq; 178 180 struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); 179 181 struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); 180 - struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 181 - pid_t pid; 182 182 int rc = 0; 183 - 184 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 185 - pid = req->cfile->pid; 186 - else 187 - pid = current->tgid; // Ummm... 
This may be a workqueue 188 183 189 184 cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n", 190 185 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping, ··· 192 201 } 193 202 194 203 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 195 - rdata->pid = pid; 196 204 197 - rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len); 198 - if (!rc) { 199 - if (rdata->req->cfile->invalidHandle) 200 - rc = -EAGAIN; 201 - else 202 - rc = rdata->server->ops->async_readv(rdata); 203 - } 204 - 205 + rc = rdata->server->ops->async_readv(rdata); 205 206 out: 206 207 if (rc) 207 208 netfs_subreq_terminated(subreq, rc, false); ··· 228 245 229 246 rreq->rsize = cifs_sb->ctx->rsize; 230 247 rreq->wsize = cifs_sb->ctx->wsize; 248 + req->pid = current->tgid; // Ummm... This may be a workqueue 231 249 232 250 if (file) { 233 251 open_file = file->private_data; 234 252 rreq->netfs_priv = file->private_data; 235 253 req->cfile = cifsFileInfo_get(open_file); 254 + req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses); 255 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 256 + req->pid = req->cfile->pid; 236 257 } else if (rreq->origin != NETFS_WRITEBACK) { 237 258 WARN_ON_ONCE(1); 238 259 return -EIO; ··· 3186 3199 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter) 3187 3200 { 3188 3201 ssize_t ret; 3189 - 3190 - WARN_ON_ONCE(iov_iter_count(iter) != PAGE_SIZE); 3191 3202 3192 3203 if (iov_iter_rw(iter) == READ) 3193 3204 ret = netfs_unbuffered_read_iter_locked(iocb, iter);
+14 -5
fs/smb/client/smb2pdu.c
··· 4484 4484 return rc; 4485 4485 } 4486 4486 4487 + static void smb2_readv_worker(struct work_struct *work) 4488 + { 4489 + struct cifs_io_subrequest *rdata = 4490 + container_of(work, struct cifs_io_subrequest, subreq.work); 4491 + 4492 + netfs_subreq_terminated(&rdata->subreq, 4493 + (rdata->result == 0 || rdata->result == -EAGAIN) ? 4494 + rdata->got_bytes : rdata->result, true); 4495 + } 4496 + 4487 4497 static void 4488 4498 smb2_readv_callback(struct mid_q_entry *mid) 4489 4499 { ··· 4588 4578 rdata->result = 0; 4589 4579 } 4590 4580 rdata->credits.value = 0; 4591 - netfs_subreq_terminated(&rdata->subreq, 4592 - (rdata->result == 0 || rdata->result == -EAGAIN) ? 4593 - rdata->got_bytes : rdata->result, true); 4581 + INIT_WORK(&rdata->subreq.work, smb2_readv_worker); 4582 + queue_work(cifsiod_wq, &rdata->subreq.work); 4594 4583 release_mid(mid); 4595 4584 add_credits(server, &credits, 0); 4596 4585 } ··· 4621 4612 io_parms.length = rdata->subreq.len; 4622 4613 io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid; 4623 4614 io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid; 4624 - io_parms.pid = rdata->pid; 4615 + io_parms.pid = rdata->req->pid; 4625 4616 4626 4617 rc = smb2_new_read_req( 4627 4618 (void **) &buf, &total_len, &io_parms, rdata, 0, 0); ··· 4873 4864 .length = wdata->subreq.len, 4874 4865 .persistent_fid = wdata->req->cfile->fid.persistent_fid, 4875 4866 .volatile_fid = wdata->req->cfile->fid.volatile_fid, 4876 - .pid = wdata->pid, 4867 + .pid = wdata->req->pid, 4877 4868 }; 4878 4869 io_parms = &_io_parms; 4879 4870
+19 -4
fs/xfs/xfs_inode.c
··· 2548 2548 * This buffer may not have been correctly initialised as we 2549 2549 * didn't read it from disk. That's not important because we are 2550 2550 * only using to mark the buffer as stale in the log, and to 2551 - * attach stale cached inodes on it. That means it will never be 2552 - * dispatched for IO. If it is, we want to know about it, and we 2553 - * want it to fail. We can acheive this by adding a write 2554 - * verifier to the buffer. 2551 + * attach stale cached inodes on it. 2552 + * 2553 + * For the inode that triggered the cluster freeing, this 2554 + * attachment may occur in xfs_inode_item_precommit() after we 2555 + * have marked this buffer stale. If this buffer was not in 2556 + * memory before xfs_ifree_cluster() started, it will not be 2557 + * marked XBF_DONE and this will cause problems later in 2558 + * xfs_inode_item_precommit() when we trip over a (stale, !done) 2559 + * buffer to attached to the transaction. 2560 + * 2561 + * Hence we have to mark the buffer as XFS_DONE here. This is 2562 + * safe because we are also marking the buffer as XBF_STALE and 2563 + * XFS_BLI_STALE. That means it will never be dispatched for 2564 + * IO and it won't be unlocked until the cluster freeing has 2565 + * been committed to the journal and the buffer unpinned. If it 2566 + * is written, we want to know about it, and we want it to 2567 + * fail. We can acheive this by adding a write verifier to the 2568 + * buffer. 2555 2569 */ 2570 + bp->b_flags |= XBF_DONE; 2556 2571 bp->b_ops = &xfs_inode_buf_ops; 2557 2572 2558 2573 /*
+2
include/linux/bpf_verifier.h
··· 746 746 /* Same as scratched_regs but for stack slots */ 747 747 u64 scratched_stack_slots; 748 748 u64 prev_log_pos, prev_insn_print_pos; 749 + /* buffer used to temporary hold constants as scalar registers */ 750 + struct bpf_reg_state fake_reg[2]; 749 751 /* buffer used to generate temporary string representations, 750 752 * e.g., in reg_type_str() to generate reg_type string 751 753 */
+1 -1
include/linux/btf.h
··· 82 82 * as to avoid issues such as the compiler inlining or eliding either a static 83 83 * kfunc, or a global kfunc in an LTO build. 84 84 */ 85 - #define __bpf_kfunc __used noinline 85 + #define __bpf_kfunc __used __retain noinline 86 86 87 87 #define __bpf_kfunc_start_defs() \ 88 88 __diag_push(); \
+23
include/linux/compiler_types.h
··· 143 143 # define __preserve_most 144 144 #endif 145 145 146 + /* 147 + * Annotating a function/variable with __retain tells the compiler to place 148 + * the object in its own section and set the flag SHF_GNU_RETAIN. This flag 149 + * instructs the linker to retain the object during garbage-cleanup or LTO 150 + * phases. 151 + * 152 + * Note that the __used macro is also used to prevent functions or data 153 + * being optimized out, but operates at the compiler/IR-level and may still 154 + * allow unintended removal of objects during linking. 155 + * 156 + * Optional: only supported since gcc >= 11, clang >= 13 157 + * 158 + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute 159 + * clang: https://clang.llvm.org/docs/AttributeReference.html#retain 160 + */ 161 + #if __has_attribute(__retain__) && \ 162 + (defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \ 163 + defined(CONFIG_LTO_CLANG)) 164 + # define __retain __attribute__((__retain__)) 165 + #else 166 + # define __retain 167 + #endif 168 + 146 169 /* Compiler specific macros. */ 147 170 #ifdef __clang__ 148 171 #include <linux/compiler-clang.h>
+22 -2
include/linux/i2c.h
··· 960 960 #define builtin_i2c_driver(__i2c_driver) \ 961 961 builtin_driver(__i2c_driver, i2c_add_driver) 962 962 963 - #endif /* I2C */ 964 - 965 963 /* must call put_device() when done with returned i2c_client device */ 966 964 struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode); 967 965 ··· 968 970 969 971 /* must call i2c_put_adapter() when done with returned i2c_adapter device */ 970 972 struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode); 973 + 974 + #else /* I2C */ 975 + 976 + static inline struct i2c_client * 977 + i2c_find_device_by_fwnode(struct fwnode_handle *fwnode) 978 + { 979 + return NULL; 980 + } 981 + 982 + static inline struct i2c_adapter * 983 + i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode) 984 + { 985 + return NULL; 986 + } 987 + 988 + static inline struct i2c_adapter * 989 + i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode) 990 + { 991 + return NULL; 992 + } 993 + 994 + #endif /* !I2C */ 971 995 972 996 #if IS_ENABLED(CONFIG_OF) 973 997 /* must call put_device() when done with returned i2c_client device */
+2
include/linux/kcov.h
··· 21 21 KCOV_MODE_TRACE_PC = 2, 22 22 /* Collecting comparison operands mode. */ 23 23 KCOV_MODE_TRACE_CMP = 3, 24 + /* The process owns a KCOV remote reference. */ 25 + KCOV_MODE_REMOTE = 4, 24 26 }; 25 27 26 28 #define KCOV_IN_CTXSW (1 << 30)
+1 -1
include/linux/lsm_hook_defs.h
··· 413 413 414 414 #ifdef CONFIG_AUDIT 415 415 LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr, 416 - void **lsmrule) 416 + void **lsmrule, gfp_t gfp) 417 417 LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule) 418 418 LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule) 419 419 LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+1 -8
include/linux/mm.h
··· 3776 3776 static inline bool want_init_on_free(void) 3777 3777 { 3778 3778 return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON, 3779 - &init_on_free); 3780 - } 3781 - 3782 - DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free); 3783 - static inline bool want_init_mlocked_on_free(void) 3784 - { 3785 - return static_branch_maybe(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, 3786 - &init_mlocked_on_free); 3779 + &init_on_free); 3787 3780 } 3788 3781 3789 3782 extern bool _debug_pagealloc_enabled_early;
+5
include/linux/numa.h
··· 15 15 #define NUMA_NO_NODE (-1) 16 16 #define NUMA_NO_MEMBLK (-1) 17 17 18 + static inline bool numa_valid_node(int nid) 19 + { 20 + return nid >= 0 && nid < MAX_NUMNODES; 21 + } 22 + 18 23 /* optionally keep NUMA memory info available post init */ 19 24 #ifdef CONFIG_NUMA_KEEP_MEMINFO 20 25 #define __initdata_or_meminfo
+4
include/linux/pagemap.h
··· 381 381 */ 382 382 static inline bool mapping_large_folio_support(struct address_space *mapping) 383 383 { 384 + /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */ 385 + VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON, 386 + "Anonymous mapping always supports large folio"); 387 + 384 388 return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 385 389 test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); 386 390 }
+8 -3
include/linux/pgalloc_tag.h
··· 37 37 38 38 static inline void put_page_tag_ref(union codetag_ref *ref) 39 39 { 40 + if (WARN_ON(!ref)) 41 + return; 42 + 40 43 page_ext_put(page_ext_from_codetag_ref(ref)); 41 44 } 42 45 ··· 105 102 union codetag_ref *ref = get_page_tag_ref(page); 106 103 107 104 alloc_tag_sub_check(ref); 108 - if (ref && ref->ct) 109 - tag = ct_to_alloc_tag(ref->ct); 110 - put_page_tag_ref(ref); 105 + if (ref) { 106 + if (ref->ct) 107 + tag = ct_to_alloc_tag(ref->ct); 108 + put_page_tag_ref(ref); 109 + } 111 110 } 112 111 113 112 return tag;
+2
include/linux/regmap.h
··· 1237 1237 void *val, size_t val_len); 1238 1238 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 1239 1239 size_t val_count); 1240 + int regmap_multi_reg_read(struct regmap *map, unsigned int *reg, void *val, 1241 + size_t val_count); 1240 1242 int regmap_update_bits_base(struct regmap *map, unsigned int reg, 1241 1243 unsigned int mask, unsigned int val, 1242 1244 bool *change, bool async, bool force);
+3 -2
include/linux/security.h
··· 2048 2048 2049 2049 #ifdef CONFIG_AUDIT 2050 2050 #ifdef CONFIG_SECURITY 2051 - int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); 2051 + int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule, 2052 + gfp_t gfp); 2052 2053 int security_audit_rule_known(struct audit_krule *krule); 2053 2054 int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule); 2054 2055 void security_audit_rule_free(void *lsmrule); ··· 2057 2056 #else 2058 2057 2059 2058 static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr, 2060 - void **lsmrule) 2059 + void **lsmrule, gfp_t gfp) 2061 2060 { 2062 2061 return 0; 2063 2062 }
+3 -2
include/linux/spi/spi.h
··· 1085 1085 unsigned dummy_data:1; 1086 1086 unsigned cs_off:1; 1087 1087 unsigned cs_change:1; 1088 - unsigned tx_nbits:3; 1089 - unsigned rx_nbits:3; 1088 + unsigned tx_nbits:4; 1089 + unsigned rx_nbits:4; 1090 1090 unsigned timestamped:1; 1091 1091 #define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */ 1092 1092 #define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */ 1093 1093 #define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */ 1094 + #define SPI_NBITS_OCTAL 0x08 /* 8-bit transfer */ 1094 1095 u8 bits_per_word; 1095 1096 struct spi_delay delay; 1096 1097 struct spi_delay cs_change_delay;
+1 -1
include/linux/string.h
··· 289 289 290 290 extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); 291 291 extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); 292 - extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp) 292 + extern void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp) 293 293 __realloc_size(2, 3); 294 294 295 295 /* lib/argv_split.c */
+3
include/net/netns/netfilter.h
··· 15 15 const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; 16 16 #ifdef CONFIG_SYSCTL 17 17 struct ctl_table_header *nf_log_dir_header; 18 + #ifdef CONFIG_LWTUNNEL 19 + struct ctl_table_header *nf_lwtnl_dir_header; 20 + #endif 18 21 #endif 19 22 struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS]; 20 23 struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
+3 -1
include/scsi/scsi_devinfo.h
··· 69 69 #define BLIST_RETRY_ITF ((__force blist_flags_t)(1ULL << 32)) 70 70 /* Always retry ABORTED_COMMAND with ASC 0xc1 */ 71 71 #define BLIST_RETRY_ASC_C1 ((__force blist_flags_t)(1ULL << 33)) 72 + /* Do not query the IO Advice Hints Grouping mode page */ 73 + #define BLIST_SKIP_IO_HINTS ((__force blist_flags_t)(1ULL << 34)) 72 74 73 - #define __BLIST_LAST_USED BLIST_RETRY_ASC_C1 75 + #define __BLIST_LAST_USED BLIST_SKIP_IO_HINTS 74 76 75 77 #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \ 76 78 (__force blist_flags_t) \
+1 -1
init/Kconfig
··· 883 883 884 884 config CC_NO_ARRAY_BOUNDS 885 885 bool 886 - default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS 886 + default y if CC_IS_GCC && GCC_VERSION >= 90000 && GCC10_NO_ARRAY_BOUNDS 887 887 888 888 # Currently, disable -Wstringop-overflow for GCC globally. 889 889 config GCC_NO_STRINGOP_OVERFLOW
-1
io_uring/rsrc.c
··· 1068 1068 * branch doesn't expect non PAGE_SIZE'd chunks. 1069 1069 */ 1070 1070 iter->bvec = bvec; 1071 - iter->nr_segs = bvec->bv_len; 1072 1071 iter->count -= offset; 1073 1072 iter->iov_offset = offset; 1074 1073 } else {
+3 -2
kernel/auditfilter.c
··· 529 529 entry->rule.buflen += f_val; 530 530 f->lsm_str = str; 531 531 err = security_audit_rule_init(f->type, f->op, str, 532 - (void **)&f->lsm_rule); 532 + (void **)&f->lsm_rule, 533 + GFP_KERNEL); 533 534 /* Keep currently invalid fields around in case they 534 535 * become valid after a policy reload. */ 535 536 if (err == -EINVAL) { ··· 800 799 801 800 /* our own (refreshed) copy of lsm_rule */ 802 801 ret = security_audit_rule_init(df->type, df->op, df->lsm_str, 803 - (void **)&df->lsm_rule); 802 + (void **)&df->lsm_rule, GFP_KERNEL); 804 803 /* Keep currently invalid fields around in case they 805 804 * become valid after a policy reload. */ 806 805 if (ret == -EINVAL) {
+16 -9
kernel/bpf/verifier.c
··· 4549 4549 state->stack[spi].spilled_ptr.id = 0; 4550 4550 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && 4551 4551 env->bpf_capable) { 4552 - struct bpf_reg_state fake_reg = {}; 4552 + struct bpf_reg_state *tmp_reg = &env->fake_reg[0]; 4553 4553 4554 - __mark_reg_known(&fake_reg, insn->imm); 4555 - fake_reg.type = SCALAR_VALUE; 4556 - save_register_state(env, state, spi, &fake_reg, size); 4554 + memset(tmp_reg, 0, sizeof(*tmp_reg)); 4555 + __mark_reg_known(tmp_reg, insn->imm); 4556 + tmp_reg->type = SCALAR_VALUE; 4557 + save_register_state(env, state, spi, tmp_reg, size); 4557 4558 } else if (reg && is_spillable_regtype(reg->type)) { 4558 4559 /* register containing pointer is being spilled into stack */ 4559 4560 if (size != BPF_REG_SIZE) { ··· 15114 15113 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 15115 15114 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 15116 15115 struct bpf_reg_state *eq_branch_regs; 15117 - struct bpf_reg_state fake_reg = {}; 15118 15116 u8 opcode = BPF_OP(insn->code); 15119 15117 bool is_jmp32; 15120 15118 int pred = -1; ··· 15179 15179 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 15180 15180 return -EINVAL; 15181 15181 } 15182 - src_reg = &fake_reg; 15182 + src_reg = &env->fake_reg[0]; 15183 + memset(src_reg, 0, sizeof(*src_reg)); 15183 15184 src_reg->type = SCALAR_VALUE; 15184 15185 __mark_reg_known(src_reg, insn->imm); 15185 15186 } ··· 15240 15239 &other_branch_regs[insn->src_reg], 15241 15240 dst_reg, src_reg, opcode, is_jmp32); 15242 15241 } else /* BPF_SRC(insn->code) == BPF_K */ { 15242 + /* reg_set_min_max() can mangle the fake_reg. Make a copy 15243 + * so that these are two different memory locations. The 15244 + * src_reg is not used beyond here in context of K. 
15245 + */ 15246 + memcpy(&env->fake_reg[1], &env->fake_reg[0], 15247 + sizeof(env->fake_reg[0])); 15243 15248 err = reg_set_min_max(env, 15244 15249 &other_branch_regs[insn->dst_reg], 15245 - src_reg /* fake one */, 15246 - dst_reg, src_reg /* same fake one */, 15250 + &env->fake_reg[0], 15251 + dst_reg, &env->fake_reg[1], 15247 15252 opcode, is_jmp32); 15248 15253 } 15249 15254 if (err) ··· 20320 20313 goto next_insn; 20321 20314 } 20322 20315 20323 - #ifdef CONFIG_X86_64 20316 + #if defined(CONFIG_X86_64) && !defined(CONFIG_UML) 20324 20317 /* Implement bpf_get_smp_processor_id() inline. */ 20325 20318 if (insn->imm == BPF_FUNC_get_smp_processor_id && 20326 20319 prog->jit_requested && bpf_jit_supports_percpu_insn()) {
+3 -1
kernel/gcov/gcc_4_7.c
··· 18 18 #include <linux/mm.h> 19 19 #include "gcov.h" 20 20 21 - #if (__GNUC__ >= 10) 21 + #if (__GNUC__ >= 14) 22 + #define GCOV_COUNTERS 9 23 + #elif (__GNUC__ >= 10) 22 24 #define GCOV_COUNTERS 8 23 25 #elif (__GNUC__ >= 7) 24 26 #define GCOV_COUNTERS 9
+1
kernel/kcov.c
··· 632 632 return -EINVAL; 633 633 kcov->mode = mode; 634 634 t->kcov = kcov; 635 + t->kcov_mode = KCOV_MODE_REMOTE; 635 636 kcov->t = t; 636 637 kcov->remote = true; 637 638 kcov->remote_size = remote_arg->area_size;
+1
kernel/pid_namespace.c
··· 218 218 */ 219 219 do { 220 220 clear_thread_flag(TIF_SIGPENDING); 221 + clear_thread_flag(TIF_NOTIFY_SIGNAL); 221 222 rc = kernel_wait4(-1, NULL, __WALL, NULL); 222 223 } while (rc != -ECHILD); 223 224
+2 -2
kernel/trace/Kconfig
··· 1136 1136 1137 1137 config SYNTH_EVENT_GEN_TEST 1138 1138 tristate "Test module for in-kernel synthetic event generation" 1139 - depends on SYNTH_EVENTS 1139 + depends on SYNTH_EVENTS && m 1140 1140 help 1141 1141 This option creates a test module to check the base 1142 1142 functionality of in-kernel synthetic event definition and ··· 1149 1149 1150 1150 config KPROBE_EVENT_GEN_TEST 1151 1151 tristate "Test module for in-kernel kprobe event generation" 1152 - depends on KPROBE_EVENTS 1152 + depends on KPROBE_EVENTS && m 1153 1153 help 1154 1154 This option creates a test module to check the base 1155 1155 functionality of in-kernel kprobe event definition.
+1 -7
lib/Kconfig
··· 539 539 stack overflow. 540 540 541 541 config FORCE_NR_CPUS 542 - bool "Set number of CPUs at compile time" 543 - depends on SMP && EXPERT && !COMPILE_TEST 544 - help 545 - Say Yes if you have NR_CPUS set to an actual number of possible 546 - CPUs in your system, not to a default value. This forces the core 547 - code to rely on compile-time value and optimize kernel routines 548 - better. 542 + def_bool !SMP 549 543 550 544 config CPU_RMAP 551 545 bool
+13 -3
lib/alloc_tag.c
··· 227 227 }; 228 228 EXPORT_SYMBOL(page_alloc_tagging_ops); 229 229 230 + #ifdef CONFIG_SYSCTL 230 231 static struct ctl_table memory_allocation_profiling_sysctls[] = { 231 232 { 232 233 .procname = "mem_profiling", ··· 242 241 { } 243 242 }; 244 243 244 + static void __init sysctl_init(void) 245 + { 246 + if (!mem_profiling_support) 247 + memory_allocation_profiling_sysctls[0].mode = 0444; 248 + 249 + register_sysctl_init("vm", memory_allocation_profiling_sysctls); 250 + } 251 + #else /* CONFIG_SYSCTL */ 252 + static inline void sysctl_init(void) {} 253 + #endif /* CONFIG_SYSCTL */ 254 + 245 255 static int __init alloc_tag_init(void) 246 256 { 247 257 const struct codetag_type_desc desc = { ··· 265 253 if (IS_ERR(alloc_tag_cttype)) 266 254 return PTR_ERR(alloc_tag_cttype); 267 255 268 - if (!mem_profiling_support) 269 - memory_allocation_profiling_sysctls[0].mode = 0444; 270 - register_sysctl_init("vm", memory_allocation_profiling_sysctls); 256 + sysctl_init(); 271 257 procfs_init(); 272 258 273 259 return 0;
+8 -2
lib/closure.c
··· 17 17 { 18 18 int r = flags & CLOSURE_REMAINING_MASK; 19 19 20 - BUG_ON(flags & CLOSURE_GUARD_MASK); 21 - BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR)); 20 + if (WARN(flags & CLOSURE_GUARD_MASK, 21 + "closure has guard bits set: %x (%u)", 22 + flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r))) 23 + r &= ~CLOSURE_GUARD_MASK; 22 24 23 25 if (!r) { 24 26 smp_acquire__after_ctrl_dep(); 27 + 28 + WARN(flags & ~CLOSURE_DESTRUCTOR, 29 + "closure ref hit 0 with incorrect flags set: %x (%u)", 30 + flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags)); 25 31 26 32 cl->closure_get_happened = false; 27 33
+1 -1
lib/fortify_kunit.c
··· 374 374 for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \ 375 375 len = strlen(test_strs[i]); \ 376 376 KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \ 377 - checker(len, kmemdup_array(test_strs[i], len, 1, gfp), \ 377 + checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \ 378 378 kfree(p)); \ 379 379 checker(len, kmemdup(test_strs[i], len, gfp), \ 380 380 kfree(p)); \
+17 -3
lib/overflow_kunit.c
··· 1178 1178 s16 array[] __counted_by(counter); 1179 1179 }; 1180 1180 1181 + struct bar { 1182 + int a; 1183 + u32 counter; 1184 + s16 array[]; 1185 + }; 1186 + 1181 1187 static void DEFINE_FLEX_test(struct kunit *test) 1182 1188 { 1183 - DEFINE_RAW_FLEX(struct foo, two, array, 2); 1189 + /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */ 1190 + DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2); 1191 + #if __has_attribute(__counted_by__) 1192 + int expected_raw_size = sizeof(struct foo); 1193 + #else 1194 + int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16); 1195 + #endif 1196 + /* Without annotation, it will always be on-stack size. */ 1197 + DEFINE_RAW_FLEX(struct bar, two, array, 2); 1184 1198 DEFINE_FLEX(struct foo, eight, array, counter, 8); 1185 1199 DEFINE_FLEX(struct foo, empty, array, counter, 0); 1186 1200 1187 - KUNIT_EXPECT_EQ(test, __struct_size(two), 1188 - sizeof(struct foo) + sizeof(s16) + sizeof(s16)); 1201 + KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size); 1202 + KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16)); 1189 1203 KUNIT_EXPECT_EQ(test, __struct_size(eight), 24); 1190 1204 KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo)); 1191 1205 }
+5 -26
mm/debug_vm_pgtable.c
··· 40 40 * Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics 41 41 * expectations that are being validated here. All future changes in here 42 42 * or the documentation need to be in sync. 43 - * 44 - * On s390 platform, the lower 4 bits are used to identify given page table 45 - * entry type. But these bits might affect the ability to clear entries with 46 - * pxx_clear() because of how dynamic page table folding works on s390. So 47 - * while loading up the entries do not change the lower 4 bits. It does not 48 - * have affect any other platform. Also avoid the 62nd bit on ppc64 that is 49 - * used to mark a pte entry. 50 43 */ 51 - #define S390_SKIP_MASK GENMASK(3, 0) 52 - #if __BITS_PER_LONG == 64 53 - #define PPC64_SKIP_MASK GENMASK(62, 62) 54 - #else 55 - #define PPC64_SKIP_MASK 0x0 56 - #endif 57 - #define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK) 58 - #define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK) 59 44 #define RANDOM_NZVALUE GENMASK(7, 0) 60 45 61 46 struct pgtable_debug_args { ··· 496 511 return; 497 512 498 513 pr_debug("Validating PUD clear\n"); 499 - pud = __pud(pud_val(pud) | RANDOM_ORVALUE); 500 - WRITE_ONCE(*args->pudp, pud); 514 + WARN_ON(pud_none(pud)); 501 515 pud_clear(args->pudp); 502 516 pud = READ_ONCE(*args->pudp); 503 517 WARN_ON(!pud_none(pud)); ··· 532 548 return; 533 549 534 550 pr_debug("Validating P4D clear\n"); 535 - p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE); 536 - WRITE_ONCE(*args->p4dp, p4d); 551 + WARN_ON(p4d_none(p4d)); 537 552 p4d_clear(args->p4dp); 538 553 p4d = READ_ONCE(*args->p4dp); 539 554 WARN_ON(!p4d_none(p4d)); ··· 565 582 return; 566 583 567 584 pr_debug("Validating PGD clear\n"); 568 - pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE); 569 - WRITE_ONCE(*args->pgdp, pgd); 585 + WARN_ON(pgd_none(pgd)); 570 586 pgd_clear(args->pgdp); 571 587 pgd = READ_ONCE(*args->pgdp); 572 588 WARN_ON(!pgd_none(pgd)); ··· 616 634 if (WARN_ON(!args->ptep)) 617 635 return; 618 636 619 - 
#ifndef CONFIG_RISCV 620 - pte = __pte(pte_val(pte) | RANDOM_ORVALUE); 621 - #endif 622 637 set_pte_at(args->mm, args->vaddr, args->ptep, pte); 638 + WARN_ON(pte_none(pte)); 623 639 flush_dcache_page(page); 624 640 barrier(); 625 641 ptep_clear(args->mm, args->vaddr, args->ptep); ··· 630 650 pmd_t pmd = READ_ONCE(*args->pmdp); 631 651 632 652 pr_debug("Validating PMD clear\n"); 633 - pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE); 634 - WRITE_ONCE(*args->pmdp, pmd); 653 + WARN_ON(pmd_none(pmd)); 635 654 pmd_clear(args->pmdp); 636 655 pmd = READ_ONCE(*args->pmdp); 637 656 WARN_ON(!pmd_none(pmd));
+17 -11
mm/huge_memory.c
··· 3009 3009 if (new_order >= folio_order(folio)) 3010 3010 return -EINVAL; 3011 3011 3012 - /* Cannot split anonymous THP to order-1 */ 3013 - if (new_order == 1 && folio_test_anon(folio)) { 3014 - VM_WARN_ONCE(1, "Cannot split to order-1 folio"); 3015 - return -EINVAL; 3016 - } 3017 - 3018 - if (new_order) { 3019 - /* Only swapping a whole PMD-mapped folio is supported */ 3020 - if (folio_test_swapcache(folio)) 3012 + if (folio_test_anon(folio)) { 3013 + /* order-1 is not supported for anonymous THP. */ 3014 + if (new_order == 1) { 3015 + VM_WARN_ONCE(1, "Cannot split to order-1 folio"); 3021 3016 return -EINVAL; 3017 + } 3018 + } else if (new_order) { 3022 3019 /* Split shmem folio to non-zero order not supported */ 3023 3020 if (shmem_mapping(folio->mapping)) { 3024 3021 VM_WARN_ONCE(1, 3025 3022 "Cannot split shmem folio to non-0 order"); 3026 3023 return -EINVAL; 3027 3024 } 3028 - /* No split if the file system does not support large folio */ 3029 - if (!mapping_large_folio_support(folio->mapping)) { 3025 + /* 3026 + * No split if the file system does not support large folio. 3027 + * Note that we might still have THPs in such mappings due to 3028 + * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping 3029 + * does not actually support large folios properly. 3030 + */ 3031 + if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && 3032 + !mapping_large_folio_support(folio->mapping)) { 3030 3033 VM_WARN_ONCE(1, 3031 3034 "Cannot split file folio to non-0 order"); 3032 3035 return -EINVAL; 3033 3036 } 3034 3037 } 3035 3038 3039 + /* Only swapping a whole PMD-mapped folio is supported */ 3040 + if (folio_test_swapcache(folio) && new_order) 3041 + return -EINVAL; 3036 3042 3037 3043 is_hzp = is_huge_zero_folio(folio); 3038 3044 if (is_hzp) {
-1
mm/internal.h
··· 588 588 extern void memblock_free_pages(struct page *page, unsigned long pfn, 589 589 unsigned int order); 590 590 extern void __free_pages_core(struct page *page, unsigned int order); 591 - extern void kernel_init_pages(struct page *page, int numpages); 592 591 593 592 /* 594 593 * This will have no effect, other than possibly generating a warning, if the
+7 -21
mm/memblock.c
··· 754 754 755 755 /* calculate lose page */ 756 756 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 757 - if (nid == NUMA_NO_NODE) 757 + if (!numa_valid_node(nid)) 758 758 nr_pages += end_pfn - start_pfn; 759 759 } 760 760 ··· 1061 1061 return false; 1062 1062 1063 1063 /* only memory regions are associated with nodes, check it */ 1064 - if (nid != NUMA_NO_NODE && nid != m_nid) 1064 + if (numa_valid_node(nid) && nid != m_nid) 1065 1065 return true; 1066 1066 1067 1067 /* skip hotpluggable memory regions if needed */ ··· 1117 1117 { 1118 1118 int idx_a = *idx & 0xffffffff; 1119 1119 int idx_b = *idx >> 32; 1120 - 1121 - if (WARN_ONCE(nid == MAX_NUMNODES, 1122 - "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 1123 - nid = NUMA_NO_NODE; 1124 1120 1125 1121 for (; idx_a < type_a->cnt; idx_a++) { 1126 1122 struct memblock_region *m = &type_a->regions[idx_a]; ··· 1211 1215 int idx_a = *idx & 0xffffffff; 1212 1216 int idx_b = *idx >> 32; 1213 1217 1214 - if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 1215 - nid = NUMA_NO_NODE; 1216 - 1217 1218 if (*idx == (u64)ULLONG_MAX) { 1218 1219 idx_a = type_a->cnt - 1; 1219 1220 if (type_b != NULL) ··· 1296 1303 1297 1304 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) 1298 1305 continue; 1299 - if (nid == MAX_NUMNODES || nid == r_nid) 1306 + if (!numa_valid_node(nid) || nid == r_nid) 1300 1307 break; 1301 1308 } 1302 1309 if (*idx >= type->cnt) { ··· 1331 1338 #ifdef CONFIG_NUMA 1332 1339 int start_rgn, end_rgn; 1333 1340 int i, ret; 1334 - 1335 - if (WARN_ONCE(nid == MAX_NUMNODES, 1336 - "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 1337 - nid = NUMA_NO_NODE; 1338 1341 1339 1342 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 1340 1343 if (ret) ··· 1441 1452 enum memblock_flags flags = choose_memblock_flags(); 1442 1453 phys_addr_t found; 1443 1454 1444 - if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 1445 - nid = NUMA_NO_NODE; 1446 - 1447 1455 if (!align) { 1448 1456 /* Can't use WARNs this early in boot on powerpc */ 1449 1457 dump_stack(); ··· 1453 1467 if (found && !memblock_reserve(found, size)) 1454 1468 goto done; 1455 1469 1456 - if (nid != NUMA_NO_NODE && !exact_nid) { 1470 + if (numa_valid_node(nid) && !exact_nid) { 1457 1471 found = memblock_find_in_range_node(size, align, start, 1458 1472 end, NUMA_NO_NODE, 1459 1473 flags); ··· 1973 1987 end = base + size - 1; 1974 1988 flags = rgn->flags; 1975 1989 #ifdef CONFIG_NUMA 1976 - if (memblock_get_region_node(rgn) != MAX_NUMNODES) 1990 + if (numa_valid_node(memblock_get_region_node(rgn))) 1977 1991 snprintf(nid_buf, sizeof(nid_buf), " on node %d", 1978 1992 memblock_get_region_node(rgn)); 1979 1993 #endif ··· 2167 2181 start = region->base; 2168 2182 end = start + region->size; 2169 2183 2170 - if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES) 2184 + if (!numa_valid_node(nid)) 2171 2185 nid = early_pfn_to_nid(PFN_DOWN(start)); 2172 2186 2173 2187 reserve_bootmem_region(start, end, nid); ··· 2258 2272 2259 2273 seq_printf(m, "%4d: ", i); 2260 2274 seq_printf(m, "%pa..%pa ", &reg->base, &end); 2261 - if (nid != MAX_NUMNODES) 2275 + if (numa_valid_node(nid)) 2262 2276 seq_printf(m, "%4d ", nid); 2263 2277 else 2264 2278 seq_printf(m, "%4c ", 'x');
+1 -2
mm/memcontrol.c
··· 7745 7745 * @new: Replacement folio. 7746 7746 * 7747 7747 * Charge @new as a replacement folio for @old. @old will 7748 - * be uncharged upon free. This is only used by the page cache 7749 - * (in replace_page_cache_folio()). 7748 + * be uncharged upon free. 7750 7749 * 7751 7750 * Both folios must be locked, @new->mapping must be set up. 7752 7751 */
+10 -10
mm/memory.c
··· 1507 1507 if (unlikely(folio_mapcount(folio) < 0)) 1508 1508 print_bad_pte(vma, addr, ptent, page); 1509 1509 } 1510 - 1511 - if (want_init_mlocked_on_free() && folio_test_mlocked(folio) && 1512 - !delay_rmap && folio_test_anon(folio)) { 1513 - kernel_init_pages(page, folio_nr_pages(folio)); 1514 - } 1515 - 1516 1510 if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) { 1517 1511 *force_flush = true; 1518 1512 *force_break = true; ··· 5100 5106 bool ignore_writable, bool pte_write_upgrade) 5101 5107 { 5102 5108 int nr = pte_pfn(fault_pte) - folio_pfn(folio); 5103 - unsigned long start = max(vmf->address - nr * PAGE_SIZE, vma->vm_start); 5104 - unsigned long end = min(vmf->address + (folio_nr_pages(folio) - nr) * PAGE_SIZE, vma->vm_end); 5105 - pte_t *start_ptep = vmf->pte - (vmf->address - start) / PAGE_SIZE; 5106 - unsigned long addr; 5109 + unsigned long start, end, addr = vmf->address; 5110 + unsigned long addr_start = addr - (nr << PAGE_SHIFT); 5111 + unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE); 5112 + pte_t *start_ptep; 5113 + 5114 + /* Stay within the VMA and within the page table. */ 5115 + start = max3(addr_start, pt_start, vma->vm_start); 5116 + end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE, 5117 + vma->vm_end); 5118 + start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); 5107 5119 5108 5120 /* Restore all PTEs' mapping of the large folio */ 5109 5121 for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
+7 -1
mm/migrate.c
··· 1654 1654 1655 1655 /* 1656 1656 * The rare folio on the deferred split list should 1657 - * be split now. It should not count as a failure. 1657 + * be split now. It should not count as a failure: 1658 + * but increment nr_failed because, without doing so, 1659 + * migrate_pages() may report success with (split but 1660 + * unmigrated) pages still on its fromlist; whereas it 1661 + * always reports success when its fromlist is empty. 1662 + * 1658 1663 * Only check it without removing it from the list. 1659 1664 * Since the folio can be on deferred_split_scan() 1660 1665 * local list and removing it can cause the local list ··· 1674 1669 if (nr_pages > 2 && 1675 1670 !list_empty(&folio->_deferred_list)) { 1676 1671 if (try_split_folio(folio, split_folios) == 0) { 1672 + nr_failed++; 1677 1673 stats->nr_thp_split += is_thp; 1678 1674 stats->nr_split++; 1679 1675 continue;
+7 -36
mm/mm_init.c
··· 2523 2523 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 2524 2524 EXPORT_SYMBOL(init_on_free); 2525 2525 2526 - DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free); 2527 - EXPORT_SYMBOL(init_mlocked_on_free); 2528 - 2529 2526 static bool _init_on_alloc_enabled_early __read_mostly 2530 2527 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); 2531 2528 static int __init early_init_on_alloc(char *buf) ··· 2539 2542 return kstrtobool(buf, &_init_on_free_enabled_early); 2540 2543 } 2541 2544 early_param("init_on_free", early_init_on_free); 2542 - 2543 - static bool _init_mlocked_on_free_enabled_early __read_mostly 2544 - = IS_ENABLED(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON); 2545 - static int __init early_init_mlocked_on_free(char *buf) 2546 - { 2547 - return kstrtobool(buf, &_init_mlocked_on_free_enabled_early); 2548 - } 2549 - early_param("init_mlocked_on_free", early_init_mlocked_on_free); 2550 2545 2551 2546 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); 2552 2547 ··· 2567 2578 } 2568 2579 #endif 2569 2580 2570 - if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early || 2571 - _init_mlocked_on_free_enabled_early) && 2581 + if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && 2572 2582 page_poisoning_requested) { 2573 2583 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " 2574 - "will take precedence over init_on_alloc, init_on_free " 2575 - "and init_mlocked_on_free\n"); 2584 + "will take precedence over init_on_alloc and init_on_free\n"); 2576 2585 _init_on_alloc_enabled_early = false; 2577 2586 _init_on_free_enabled_early = false; 2578 - _init_mlocked_on_free_enabled_early = false; 2579 - } 2580 - 2581 - if (_init_mlocked_on_free_enabled_early && _init_on_free_enabled_early) { 2582 - pr_info("mem auto-init: init_on_free is on, " 2583 - "will take precedence over init_mlocked_on_free\n"); 2584 - _init_mlocked_on_free_enabled_early = false; 2585 2587 } 2586 2588 
2587 2589 if (_init_on_alloc_enabled_early) { ··· 2589 2609 static_branch_disable(&init_on_free); 2590 2610 } 2591 2611 2592 - if (_init_mlocked_on_free_enabled_early) { 2593 - want_check_pages = true; 2594 - static_branch_enable(&init_mlocked_on_free); 2595 - } else { 2596 - static_branch_disable(&init_mlocked_on_free); 2597 - } 2598 - 2599 - if (IS_ENABLED(CONFIG_KMSAN) && (_init_on_alloc_enabled_early || 2600 - _init_on_free_enabled_early || _init_mlocked_on_free_enabled_early)) 2601 - pr_info("mem auto-init: please make sure init_on_alloc, init_on_free and " 2602 - "init_mlocked_on_free are disabled when running KMSAN\n"); 2612 + if (IS_ENABLED(CONFIG_KMSAN) && 2613 + (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) 2614 + pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); 2603 2615 2604 2616 #ifdef CONFIG_DEBUG_PAGEALLOC 2605 2617 if (debug_pagealloc_enabled()) { ··· 2630 2658 else 2631 2659 stack = "off"; 2632 2660 2633 - pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s, mlocked free:%s\n", 2661 + pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", 2634 2662 stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off", 2635 - want_init_on_free() ? "on" : "off", 2636 - want_init_mlocked_on_free() ? "on" : "off"); 2663 + want_init_on_free() ? "on" : "off"); 2637 2664 if (want_init_on_free()) 2638 2665 pr_info("mem auto-init: clearing system memory may take some time...\n"); 2639 2666 }
+1 -1
mm/page_alloc.c
··· 1016 1016 return page_kasan_tag(page) == KASAN_TAG_KERNEL; 1017 1017 } 1018 1018 1019 - void kernel_init_pages(struct page *page, int numpages) 1019 + static void kernel_init_pages(struct page *page, int numpages) 1020 1020 { 1021 1021 int i; 1022 1022
+10 -1
mm/page_table_check.c
··· 73 73 page = pfn_to_page(pfn); 74 74 page_ext = page_ext_get(page); 75 75 76 + if (!page_ext) 77 + return; 78 + 76 79 BUG_ON(PageSlab(page)); 77 80 anon = PageAnon(page); 78 81 ··· 113 110 page = pfn_to_page(pfn); 114 111 page_ext = page_ext_get(page); 115 112 113 + if (!page_ext) 114 + return; 115 + 116 116 BUG_ON(PageSlab(page)); 117 117 anon = PageAnon(page); 118 118 ··· 146 140 BUG_ON(PageSlab(page)); 147 141 148 142 page_ext = page_ext_get(page); 149 - BUG_ON(!page_ext); 143 + 144 + if (!page_ext) 145 + return; 146 + 150 147 for (i = 0; i < (1ul << order); i++) { 151 148 struct page_table_check *ptc = get_page_table_check(page_ext); 152 149
+1 -1
mm/shmem.c
··· 1786 1786 xa_lock_irq(&swap_mapping->i_pages); 1787 1787 error = shmem_replace_entry(swap_mapping, swap_index, old, new); 1788 1788 if (!error) { 1789 - mem_cgroup_migrate(old, new); 1789 + mem_cgroup_replace_folio(old, new); 1790 1790 __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); 1791 1791 __lruvec_stat_mod_folio(new, NR_SHMEM, 1); 1792 1792 __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
+2 -2
mm/util.c
··· 139 139 * kmemdup_array - duplicate a given array. 140 140 * 141 141 * @src: array to duplicate. 142 - * @element_size: size of each element of array. 143 142 * @count: number of elements to duplicate from array. 143 + * @element_size: size of each element of array. 144 144 * @gfp: GFP mask to use. 145 145 * 146 146 * Return: duplicated array of @src or %NULL in case of error, 147 147 * result is physically contiguous. Use kfree() to free. 148 148 */ 149 - void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp) 149 + void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp) 150 150 { 151 151 return kmemdup(src, size_mul(element_size, count), gfp); 152 152 }
+5
net/core/filter.c
··· 1665 1665 static inline int __bpf_try_make_writable(struct sk_buff *skb, 1666 1666 unsigned int write_len) 1667 1667 { 1668 + #ifdef CONFIG_DEBUG_NET 1669 + /* Avoid a splat in pskb_may_pull_reason() */ 1670 + if (write_len > INT_MAX) 1671 + return -EINVAL; 1672 + #endif 1668 1673 return skb_ensure_writable(skb, write_len); 1669 1674 } 1670 1675
+7 -2
net/core/net_namespace.c
··· 693 693 * get_net_ns - increment the refcount of the network namespace 694 694 * @ns: common namespace (net) 695 695 * 696 - * Returns the net's common namespace. 696 + * Returns the net's common namespace or ERR_PTR() if ref is zero. 697 697 */ 698 698 struct ns_common *get_net_ns(struct ns_common *ns) 699 699 { 700 - return &get_net(container_of(ns, struct net, ns))->ns; 700 + struct net *net; 701 + 702 + net = maybe_get_net(container_of(ns, struct net, ns)); 703 + if (net) 704 + return &net->ns; 705 + return ERR_PTR(-EINVAL); 701 706 } 702 707 EXPORT_SYMBOL_GPL(get_net_ns); 703 708
+8 -8
net/core/netdev-genl.c
··· 59 59 nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, 60 60 xdp_rx_meta, NETDEV_A_DEV_PAD) || 61 61 nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES, 62 - xsk_features, NETDEV_A_DEV_PAD)) { 63 - genlmsg_cancel(rsp, hdr); 64 - return -EINVAL; 65 - } 62 + xsk_features, NETDEV_A_DEV_PAD)) 63 + goto err_cancel_msg; 66 64 67 65 if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) { 68 66 if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, 69 - netdev->xdp_zc_max_segs)) { 70 - genlmsg_cancel(rsp, hdr); 71 - return -EINVAL; 72 - } 67 + netdev->xdp_zc_max_segs)) 68 + goto err_cancel_msg; 73 69 } 74 70 75 71 genlmsg_end(rsp, hdr); 76 72 77 73 return 0; 74 + 75 + err_cancel_msg: 76 + genlmsg_cancel(rsp, hdr); 77 + return -EMSGSIZE; 78 78 } 79 79 80 80 static void
+3
net/core/sock.c
··· 3742 3742 3743 3743 sk->sk_prot->unhash(sk); 3744 3744 3745 + if (sk->sk_socket) 3746 + sk->sk_socket->sk = NULL; 3747 + 3745 3748 /* 3746 3749 * In this point socket cannot receive new packets, but it is possible 3747 3750 * that some packets are in flight because some CPU runs receiver and
+54 -21
net/ipv4/cipso_ipv4.c
··· 1810 1810 return CIPSO_V4_HDR_LEN + ret_val; 1811 1811 } 1812 1812 1813 + static int cipso_v4_get_actual_opt_len(const unsigned char *data, int len) 1814 + { 1815 + int iter = 0, optlen = 0; 1816 + 1817 + /* determining the new total option length is tricky because of 1818 + * the padding necessary, the only thing i can think to do at 1819 + * this point is walk the options one-by-one, skipping the 1820 + * padding at the end to determine the actual option size and 1821 + * from there we can determine the new total option length 1822 + */ 1823 + while (iter < len) { 1824 + if (data[iter] == IPOPT_END) { 1825 + break; 1826 + } else if (data[iter] == IPOPT_NOP) { 1827 + iter++; 1828 + } else { 1829 + iter += data[iter + 1]; 1830 + optlen = iter; 1831 + } 1832 + } 1833 + return optlen; 1834 + } 1835 + 1813 1836 /** 1814 1837 * cipso_v4_sock_setattr - Add a CIPSO option to a socket 1815 1838 * @sk: the socket ··· 2009 1986 u8 cipso_len; 2010 1987 u8 cipso_off; 2011 1988 unsigned char *cipso_ptr; 2012 - int iter; 2013 1989 int optlen_new; 2014 1990 2015 1991 cipso_off = opt->opt.cipso - sizeof(struct iphdr); ··· 2028 2006 memmove(cipso_ptr, cipso_ptr + cipso_len, 2029 2007 opt->opt.optlen - cipso_off - cipso_len); 2030 2008 2031 - /* determining the new total option length is tricky because of 2032 - * the padding necessary, the only thing i can think to do at 2033 - * this point is walk the options one-by-one, skipping the 2034 - * padding at the end to determine the actual option size and 2035 - * from there we can determine the new total option length */ 2036 - iter = 0; 2037 - optlen_new = 0; 2038 - while (iter < opt->opt.optlen) 2039 - if (opt->opt.__data[iter] != IPOPT_NOP) { 2040 - iter += opt->opt.__data[iter + 1]; 2041 - optlen_new = iter; 2042 - } else 2043 - iter++; 2009 + optlen_new = cipso_v4_get_actual_opt_len(opt->opt.__data, 2010 + opt->opt.optlen); 2044 2011 hdr_delta = opt->opt.optlen; 2045 2012 opt->opt.optlen = (optlen_new + 3) & ~3; 2046 2013 
hdr_delta -= opt->opt.optlen; ··· 2249 2238 */ 2250 2239 int cipso_v4_skbuff_delattr(struct sk_buff *skb) 2251 2240 { 2252 - int ret_val; 2241 + int ret_val, cipso_len, hdr_len_actual, new_hdr_len_actual, new_hdr_len, 2242 + hdr_len_delta; 2253 2243 struct iphdr *iph; 2254 2244 struct ip_options *opt = &IPCB(skb)->opt; 2255 2245 unsigned char *cipso_ptr; ··· 2263 2251 if (ret_val < 0) 2264 2252 return ret_val; 2265 2253 2266 - /* the easiest thing to do is just replace the cipso option with noop 2267 - * options since we don't change the size of the packet, although we 2268 - * still need to recalculate the checksum */ 2269 - 2270 2254 iph = ip_hdr(skb); 2271 2255 cipso_ptr = (unsigned char *)iph + opt->cipso; 2272 - memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); 2256 + cipso_len = cipso_ptr[1]; 2257 + 2258 + hdr_len_actual = sizeof(struct iphdr) + 2259 + cipso_v4_get_actual_opt_len((unsigned char *)(iph + 1), 2260 + opt->optlen); 2261 + new_hdr_len_actual = hdr_len_actual - cipso_len; 2262 + new_hdr_len = (new_hdr_len_actual + 3) & ~3; 2263 + hdr_len_delta = (iph->ihl << 2) - new_hdr_len; 2264 + 2265 + /* 1. shift any options after CIPSO to the left */ 2266 + memmove(cipso_ptr, cipso_ptr + cipso_len, 2267 + new_hdr_len_actual - opt->cipso); 2268 + /* 2. move the whole IP header to its new place */ 2269 + memmove((unsigned char *)iph + hdr_len_delta, iph, new_hdr_len_actual); 2270 + /* 3. adjust the skb layout */ 2271 + skb_pull(skb, hdr_len_delta); 2272 + skb_reset_network_header(skb); 2273 + iph = ip_hdr(skb); 2274 + /* 4. re-fill new padding with IPOPT_END (may now be longer) */ 2275 + memset((unsigned char *)iph + new_hdr_len_actual, IPOPT_END, 2276 + new_hdr_len - new_hdr_len_actual); 2277 + 2278 + opt->optlen -= hdr_len_delta; 2273 2279 opt->cipso = 0; 2274 2280 opt->is_changed = 1; 2275 - 2281 + if (hdr_len_delta != 0) { 2282 + iph->ihl = new_hdr_len >> 2; 2283 + iph_set_totlen(iph, skb->len); 2284 + } 2276 2285 ip_send_check(iph); 2277 2286 2278 2287 return 0;
+4 -2
net/ipv4/tcp_ao.c
··· 1968 1968 first = true; 1969 1969 } 1970 1970 1971 - if (cmd.ao_required && tcp_ao_required_verify(sk)) 1972 - return -EKEYREJECTED; 1971 + if (cmd.ao_required && tcp_ao_required_verify(sk)) { 1972 + err = -EKEYREJECTED; 1973 + goto out; 1974 + } 1973 1975 1974 1976 /* For sockets in TCP_CLOSED it's possible set keys that aren't 1975 1977 * matching the future peer (address/port/VRF/etc),
+1
net/ipv4/tcp_input.c
··· 6296 6296 skb_rbtree_walk_from(data) 6297 6297 tcp_mark_skb_lost(sk, data); 6298 6298 tcp_xmit_retransmit_queue(sk); 6299 + tp->retrans_stamp = 0; 6299 6300 NET_INC_STATS(sock_net(sk), 6300 6301 LINUX_MIB_TCPFASTOPENACTIVEFAIL); 6301 6302 return true;
+2 -1
net/ipv6/ip6_fib.c
··· 2514 2514 goto out_kmem_cache_create; 2515 2515 2516 2516 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL, 2517 - inet6_dump_fib, RTNL_FLAG_DUMP_UNLOCKED); 2517 + inet6_dump_fib, RTNL_FLAG_DUMP_UNLOCKED | 2518 + RTNL_FLAG_DUMP_SPLIT_NLM_DONE); 2518 2519 if (ret) 2519 2520 goto out_unregister_subsys; 2520 2521
+3 -1
net/ipv6/route.c
··· 638 638 rcu_read_lock(); 639 639 last_probe = READ_ONCE(fib6_nh->last_probe); 640 640 idev = __in6_dev_get(dev); 641 + if (!idev) 642 + goto out; 641 643 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); 642 644 if (neigh) { 643 645 if (READ_ONCE(neigh->nud_state) & NUD_VALID) ··· 3605 3603 if (!dev) 3606 3604 goto out; 3607 3605 3608 - if (idev->cnf.disable_ipv6) { 3606 + if (!idev || idev->cnf.disable_ipv6) { 3609 3607 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device"); 3610 3608 err = -EACCES; 3611 3609 goto out;
+4 -4
net/ipv6/seg6_local.c
··· 941 941 942 942 if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled)) 943 943 return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, 944 - dev_net(skb->dev), NULL, skb, NULL, 945 - skb_dst(skb)->dev, input_action_end_dx6_finish); 944 + dev_net(skb->dev), NULL, skb, skb->dev, 945 + NULL, input_action_end_dx6_finish); 946 946 947 947 return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb); 948 948 drop: ··· 991 991 992 992 if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled)) 993 993 return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, 994 - dev_net(skb->dev), NULL, skb, NULL, 995 - skb_dst(skb)->dev, input_action_end_dx4_finish); 994 + dev_net(skb->dev), NULL, skb, skb->dev, 995 + NULL, input_action_end_dx4_finish); 996 996 997 997 return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb); 998 998 drop:
+7 -1
net/ipv6/xfrm6_policy.c
··· 56 56 { 57 57 struct dst_entry *dst; 58 58 struct net_device *dev; 59 + struct inet6_dev *idev; 59 60 60 61 dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark); 61 62 if (IS_ERR(dst)) 62 63 return -EHOSTUNREACH; 63 64 64 - dev = ip6_dst_idev(dst)->dev; 65 + idev = ip6_dst_idev(dst); 66 + if (!idev) { 67 + dst_release(dst); 68 + return -EHOSTUNREACH; 69 + } 70 + dev = idev->dev; 65 71 ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6); 66 72 dst_release(dst); 67 73 return 0;
+17
net/mac80211/driver-ops.c
··· 311 311 might_sleep(); 312 312 lockdep_assert_wiphy(local->hw.wiphy); 313 313 314 + /* 315 + * We should perhaps push emulate chanctx down and only 316 + * make it call ->config() when the chanctx is actually 317 + * assigned here (and unassigned below), but that's yet 318 + * another change to all drivers to add assign/unassign 319 + * emulation callbacks. Maybe later. 320 + */ 321 + if (sdata->vif.type == NL80211_IFTYPE_MONITOR && 322 + local->emulate_chanctx && 323 + !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) 324 + return 0; 325 + 314 326 if (!check_sdata_in_driver(sdata)) 315 327 return -EIO; 316 328 ··· 349 337 { 350 338 might_sleep(); 351 339 lockdep_assert_wiphy(local->hw.wiphy); 340 + 341 + if (sdata->vif.type == NL80211_IFTYPE_MONITOR && 342 + local->emulate_chanctx && 343 + !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) 344 + return; 352 345 353 346 if (!check_sdata_in_driver(sdata)) 354 347 return;
+10 -12
net/mac80211/iface.c
··· 686 686 ieee80211_del_virtual_monitor(local); 687 687 688 688 ieee80211_recalc_idle(local); 689 + ieee80211_recalc_offload(local); 689 690 690 691 if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) 691 692 break; ··· 1122 1121 struct ieee80211_sub_if_data *sdata; 1123 1122 int ret; 1124 1123 1125 - if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) 1126 - return 0; 1127 - 1128 1124 ASSERT_RTNL(); 1129 1125 lockdep_assert_wiphy(local->hw.wiphy); 1130 1126 ··· 1143 1145 1144 1146 ieee80211_set_default_queues(sdata); 1145 1147 1146 - ret = drv_add_interface(local, sdata); 1147 - if (WARN_ON(ret)) { 1148 - /* ok .. stupid driver, it asked for this! */ 1149 - kfree(sdata); 1150 - return ret; 1148 + if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) { 1149 + ret = drv_add_interface(local, sdata); 1150 + if (WARN_ON(ret)) { 1151 + /* ok .. stupid driver, it asked for this! */ 1152 + kfree(sdata); 1153 + return ret; 1154 + } 1151 1155 } 1152 1156 1153 1157 set_bit(SDATA_STATE_RUNNING, &sdata->state); ··· 1187 1187 { 1188 1188 struct ieee80211_sub_if_data *sdata; 1189 1189 1190 - if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) 1191 - return; 1192 - 1193 1190 ASSERT_RTNL(); 1194 1191 lockdep_assert_wiphy(local->hw.wiphy); 1195 1192 ··· 1206 1209 1207 1210 ieee80211_link_release_channel(&sdata->deflink); 1208 1211 1209 - drv_remove_interface(local, sdata); 1212 + if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) 1213 + drv_remove_interface(local, sdata); 1210 1214 1211 1215 kfree(sdata); 1212 1216 }
+9 -8
net/mac80211/scan.c
··· 358 358 struct cfg80211_scan_request *req; 359 359 struct cfg80211_chan_def chandef; 360 360 u8 bands_used = 0; 361 - int i, ielen, n_chans; 361 + int i, ielen; 362 + u32 *n_chans; 362 363 u32 flags = 0; 363 364 364 365 req = rcu_dereference_protected(local->scan_req, ··· 369 368 return false; 370 369 371 370 if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) { 371 + local->hw_scan_req->req.n_channels = req->n_channels; 372 + 372 373 for (i = 0; i < req->n_channels; i++) { 373 374 local->hw_scan_req->req.channels[i] = req->channels[i]; 374 375 bands_used |= BIT(req->channels[i]->band); 375 376 } 376 - 377 - n_chans = req->n_channels; 378 377 } else { 379 378 do { 380 379 if (local->hw_scan_band == NUM_NL80211_BANDS) 381 380 return false; 382 381 383 - n_chans = 0; 382 + n_chans = &local->hw_scan_req->req.n_channels; 383 + *n_chans = 0; 384 384 385 385 for (i = 0; i < req->n_channels; i++) { 386 386 if (req->channels[i]->band != 387 387 local->hw_scan_band) 388 388 continue; 389 - local->hw_scan_req->req.channels[n_chans] = 389 + local->hw_scan_req->req.channels[(*n_chans)++] = 390 390 req->channels[i]; 391 - n_chans++; 391 + 392 392 bands_used |= BIT(req->channels[i]->band); 393 393 } 394 394 395 395 local->hw_scan_band++; 396 - } while (!n_chans); 396 + } while (!*n_chans); 397 397 } 398 398 399 - local->hw_scan_req->req.n_channels = n_chans; 400 399 ieee80211_prepare_scan_chandef(&chandef); 401 400 402 401 if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+1 -1
net/mac80211/util.c
··· 1843 1843 1844 1844 /* add interfaces */ 1845 1845 sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); 1846 - if (sdata) { 1846 + if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) { 1847 1847 /* in HW restart it exists already */ 1848 1848 WARN_ON(local->resuming); 1849 1849 res = drv_add_interface(local, sdata);
+11 -2
net/netfilter/core.c
··· 815 815 if (ret < 0) 816 816 goto err; 817 817 818 + #ifdef CONFIG_LWTUNNEL 819 + ret = netfilter_lwtunnel_init(); 820 + if (ret < 0) 821 + goto err_lwtunnel_pernet; 822 + #endif 818 823 ret = netfilter_log_init(); 819 824 if (ret < 0) 820 - goto err_pernet; 825 + goto err_log_pernet; 821 826 822 827 return 0; 823 - err_pernet: 828 + err_log_pernet: 829 + #ifdef CONFIG_LWTUNNEL 830 + netfilter_lwtunnel_fini(); 831 + err_lwtunnel_pernet: 832 + #endif 824 833 unregister_pernet_subsys(&netfilter_net_ops); 825 834 err: 826 835 return ret;
+6 -5
net/netfilter/ipset/ip_set_core.c
··· 53 53 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET); 54 54 55 55 /* When the nfnl mutex or ip_set_ref_lock is held: */ 56 - #define ip_set_dereference(p) \ 57 - rcu_dereference_protected(p, \ 56 + #define ip_set_dereference(inst) \ 57 + rcu_dereference_protected((inst)->ip_set_list, \ 58 58 lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \ 59 - lockdep_is_held(&ip_set_ref_lock)) 59 + lockdep_is_held(&ip_set_ref_lock) || \ 60 + (inst)->is_deleted) 60 61 #define ip_set(inst, id) \ 61 - ip_set_dereference((inst)->ip_set_list)[id] 62 + ip_set_dereference(inst)[id] 62 63 #define ip_set_ref_netlink(inst,id) \ 63 64 rcu_dereference_raw((inst)->ip_set_list)[id] 64 65 #define ip_set_dereference_nfnl(p) \ ··· 1134 1133 if (!list) 1135 1134 goto cleanup; 1136 1135 /* nfnl mutex is held, both lists are valid */ 1137 - tmp = ip_set_dereference(inst->ip_set_list); 1136 + tmp = ip_set_dereference(inst); 1138 1137 memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max); 1139 1138 rcu_assign_pointer(inst->ip_set_list, list); 1140 1139 /* Make sure all current packets have passed through */
-15
net/netfilter/nf_conntrack_standalone.c
··· 22 22 #include <net/netfilter/nf_conntrack_acct.h> 23 23 #include <net/netfilter/nf_conntrack_zones.h> 24 24 #include <net/netfilter/nf_conntrack_timestamp.h> 25 - #ifdef CONFIG_LWTUNNEL 26 - #include <net/netfilter/nf_hooks_lwtunnel.h> 27 - #endif 28 25 #include <linux/rculist_nulls.h> 29 26 30 27 static bool enable_hooks __read_mostly; ··· 609 612 NF_SYSCTL_CT_PROTO_TIMEOUT_GRE, 610 613 NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM, 611 614 #endif 612 - #ifdef CONFIG_LWTUNNEL 613 - NF_SYSCTL_CT_LWTUNNEL, 614 - #endif 615 615 616 616 NF_SYSCTL_CT_LAST_SYSCTL, 617 617 }; ··· 938 944 .maxlen = sizeof(unsigned int), 939 945 .mode = 0644, 940 946 .proc_handler = proc_dointvec_jiffies, 941 - }, 942 - #endif 943 - #ifdef CONFIG_LWTUNNEL 944 - [NF_SYSCTL_CT_LWTUNNEL] = { 945 - .procname = "nf_hooks_lwtunnel", 946 - .data = NULL, 947 - .maxlen = sizeof(int), 948 - .mode = 0644, 949 - .proc_handler = nf_hooks_lwtunnel_sysctl_handler, 950 947 }, 951 948 #endif 952 949 };
+67
net/netfilter/nf_hooks_lwtunnel.c
··· 3 3 #include <linux/sysctl.h> 4 4 #include <net/lwtunnel.h> 5 5 #include <net/netfilter/nf_hooks_lwtunnel.h> 6 + #include <linux/netfilter.h> 7 + 8 + #include "nf_internals.h" 6 9 7 10 static inline int nf_hooks_lwtunnel_get(void) 8 11 { ··· 53 50 return ret; 54 51 } 55 52 EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_sysctl_handler); 53 + 54 + static struct ctl_table nf_lwtunnel_sysctl_table[] = { 55 + { 56 + .procname = "nf_hooks_lwtunnel", 57 + .data = NULL, 58 + .maxlen = sizeof(int), 59 + .mode = 0644, 60 + .proc_handler = nf_hooks_lwtunnel_sysctl_handler, 61 + }, 62 + }; 63 + 64 + static int __net_init nf_lwtunnel_net_init(struct net *net) 65 + { 66 + struct ctl_table_header *hdr; 67 + struct ctl_table *table; 68 + 69 + table = nf_lwtunnel_sysctl_table; 70 + if (!net_eq(net, &init_net)) { 71 + table = kmemdup(nf_lwtunnel_sysctl_table, 72 + sizeof(nf_lwtunnel_sysctl_table), 73 + GFP_KERNEL); 74 + if (!table) 75 + goto err_alloc; 76 + } 77 + 78 + hdr = register_net_sysctl_sz(net, "net/netfilter", table, 79 + ARRAY_SIZE(nf_lwtunnel_sysctl_table)); 80 + if (!hdr) 81 + goto err_reg; 82 + 83 + net->nf.nf_lwtnl_dir_header = hdr; 84 + 85 + return 0; 86 + err_reg: 87 + if (!net_eq(net, &init_net)) 88 + kfree(table); 89 + err_alloc: 90 + return -ENOMEM; 91 + } 92 + 93 + static void __net_exit nf_lwtunnel_net_exit(struct net *net) 94 + { 95 + const struct ctl_table *table; 96 + 97 + table = net->nf.nf_lwtnl_dir_header->ctl_table_arg; 98 + unregister_net_sysctl_table(net->nf.nf_lwtnl_dir_header); 99 + if (!net_eq(net, &init_net)) 100 + kfree(table); 101 + } 102 + 103 + static struct pernet_operations nf_lwtunnel_net_ops = { 104 + .init = nf_lwtunnel_net_init, 105 + .exit = nf_lwtunnel_net_exit, 106 + }; 107 + 108 + int __init netfilter_lwtunnel_init(void) 109 + { 110 + return register_pernet_subsys(&nf_lwtunnel_net_ops); 111 + } 112 + 113 + void netfilter_lwtunnel_fini(void) 114 + { 115 + unregister_pernet_subsys(&nf_lwtunnel_net_ops); 116 + } 56 117 #endif /* CONFIG_SYSCTL */
+6
net/netfilter/nf_internals.h
··· 29 29 /* nf_log.c */ 30 30 int __init netfilter_log_init(void); 31 31 32 + #ifdef CONFIG_LWTUNNEL 33 + /* nf_hooks_lwtunnel.c */ 34 + int __init netfilter_lwtunnel_init(void); 35 + void netfilter_lwtunnel_fini(void); 36 + #endif 37 + 32 38 /* core.c */ 33 39 void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp, 34 40 const struct nf_hook_ops *reg);
+2 -1
net/netrom/nr_timer.c
··· 121 121 is accepted() it isn't 'dead' so doesn't get removed. */ 122 122 if (sock_flag(sk, SOCK_DESTROY) || 123 123 (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { 124 - sock_hold(sk); 124 + if (sk->sk_state == TCP_LISTEN) 125 + sock_hold(sk); 125 126 bh_unlock_sock(sk); 126 127 nr_destroy_socket(sk); 127 128 goto out;
+1 -2
net/sched/act_api.c
··· 830 830 u32 max; 831 831 832 832 if (*index) { 833 - again: 834 833 rcu_read_lock(); 835 834 p = idr_find(&idrinfo->action_idr, *index); 836 835 ··· 838 839 * index but did not assign the pointer yet. 839 840 */ 840 841 rcu_read_unlock(); 841 - goto again; 842 + return -EAGAIN; 842 843 } 843 844 844 845 if (!p) {
+11 -5
net/sched/act_ct.c
··· 41 41 static struct rhashtable zones_ht; 42 42 static DEFINE_MUTEX(zones_mutex); 43 43 44 + struct zones_ht_key { 45 + struct net *net; 46 + u16 zone; 47 + }; 48 + 44 49 struct tcf_ct_flow_table { 45 50 struct rhash_head node; /* In zones tables */ 46 51 47 52 struct rcu_work rwork; 48 53 struct nf_flowtable nf_ft; 49 54 refcount_t ref; 50 - u16 zone; 55 + struct zones_ht_key key; 51 56 52 57 bool dying; 53 58 }; 54 59 55 60 static const struct rhashtable_params zones_params = { 56 61 .head_offset = offsetof(struct tcf_ct_flow_table, node), 57 - .key_offset = offsetof(struct tcf_ct_flow_table, zone), 58 - .key_len = sizeof_field(struct tcf_ct_flow_table, zone), 62 + .key_offset = offsetof(struct tcf_ct_flow_table, key), 63 + .key_len = sizeof_field(struct tcf_ct_flow_table, key), 59 64 .automatic_shrinking = true, 60 65 }; 61 66 ··· 321 316 322 317 static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params) 323 318 { 319 + struct zones_ht_key key = { .net = net, .zone = params->zone }; 324 320 struct tcf_ct_flow_table *ct_ft; 325 321 int err = -ENOMEM; 326 322 327 323 mutex_lock(&zones_mutex); 328 - ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params); 324 + ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params); 329 325 if (ct_ft && refcount_inc_not_zero(&ct_ft->ref)) 330 326 goto out_unlock; 331 327 ··· 335 329 goto err_alloc; 336 330 refcount_set(&ct_ft->ref, 1); 337 331 338 - ct_ft->zone = params->zone; 332 + ct_ft->key = key; 339 333 err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params); 340 334 if (err) 341 335 goto err_insert;
+5 -3
net/sunrpc/svc_xprt.c
··· 1421 1421 1422 1422 dprintk("svc_pool_stats_start, *pidx=%u\n", pidx); 1423 1423 1424 + if (!si->serv) 1425 + return NULL; 1426 + 1424 1427 mutex_lock(si->mutex); 1425 1428 1426 1429 if (!pidx) 1427 1430 return SEQ_START_TOKEN; 1428 - if (!si->serv) 1429 - return NULL; 1430 1431 return pidx > si->serv->sv_nrpools ? NULL 1431 1432 : &si->serv->sv_pools[pidx - 1]; 1432 1433 } ··· 1459 1458 { 1460 1459 struct svc_info *si = m->private; 1461 1460 1462 - mutex_unlock(si->mutex); 1461 + if (si->serv) 1462 + mutex_unlock(si->mutex); 1463 1463 } 1464 1464 1465 1465 static int svc_pool_stats_show(struct seq_file *m, void *p)
+1
net/tipc/node.c
··· 2105 2105 } else { 2106 2106 n = tipc_node_find_by_id(net, ehdr->id); 2107 2107 } 2108 + skb_dst_force(skb); 2108 2109 tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b); 2109 2110 if (!skb) 2110 2111 return;
+9 -3
net/wireless/scan.c
··· 3416 3416 wiphy = &rdev->wiphy; 3417 3417 3418 3418 /* Determine number of channels, needed to allocate creq */ 3419 - if (wreq && wreq->num_channels) 3419 + if (wreq && wreq->num_channels) { 3420 + /* Passed from userspace so should be checked */ 3421 + if (unlikely(wreq->num_channels > IW_MAX_FREQUENCIES)) 3422 + return -EINVAL; 3420 3423 n_channels = wreq->num_channels; 3421 - else 3424 + } else { 3422 3425 n_channels = ieee80211_get_num_supported_channels(wiphy); 3426 + } 3423 3427 3424 3428 creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + 3425 3429 n_channels * sizeof(void *), ··· 3497 3493 memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len); 3498 3494 creq->ssids[0].ssid_len = wreq->essid_len; 3499 3495 } 3500 - if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) 3496 + if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) { 3497 + creq->ssids = NULL; 3501 3498 creq->n_ssids = 0; 3499 + } 3502 3500 } 3503 3501 3504 3502 for (i = 0; i < NUM_NL80211_BANDS; i++)
+5 -2
rust/kernel/alloc/vec_ext.rs
··· 4 4 5 5 use super::{AllocError, Flags}; 6 6 use alloc::vec::Vec; 7 - use core::ptr; 8 7 9 8 /// Extensions to [`Vec`]. 10 9 pub trait VecExt<T>: Sized { ··· 140 141 // `krealloc_aligned`. A `Vec<T>`'s `ptr` value is not guaranteed to be NULL and might be 141 142 // dangling after being created with `Vec::new`. Instead, we can rely on `Vec<T>`'s capacity 142 143 // to be zero if no memory has been allocated yet. 143 - let ptr = if cap == 0 { ptr::null_mut() } else { old_ptr }; 144 + let ptr = if cap == 0 { 145 + core::ptr::null_mut() 146 + } else { 147 + old_ptr 148 + }; 144 149 145 150 // SAFETY: `ptr` is valid because it's either NULL or comes from a previous call to 146 151 // `krealloc_aligned`. We also verified that the type is not a ZST.
-15
security/Kconfig.hardening
··· 255 255 touching "cold" memory areas. Most cases see 3-5% impact. Some 256 256 synthetic workloads have measured as high as 8%. 257 257 258 - config INIT_MLOCKED_ON_FREE_DEFAULT_ON 259 - bool "Enable mlocked memory zeroing on free" 260 - depends on !KMSAN 261 - help 262 - This config has the effect of setting "init_mlocked_on_free=1" 263 - on the kernel command line. If it is enabled, all mlocked process 264 - memory is zeroed when freed. This restriction to mlocked memory 265 - improves performance over "init_on_free" but can still be used to 266 - protect confidential data like key material from content exposures 267 - to other processes, as well as live forensics and cold boot attacks. 268 - Any non-mlocked memory is not cleared before it is reassigned. This 269 - configuration can be overwritten by setting "init_mlocked_on_free=0" 270 - on the command line. The "init_on_free" boot option takes 271 - precedence over "init_mlocked_on_free". 272 - 273 258 config CC_HAS_ZERO_CALL_USED_REGS 274 259 def_bool $(cc-option,-fzero-call-used-regs=used-gpr) 275 260 # https://github.com/ClangBuiltLinux/linux/issues/1766
+3 -3
security/apparmor/audit.c
··· 217 217 } 218 218 } 219 219 220 - int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) 220 + int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp) 221 221 { 222 222 struct aa_audit_rule *rule; 223 223 ··· 230 230 return -EINVAL; 231 231 } 232 232 233 - rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL); 233 + rule = kzalloc(sizeof(struct aa_audit_rule), gfp); 234 234 235 235 if (!rule) 236 236 return -ENOMEM; 237 237 238 238 /* Currently rules are treated as coming from the root ns */ 239 239 rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr, 240 - GFP_KERNEL, true, false); 240 + gfp, true, false); 241 241 if (IS_ERR(rule->label)) { 242 242 int err = PTR_ERR(rule->label); 243 243 aa_audit_rule_free(rule);
+1 -1
security/apparmor/include/audit.h
··· 200 200 } 201 201 202 202 void aa_audit_rule_free(void *vrule); 203 - int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule); 203 + int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp); 204 204 int aa_audit_rule_known(struct audit_krule *rule); 205 205 int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule); 206 206
+1 -1
security/integrity/ima/ima.h
··· 546 546 #else 547 547 548 548 static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr, 549 - void **lsmrule) 549 + void **lsmrule, gfp_t gfp) 550 550 { 551 551 return -EINVAL; 552 552 }
+9 -6
security/integrity/ima/ima_policy.c
··· 401 401 kfree(entry); 402 402 } 403 403 404 - static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) 404 + static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry, 405 + gfp_t gfp) 405 406 { 406 407 struct ima_rule_entry *nentry; 407 408 int i; ··· 411 410 * Immutable elements are copied over as pointers and data; only 412 411 * lsm rules can change 413 412 */ 414 - nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL); 413 + nentry = kmemdup(entry, sizeof(*nentry), gfp); 415 414 if (!nentry) 416 415 return NULL; 417 416 ··· 426 425 427 426 ima_filter_rule_init(nentry->lsm[i].type, Audit_equal, 428 427 nentry->lsm[i].args_p, 429 - &nentry->lsm[i].rule); 428 + &nentry->lsm[i].rule, 429 + gfp); 430 430 if (!nentry->lsm[i].rule) 431 431 pr_warn("rule for LSM \'%s\' is undefined\n", 432 432 nentry->lsm[i].args_p); ··· 440 438 int i; 441 439 struct ima_rule_entry *nentry; 442 440 443 - nentry = ima_lsm_copy_rule(entry); 441 + nentry = ima_lsm_copy_rule(entry, GFP_KERNEL); 444 442 if (!nentry) 445 443 return -ENOMEM; 446 444 ··· 666 664 } 667 665 668 666 if (rc == -ESTALE && !rule_reinitialized) { 669 - lsm_rule = ima_lsm_copy_rule(rule); 667 + lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC); 670 668 if (lsm_rule) { 671 669 rule_reinitialized = true; 672 670 goto retry; ··· 1142 1140 entry->lsm[lsm_rule].type = audit_type; 1143 1141 result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, 1144 1142 entry->lsm[lsm_rule].args_p, 1145 - &entry->lsm[lsm_rule].rule); 1143 + &entry->lsm[lsm_rule].rule, 1144 + GFP_KERNEL); 1146 1145 if (!entry->lsm[lsm_rule].rule) { 1147 1146 pr_warn("rule for LSM \'%s\' is undefined\n", 1148 1147 entry->lsm[lsm_rule].args_p);
+4 -2
security/security.c
··· 5332 5332 * @op: rule operator 5333 5333 * @rulestr: rule context 5334 5334 * @lsmrule: receive buffer for audit rule struct 5335 + * @gfp: GFP flag used for kmalloc 5335 5336 * 5336 5337 * Allocate and initialize an LSM audit rule structure. 5337 5338 * 5338 5339 * Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of 5339 5340 * an invalid rule. 5340 5341 */ 5341 - int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule) 5342 + int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule, 5343 + gfp_t gfp) 5342 5344 { 5343 - return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule); 5345 + return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp); 5344 5346 } 5345 5347 5346 5348 /**
+3 -1
security/selinux/include/audit.h
··· 21 21 * @op: the operator the rule uses 22 22 * @rulestr: the text "target" of the rule 23 23 * @rule: pointer to the new rule structure returned via this 24 + * @gfp: GFP flag used for kmalloc 24 25 * 25 26 * Returns 0 if successful, -errno if not. On success, the rule structure 26 27 * will be allocated internally. The caller must free this structure with 27 28 * selinux_audit_rule_free() after use. 28 29 */ 29 - int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule); 30 + int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule, 31 + gfp_t gfp); 30 32 31 33 /** 32 34 * selinux_audit_rule_free - free an selinux audit rule structure.
+3 -2
security/selinux/ss/services.c
··· 3507 3507 } 3508 3508 } 3509 3509 3510 - int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) 3510 + int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, 3511 + gfp_t gfp) 3511 3512 { 3512 3513 struct selinux_state *state = &selinux_state; 3513 3514 struct selinux_policy *policy; ··· 3549 3548 return -EINVAL; 3550 3549 } 3551 3550 3552 - tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL); 3551 + tmprule = kzalloc(sizeof(struct selinux_audit_rule), gfp); 3553 3552 if (!tmprule) 3554 3553 return -ENOMEM; 3555 3554 context_init(&tmprule->au_ctxt);
+3 -1
security/smack/smack_lsm.c
··· 4693 4693 * @op: required testing operator (=, !=, >, <, ...) 4694 4694 * @rulestr: smack label to be audited 4695 4695 * @vrule: pointer to save our own audit rule representation 4696 + * @gfp: type of the memory for the allocation 4696 4697 * 4697 4698 * Prepare to audit cases where (@field @op @rulestr) is true. 4698 4699 * The label to be audited is created if necessay. 4699 4700 */ 4700 - static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) 4701 + static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, 4702 + gfp_t gfp) 4701 4703 { 4702 4704 struct smack_known *skp; 4703 4705 char **rule = (char **)vrule;
+1
security/yama/yama_lsm.c
··· 111 111 112 112 /** 113 113 * yama_relation_cleanup - remove invalid entries from the relation list 114 + * @work: unused 114 115 * 115 116 */ 116 117 static void yama_relation_cleanup(struct work_struct *work)
+2
sound/core/seq/seq_ump_convert.c
··· 1075 1075 system_ev_to_ump_midi1, system_ev_to_ump_midi2 }, 1076 1076 { SNDRV_SEQ_EVENT_SENSING, UMP_SYSTEM_STATUS_ACTIVE_SENSING, 1077 1077 system_ev_to_ump_midi1, system_ev_to_ump_midi2 }, 1078 + { SNDRV_SEQ_EVENT_RESET, UMP_SYSTEM_STATUS_RESET, 1079 + system_ev_to_ump_midi1, system_ev_to_ump_midi2 }, 1078 1080 }; 1079 1081 1080 1082 static const struct seq_ev_to_ump *find_ump_encoder(int type)
+1 -1
sound/hda/intel-dsp-config.c
··· 18 18 static int dsp_driver; 19 19 20 20 module_param(dsp_driver, int, 0444); 21 - MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)"); 21 + MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF, 4=AVS)"); 22 22 23 23 #define FLAG_SST BIT(0) 24 24 #define FLAG_SOF BIT(1)
+1
sound/oss/dmasound/dmasound_core.c
··· 204 204 static unsigned int writeBufSize = DEFAULT_BUFF_SIZE ; /* in bytes */ 205 205 module_param(writeBufSize, int, 0); 206 206 207 + MODULE_DESCRIPTION("Atari/Amiga/Q40 core DMA sound driver"); 207 208 MODULE_LICENSE("GPL"); 208 209 209 210 static int sq_unit = -1;
+2
sound/pci/hda/Kconfig
··· 162 162 depends on ACPI || COMPILE_TEST 163 163 depends on SND_SOC 164 164 select FW_CS_DSP 165 + imply SERIAL_MULTI_INSTANTIATE 165 166 select SND_HDA_GENERIC 166 167 select SND_SOC_CS35L56_SHARED 167 168 select SND_HDA_SCODEC_CS35L56 ··· 179 178 depends on ACPI || COMPILE_TEST 180 179 depends on SND_SOC 181 180 select FW_CS_DSP 181 + imply SERIAL_MULTI_INSTANTIATE 182 182 select SND_HDA_GENERIC 183 183 select SND_SOC_CS35L56_SHARED 184 184 select SND_HDA_SCODEC_CS35L56
+3 -3
sound/pci/hda/cs35l41_hda.c
··· 1495 1495 if (comps[cs35l41->index].dev == dev) { 1496 1496 memset(&comps[cs35l41->index], 0, sizeof(*comps)); 1497 1497 sleep_flags = lock_system_sleep(); 1498 - device_link_remove(&comps->codec->core.dev, cs35l41->dev); 1498 + device_link_remove(&cs35l41->codec->core.dev, cs35l41->dev); 1499 1499 unlock_system_sleep(sleep_flags); 1500 1500 } 1501 1501 } ··· 2019 2019 { 2020 2020 struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev); 2021 2021 2022 + component_del(cs35l41->dev, &cs35l41_hda_comp_ops); 2023 + 2022 2024 pm_runtime_get_sync(cs35l41->dev); 2023 2025 pm_runtime_dont_use_autosuspend(cs35l41->dev); 2024 2026 pm_runtime_disable(cs35l41->dev); 2025 2027 2026 2028 if (cs35l41->halo_initialized) 2027 2029 cs35l41_remove_dsp(cs35l41); 2028 - 2029 - component_del(cs35l41->dev, &cs35l41_hda_comp_ops); 2030 2030 2031 2031 acpi_dev_put(cs35l41->dacpi); 2032 2032
+8
sound/pci/hda/cs35l41_hda_property.c
··· 128 128 { "17AA38B5", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 }, 129 129 { "17AA38B6", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 }, 130 130 { "17AA38B7", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 }, 131 + { "17AA38C7", 4, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT }, 0, 2, -1, 1000, 4500, 24 }, 132 + { "17AA38C8", 4, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT }, 0, 2, -1, 1000, 4500, 24 }, 133 + { "17AA38F9", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 }, 134 + { "17AA38FA", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 }, 131 135 {} 132 136 }; 133 137 ··· 533 529 { "CSC3551", "17AA38B5", generic_dsd_config }, 534 530 { "CSC3551", "17AA38B6", generic_dsd_config }, 535 531 { "CSC3551", "17AA38B7", generic_dsd_config }, 532 + { "CSC3551", "17AA38C7", generic_dsd_config }, 533 + { "CSC3551", "17AA38C8", generic_dsd_config }, 534 + { "CSC3551", "17AA38F9", generic_dsd_config }, 535 + { "CSC3551", "17AA38FA", generic_dsd_config }, 536 536 {} 537 537 }; 538 538
+7 -2
sound/pci/hda/cs35l56_hda.c
··· 735 735 if (comps[cs35l56->index].dev == dev) 736 736 memset(&comps[cs35l56->index], 0, sizeof(*comps)); 737 737 738 + cs35l56->codec = NULL; 739 + 738 740 dev_dbg(cs35l56->base.dev, "Unbound\n"); 739 741 } 740 742 ··· 841 839 return ret; 842 840 843 841 cs35l56->suspended = false; 842 + 843 + if (!cs35l56->codec) 844 + return 0; 844 845 845 846 ret = cs35l56_is_fw_reload_needed(&cs35l56->base); 846 847 dev_dbg(cs35l56->base.dev, "fw_reload_needed: %d\n", ret); ··· 1077 1072 { 1078 1073 struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev); 1079 1074 1075 + component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops); 1076 + 1080 1077 pm_runtime_dont_use_autosuspend(cs35l56->base.dev); 1081 1078 pm_runtime_get_sync(cs35l56->base.dev); 1082 1079 pm_runtime_disable(cs35l56->base.dev); 1083 - 1084 - component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops); 1085 1080 1086 1081 cs_dsp_remove(&cs35l56->cs_dsp); 1087 1082
+36 -3
sound/pci/hda/patch_realtek.c
··· 583 583 switch (codec->core.vendor_id) { 584 584 case 0x10ec0236: 585 585 case 0x10ec0256: 586 + case 0x10ec0257: 586 587 case 0x19e58326: 587 588 case 0x10ec0283: 589 + case 0x10ec0285: 588 590 case 0x10ec0286: 591 + case 0x10ec0287: 589 592 case 0x10ec0288: 593 + case 0x10ec0295: 590 594 case 0x10ec0298: 591 595 alc_headset_mic_no_shutup(codec); 592 596 break; ··· 7524 7520 ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1, 7525 7521 ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318, 7526 7522 ALC256_FIXUP_CHROME_BOOK, 7523 + ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7, 7527 7524 }; 7528 7525 7529 7526 /* A special fixup for Lenovo C940 and Yoga Duet 7; ··· 7561 7556 id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* DuetITL */ 7562 7557 else 7563 7558 id = ALC287_FIXUP_TAS2781_I2C; /* 14IRP8 */ 7559 + __snd_hda_apply_fixup(codec, id, action, 0); 7560 + } 7561 + 7562 + /* Similar to above the Lenovo Yoga Pro 7 14ARP8 PCI SSID matches the codec SSID of the 7563 + Legion Y9000X 2022 IAH7.*/ 7564 + static void alc287_fixup_lenovo_14arp8_legion_iah7(struct hda_codec *codec, 7565 + const struct hda_fixup *fix, 7566 + int action) 7567 + { 7568 + int id; 7569 + 7570 + if (codec->core.subsystem_id == 0x17aa386e) 7571 + id = ALC287_FIXUP_CS35L41_I2C_2; /* Legion Y9000X 2022 IAH7 */ 7572 + else 7573 + id = ALC285_FIXUP_SPEAKER2_TO_DAC1; /* Yoga Pro 7 14ARP8 */ 7564 7574 __snd_hda_apply_fixup(codec, id, action, 0); 7565 7575 } 7566 7576 ··· 9678 9658 .chained = true, 9679 9659 .chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK, 9680 9660 }, 9661 + [ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7] = { 9662 + .type = HDA_FIXUP_FUNC, 9663 + .v.func = alc287_fixup_lenovo_14arp8_legion_iah7, 9664 + }, 9681 9665 [ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN] = { 9682 9666 .type = HDA_FIXUP_FUNC, 9683 9667 .v.func = alc287_fixup_yoga9_14iap7_bass_spk_pin, ··· 10218 10194 SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), 10219 10195 SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 
G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), 10220 10196 SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), 10197 + SND_PCI_QUIRK(0x103c, 0x8c7b, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 10198 + SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 10199 + SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 10200 + SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 10221 10201 SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED), 10222 10202 SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED), 10223 10203 SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED), ··· 10530 10502 SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), 10531 10503 SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7), 10532 10504 SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), 10533 - SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), 10505 + SND_PCI_QUIRK(0x17aa, 0x3820, "IdeaPad 330-17IKB 81DM", ALC269_FIXUP_ASPIRE_HEADSET_MIC), 10534 10506 SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), 10535 10507 SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), 10536 10508 SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), ··· 10544 10516 SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2), 10545 10517 SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2), 10546 10518 SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), 10547 - SND_PCI_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7", 
ALC287_FIXUP_CS35L41_I2C_2), 10519 + SND_PCI_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7 / Yoga Pro 7 14ARP8", ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7), 10548 10520 SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7/7i", ALC287_FIXUP_LENOVO_LEGION_7), 10549 10521 SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C), 10550 10522 SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2), ··· 10555 10527 SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), 10556 10528 SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C), 10557 10529 SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C), 10530 + SND_PCI_QUIRK(0x17aa, 0x3891, "Lenovo Yoga Pro 7 14AHP9", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), 10558 10531 SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C), 10559 10532 SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C), 10560 10533 SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), ··· 10569 10540 SND_PCI_QUIRK(0x17aa, 0x38be, "Yoga S980-14.5 proX YC Dual", ALC287_FIXUP_TAS2781_I2C), 10570 10541 SND_PCI_QUIRK(0x17aa, 0x38bf, "Yoga S980-14.5 proX LX Dual", ALC287_FIXUP_TAS2781_I2C), 10571 10542 SND_PCI_QUIRK(0x17aa, 0x38c3, "Y980 DUAL", ALC287_FIXUP_TAS2781_I2C), 10543 + SND_PCI_QUIRK(0x17aa, 0x38c7, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4), 10544 + SND_PCI_QUIRK(0x17aa, 0x38c8, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4), 10572 10545 SND_PCI_QUIRK(0x17aa, 0x38cb, "Y790 YG DUAL", ALC287_FIXUP_TAS2781_I2C), 10573 10546 SND_PCI_QUIRK(0x17aa, 0x38cd, "Y790 VECO DUAL", ALC287_FIXUP_TAS2781_I2C), 10574 10547 SND_PCI_QUIRK(0x17aa, 0x38d2, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN), 10575 10548 SND_PCI_QUIRK(0x17aa, 0x38d7, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN), 10549 + 
SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), 10550 + SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), 10576 10551 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 10577 10552 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 10578 10553 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), ··· 10614 10581 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ 10615 10582 SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), 10616 10583 SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X), 10584 + SND_PCI_QUIRK(0x1c6c, 0x122a, "Positivo N14AP7", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 10617 10585 SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE), 10618 10586 SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS), 10619 10587 SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP), ··· 10639 10605 SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10), 10640 10606 SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK), 10641 10607 SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10642 - SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10643 10608 SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 10644 10609 10645 10610 #if 0
+2 -2
sound/pci/hda/tas2781_hda_i2c.c
··· 777 777 { 778 778 struct tas2781_hda *tas_hda = dev_get_drvdata(dev); 779 779 780 + component_del(tas_hda->dev, &tas2781_hda_comp_ops); 781 + 780 782 pm_runtime_get_sync(tas_hda->dev); 781 783 pm_runtime_disable(tas_hda->dev); 782 - 783 - component_del(tas_hda->dev, &tas2781_hda_comp_ops); 784 784 785 785 pm_runtime_put_noidle(tas_hda->dev); 786 786
+1
tools/hv/Makefile
··· 17 17 MAKEFLAGS += -r 18 18 19 19 override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include 20 + override CFLAGS += -Wno-address-of-packed-member 20 21 21 22 ALL_TARGETS := hv_kvp_daemon hv_vss_daemon 22 23 ifneq ($(ARCH), aarch64)
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 53 53 #include "verifier_movsx.skel.h" 54 54 #include "verifier_netfilter_ctx.skel.h" 55 55 #include "verifier_netfilter_retcode.skel.h" 56 + #include "verifier_or_jmp32_k.skel.h" 56 57 #include "verifier_precision.skel.h" 57 58 #include "verifier_prevent_map_lookup.skel.h" 58 59 #include "verifier_raw_stack.skel.h" ··· 171 170 void test_verifier_movsx(void) { RUN(verifier_movsx); } 172 171 void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } 173 172 void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); } 173 + void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); } 174 174 void test_verifier_precision(void) { RUN(verifier_precision); } 175 175 void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); } 176 176 void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
+41
tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/bpf.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include "bpf_misc.h" 6 + 7 + SEC("socket") 8 + __description("or_jmp32_k: bit ops + branch on unknown value") 9 + __failure 10 + __msg("R0 invalid mem access 'scalar'") 11 + __naked void or_jmp32_k(void) 12 + { 13 + asm volatile (" \ 14 + r0 = 0xffffffff; \ 15 + r0 /= 1; \ 16 + r1 = 0; \ 17 + w1 = -1; \ 18 + w1 >>= 1; \ 19 + w0 &= w1; \ 20 + w0 |= 2; \ 21 + if w0 != 0x7ffffffd goto l1; \ 22 + r0 = 1; \ 23 + exit; \ 24 + l3: \ 25 + r0 = 5; \ 26 + *(u64*)(r0 - 8) = r0; \ 27 + exit; \ 28 + l2: \ 29 + w0 -= 0xe; \ 30 + if w0 == 1 goto l3; \ 31 + r0 = 4; \ 32 + exit; \ 33 + l1: \ 34 + w0 -= 0x7ffffff0; \ 35 + if w0 s>= 0xe goto l2; \ 36 + r0 = 3; \ 37 + exit; \ 38 + " ::: __clobber_all); 39 + } 40 + 41 + char _license[] SEC("license") = "GPL";
+7 -1
tools/testing/selftests/drivers/net/virtio_net/config
··· 1 - CONFIG_VIRTIO_NET=y 1 + CONFIG_BPF_SYSCALL=y 2 + CONFIG_CGROUP_BPF=y 3 + CONFIG_IPV6=y 4 + CONFIG_IPV6_MULTIPLE_TABLES=y 5 + CONFIG_NET_L3_MASTER_DEV=y 6 + CONFIG_NET_VRF=m 2 7 CONFIG_VIRTIO_DEBUG=y 8 + CONFIG_VIRTIO_NET=y
+10 -1
tools/testing/selftests/fchmodat2/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 - CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan $(KHDR_INCLUDES) 3 + CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined $(KHDR_INCLUDES) 4 + 5 + # gcc requires -static-libasan in order to ensure that Address Sanitizer's 6 + # library is the first one loaded. However, clang already statically links the 7 + # Address Sanitizer if -fsanitize is specified. Therefore, simply omit 8 + # -static-libasan for clang builds. 9 + ifeq ($(LLVM),) 10 + CFLAGS += -static-libasan 11 + endif 12 + 4 13 TEST_GEN_PROGS := fchmodat2_test 5 14 6 15 include ../lib.mk
+10 -2
tools/testing/selftests/filesystems/statmount/statmount_test.c
··· 125 125 126 126 static void cleanup_namespace(void) 127 127 { 128 - fchdir(orig_root); 129 - chroot("."); 128 + int ret; 129 + 130 + ret = fchdir(orig_root); 131 + if (ret == -1) 132 + ksft_perror("fchdir to original root"); 133 + 134 + ret = chroot("."); 135 + if (ret == -1) 136 + ksft_perror("chroot to original root"); 137 + 130 138 umount2(root_mntpoint, MNT_DETACH); 131 139 rmdir(root_mntpoint); 132 140 }
+1
tools/testing/selftests/kvm/include/x86_64/processor.h
··· 277 277 #define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31) 278 278 #define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7) 279 279 #define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15) 280 + #define X86_PROPERTY_GUEST_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23) 280 281 #define X86_PROPERTY_SEV_C_BIT KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5) 281 282 #define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11) 282 283
+1
tools/testing/selftests/kvm/lib/riscv/ucall.c
··· 9 9 10 10 #include "kvm_util.h" 11 11 #include "processor.h" 12 + #include "sbi.h" 12 13 13 14 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) 14 15 {
+13 -2
tools/testing/selftests/kvm/lib/x86_64/processor.c
··· 1247 1247 { 1248 1248 const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ 1249 1249 unsigned long ht_gfn, max_gfn, max_pfn; 1250 - uint8_t maxphyaddr; 1250 + uint8_t maxphyaddr, guest_maxphyaddr; 1251 1251 1252 - max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; 1252 + /* 1253 + * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR 1254 + * enumerates the max _mappable_ GPA, which can be less than the raw 1255 + * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU 1256 + * doesn't support 5-level TDP. 1257 + */ 1258 + guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR); 1259 + guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits; 1260 + TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits, 1261 + "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR"); 1262 + 1263 + max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1; 1253 1264 1254 1265 /* Avoid reserved HyperTransport region on AMD processors. */ 1255 1266 if (!host_cpu_is_amd)
+1
tools/testing/selftests/kvm/riscv/ebreak_test.c
··· 6 6 * 7 7 */ 8 8 #include "kvm_util.h" 9 + #include "ucall_common.h" 9 10 10 11 #define LABEL_ADDRESS(v) ((uint64_t)&(v)) 11 12
+1
tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
··· 15 15 #include "processor.h" 16 16 #include "sbi.h" 17 17 #include "arch_timer.h" 18 + #include "ucall_common.h" 18 19 19 20 /* Maximum counters(firmware + hardware) */ 20 21 #define RISCV_MAX_PMU_COUNTERS 64
+2 -2
tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
··· 105 105 int i; 106 106 107 107 for (i = 0; i < 64; i++) { 108 - if (!(supported_features & (1u << i))) 108 + if (!(supported_features & BIT_ULL(i))) 109 109 test_init2_invalid(vm_type, 110 110 &(struct kvm_sev_init){ .vmsa_features = BIT_ULL(i) }, 111 111 "unknown feature"); 112 - else if (KNOWN_FEATURES & (1u << i)) 112 + else if (KNOWN_FEATURES & BIT_ULL(i)) 113 113 test_init2(vm_type, 114 114 &(struct kvm_sev_init){ .vmsa_features = BIT_ULL(i) }); 115 115 }
+16 -8
tools/testing/selftests/mm/map_fixed_noreplace.c
··· 67 67 dump_maps(); 68 68 ksft_exit_fail_msg("Error: munmap failed!?\n"); 69 69 } 70 - ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 70 + ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 71 + ksft_test_result_pass("mmap() 5*PAGE_SIZE at base\n"); 71 72 72 73 addr = base_addr + page_size; 73 74 size = 3 * page_size; ··· 77 76 dump_maps(); 78 77 ksft_exit_fail_msg("Error: first mmap() failed unexpectedly\n"); 79 78 } 80 - ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 79 + ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 80 + ksft_test_result_pass("mmap() 3*PAGE_SIZE at base+PAGE_SIZE\n"); 81 81 82 82 /* 83 83 * Exact same mapping again: ··· 95 93 dump_maps(); 96 94 ksft_exit_fail_msg("Error:1: mmap() succeeded when it shouldn't have\n"); 97 95 } 98 - ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 96 + ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 97 + ksft_test_result_pass("mmap() 5*PAGE_SIZE at base\n"); 99 98 100 99 /* 101 100 * Second mapping contained within first: ··· 114 111 dump_maps(); 115 112 ksft_exit_fail_msg("Error:2: mmap() succeeded when it shouldn't have\n"); 116 113 } 117 - ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 114 + ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 115 + ksft_test_result_pass("mmap() 2*PAGE_SIZE at base+PAGE_SIZE\n"); 118 116 119 117 /* 120 118 * Overlap end of existing mapping: ··· 132 128 dump_maps(); 133 129 ksft_exit_fail_msg("Error:3: mmap() succeeded when it shouldn't have\n"); 134 130 } 135 - ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 131 + ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 132 + ksft_test_result_pass("mmap() 2*PAGE_SIZE at base+(3*PAGE_SIZE)\n"); 136 
133 137 134 /* 138 135 * Overlap start of existing mapping: ··· 150 145 dump_maps(); 151 146 ksft_exit_fail_msg("Error:4: mmap() succeeded when it shouldn't have\n"); 152 147 } 153 - ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 148 + ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 149 + ksft_test_result_pass("mmap() 2*PAGE_SIZE bytes at base\n"); 154 150 155 151 /* 156 152 * Adjacent to start of existing mapping: ··· 168 162 dump_maps(); 169 163 ksft_exit_fail_msg("Error:5: mmap() failed when it shouldn't have\n"); 170 164 } 171 - ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 165 + ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 166 + ksft_test_result_pass("mmap() PAGE_SIZE at base\n"); 172 167 173 168 /* 174 169 * Adjacent to end of existing mapping: ··· 186 179 dump_maps(); 187 180 ksft_exit_fail_msg("Error:6: mmap() failed when it shouldn't have\n"); 188 181 } 189 - ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 182 + ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); 183 + ksft_test_result_pass("mmap() PAGE_SIZE at base+(4*PAGE_SIZE)\n"); 190 184 191 185 addr = base_addr; 192 186 size = 5 * page_size;
+2
tools/testing/selftests/net/Makefile
··· 43 43 TEST_PROGS += srv6_end_next_csid_l3vpn_test.sh 44 44 TEST_PROGS += srv6_end_x_next_csid_l3vpn_test.sh 45 45 TEST_PROGS += srv6_end_flavors_test.sh 46 + TEST_PROGS += srv6_end_dx4_netfilter_test.sh 47 + TEST_PROGS += srv6_end_dx6_netfilter_test.sh 46 48 TEST_PROGS += vrf_strict_mode_test.sh 47 49 TEST_PROGS += arp_ndisc_evict_nocarrier.sh 48 50 TEST_PROGS += ndisc_unsolicited_na_test.sh
+2
tools/testing/selftests/net/config
··· 101 101 CONFIG_CRYPTO_ARIA=y 102 102 CONFIG_XFRM_INTERFACE=m 103 103 CONFIG_XFRM_USER=m 104 + CONFIG_IP_NF_MATCH_RPFILTER=m 105 + CONFIG_IP6_NF_MATCH_RPFILTER=m
+28 -18
tools/testing/selftests/net/mptcp/userspace_pm.sh
··· 160 160 local is_v6=$1 161 161 local app_port=$app4_port 162 162 local connect_addr="10.0.1.1" 163 + local client_addr="10.0.1.2" 163 164 local listen_addr="0.0.0.0" 164 165 if [ "$is_v6" = "v6" ] 165 166 then 166 167 connect_addr="dead:beef:1::1" 168 + client_addr="dead:beef:1::2" 167 169 listen_addr="::" 168 170 app_port=$app6_port 169 171 else ··· 208 206 [ "$server_serverside" = 1 ] 209 207 then 210 208 test_pass 209 + print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}" 211 210 else 212 211 test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})" 213 212 mptcp_lib_result_print_all_tap ··· 300 297 ip netns exec "$ns2"\ 301 298 ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\ 302 299 ns2eth1 303 - print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, reuse port" 300 + print_test "ADD_ADDR id:client 10.0.2.2 (ns2) => ns1, reuse port" 304 301 sleep 0.5 305 302 verify_announce_event $server_evts $ANNOUNCED $server4_token "10.0.2.2" $client_addr_id \ 306 303 "$client4_port" ··· 309 306 :>"$server_evts" 310 307 ip netns exec "$ns2" ./pm_nl_ctl ann\ 311 308 dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1 312 - print_test "ADD_ADDR6 id:${client_addr_id} dead:beef:2::2 (ns2) => ns1, reuse port" 309 + print_test "ADD_ADDR6 id:client dead:beef:2::2 (ns2) => ns1, reuse port" 313 310 sleep 0.5 314 311 verify_announce_event "$server_evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\ 315 312 "$client_addr_id" "$client6_port" "v6" ··· 319 316 client_addr_id=$((client_addr_id+1)) 320 317 ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\ 321 318 $client_addr_id dev ns2eth1 port $new4_port 322 - print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, new port" 319 + print_test "ADD_ADDR id:client+1 10.0.2.2 (ns2) => ns1, new port" 323 320 sleep 0.5 324 321 verify_announce_event 
"$server_evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\ 325 322 "$client_addr_id" "$new4_port" ··· 330 327 # ADD_ADDR from the server to client machine reusing the subflow port 331 328 ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\ 332 329 $server_addr_id dev ns1eth2 333 - print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port" 330 + print_test "ADD_ADDR id:server 10.0.2.1 (ns1) => ns2, reuse port" 334 331 sleep 0.5 335 332 verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\ 336 333 "$server_addr_id" "$app4_port" ··· 339 336 :>"$client_evts" 340 337 ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\ 341 338 $server_addr_id dev ns1eth2 342 - print_test "ADD_ADDR6 id:${server_addr_id} dead:beef:2::1 (ns1) => ns2, reuse port" 339 + print_test "ADD_ADDR6 id:server dead:beef:2::1 (ns1) => ns2, reuse port" 343 340 sleep 0.5 344 341 verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\ 345 342 "$server_addr_id" "$app6_port" "v6" ··· 349 346 server_addr_id=$((server_addr_id+1)) 350 347 ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\ 351 348 $server_addr_id dev ns1eth2 port $new4_port 352 - print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, new port" 349 + print_test "ADD_ADDR id:server+1 10.0.2.1 (ns1) => ns2, new port" 353 350 sleep 0.5 354 351 verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\ 355 352 "$server_addr_id" "$new4_port" ··· 383 380 local invalid_token=$(( client4_token - 1 )) 384 381 ip netns exec "$ns2" ./pm_nl_ctl rem token $invalid_token id\ 385 382 $client_addr_id > /dev/null 2>&1 386 - print_test "RM_ADDR id:${client_addr_id} ns2 => ns1, invalid token" 383 + print_test "RM_ADDR id:client ns2 => ns1, invalid token" 387 384 local type 388 385 type=$(mptcp_lib_evts_get_info type "$server_evts") 389 386 if [ "$type" = "" ] ··· 397 394 local invalid_id=$(( 
client_addr_id + 1 )) 398 395 ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\ 399 396 $invalid_id > /dev/null 2>&1 400 - print_test "RM_ADDR id:${invalid_id} ns2 => ns1, invalid id" 397 + print_test "RM_ADDR id:client+1 ns2 => ns1, invalid id" 401 398 type=$(mptcp_lib_evts_get_info type "$server_evts") 402 399 if [ "$type" = "" ] 403 400 then ··· 410 407 :>"$server_evts" 411 408 ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\ 412 409 $client_addr_id 413 - print_test "RM_ADDR id:${client_addr_id} ns2 => ns1" 410 + print_test "RM_ADDR id:client ns2 => ns1" 414 411 sleep 0.5 415 412 verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id" 416 413 ··· 419 416 client_addr_id=$(( client_addr_id - 1 )) 420 417 ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\ 421 418 $client_addr_id 422 - print_test "RM_ADDR id:${client_addr_id} ns2 => ns1" 419 + print_test "RM_ADDR id:client-1 ns2 => ns1" 423 420 sleep 0.5 424 421 verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id" 425 422 ··· 427 424 :>"$server_evts" 428 425 ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\ 429 426 $client_addr_id 430 - print_test "RM_ADDR6 id:${client_addr_id} ns2 => ns1" 427 + print_test "RM_ADDR6 id:client-1 ns2 => ns1" 431 428 sleep 0.5 432 429 verify_remove_event "$server_evts" "$REMOVED" "$server6_token" "$client_addr_id" 433 430 ··· 437 434 # RM_ADDR from the server to client machine 438 435 ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\ 439 436 $server_addr_id 440 - print_test "RM_ADDR id:${server_addr_id} ns1 => ns2" 437 + print_test "RM_ADDR id:server ns1 => ns2" 441 438 sleep 0.5 442 439 verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id" 443 440 ··· 446 443 server_addr_id=$(( server_addr_id - 1 )) 447 444 ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\ 448 445 $server_addr_id 449 - print_test "RM_ADDR 
id:${server_addr_id} ns1 => ns2" 446 + print_test "RM_ADDR id:server-1 ns1 => ns2" 450 447 sleep 0.5 451 448 verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id" 452 449 ··· 454 451 :>"$client_evts" 455 452 ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\ 456 453 $server_addr_id 457 - print_test "RM_ADDR6 id:${server_addr_id} ns1 => ns2" 454 + print_test "RM_ADDR6 id:server-1 ns1 => ns2" 458 455 sleep 0.5 459 456 verify_remove_event "$client_evts" "$REMOVED" "$client6_token" "$server_addr_id" 460 457 } ··· 482 479 local locid 483 480 local remid 484 481 local info 482 + local e_dport_txt 485 483 486 - info="${e_saddr} (${e_from}) => ${e_daddr}:${e_dport} (${e_to})" 484 + # only display the fixed ports 485 + if [ "${e_dport}" -ge "${app4_port}" ] && [ "${e_dport}" -le "${app6_port}" ]; then 486 + e_dport_txt=":${e_dport}" 487 + fi 488 + 489 + info="${e_saddr} (${e_from}) => ${e_daddr}${e_dport_txt} (${e_to})" 487 490 488 491 if [ "$e_type" = "$SUB_ESTABLISHED" ] 489 492 then ··· 775 766 :>"$client_evts" 776 767 ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\ 777 768 $server_addr_id dev ns1eth2 778 - print_test "ADD_ADDR4 id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port" 769 + print_test "ADD_ADDR4 id:server 10.0.2.1 (ns1) => ns2, reuse port" 779 770 sleep 0.5 780 771 verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\ 781 772 "$server_addr_id" "$app6_port" ··· 870 861 local listener_pid=$! 
871 862 872 863 sleep 0.5 873 - print_test "CREATE_LISTENER 10.0.2.2:$client4_port" 864 + print_test "CREATE_LISTENER 10.0.2.2 (client port)" 874 865 verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port 875 866 876 867 # ADD_ADDR from client to server machine reusing the subflow port ··· 887 878 mptcp_lib_kill_wait $listener_pid 888 879 889 880 sleep 0.5 890 - print_test "CLOSE_LISTENER 10.0.2.2:$client4_port" 881 + print_test "CLOSE_LISTENER 10.0.2.2 (client port)" 891 882 verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port 892 883 } 893 884 894 885 print_title "Make connections" 895 886 make_connection 896 887 make_connection "v6" 888 + print_title "Will be using address IDs ${client_addr_id} (client) and ${server_addr_id} (server)" 897 889 898 890 test_announce 899 891 test_remove
+1 -1
tools/testing/selftests/net/openvswitch/openvswitch.sh
··· 1 - #!/bin/sh 1 + #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 # 4 4 # OVS kernel module self tests
+1 -1
tools/testing/selftests/net/openvswitch/ovs-dpctl.py
··· 531 531 for flat_act in parse_flat_map: 532 532 if parse_starts_block(actstr, flat_act[0], False): 533 533 actstr = actstr[len(flat_act[0]):] 534 - self["attrs"].append([flat_act[1]]) 534 + self["attrs"].append([flat_act[1], True]) 535 535 actstr = actstr[strspn(actstr, ", ") :] 536 536 parsed = True 537 537
+335
tools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + # 4 + # author: Jianguo Wu <wujianguo@chinatelecom.cn> 5 + # 6 + # Mostly copied from tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh. 7 + # 8 + # This script is designed for testing the support of netfilter hooks for 9 + # SRv6 End.DX4 behavior. 10 + # 11 + # Hereafter a network diagram is shown, where one tenants (named 100) offer 12 + # IPv4 L3 VPN services allowing hosts to communicate with each other across 13 + # an IPv6 network. 14 + # 15 + # Routers rt-1 and rt-2 implement IPv4 L3 VPN services leveraging the SRv6 16 + # architecture. The key components for such VPNs are: a) SRv6 Encap behavior, 17 + # b) SRv6 End.DX4 behavior. 18 + # 19 + # To explain how an IPv4 L3 VPN based on SRv6 works, let us briefly consider an 20 + # example where, within the same domain of tenant 100, the host hs-1 pings 21 + # the host hs-2. 22 + # 23 + # First of all, L2 reachability of the host hs-2 is taken into account by 24 + # the router rt-1 which acts as an arp proxy. 25 + # 26 + # When the host hs-1 sends an IPv4 packet destined to hs-2, the router rt-1 27 + # receives the packet on the internal veth-t100 interface, rt-1 contains the 28 + # SRv6 Encap route for encapsulating the IPv4 packet in a IPv6 plus the Segment 29 + # Routing Header (SRH) packet. This packet is sent through the (IPv6) core 30 + # network up to the router rt-2 that receives it on veth0 interface. 31 + # 32 + # The rt-2 router uses the 'localsid' routing table to process incoming 33 + # IPv6+SRH packets which belong to the VPN of the tenant 100. For each of these 34 + # packets, the SRv6 End.DX4 behavior removes the outer IPv6+SRH headers and 35 + # routs the packet to the specified nexthop. Afterwards, the packet is sent to 36 + # the host hs-2 through the veth-t100 interface. 37 + # 38 + # The ping response follows the same processing but this time the role of rt-1 39 + # and rt-2 are swapped. 
40 + # 41 + # And when net.netfilter.nf_hooks_lwtunnel is set to 1 in rt-1 or rt-2, and a 42 + # rpfilter iptables rule is added, SRv6 packets will go through netfilter PREROUTING 43 + # hooks. 44 + # 45 + # 46 + # +-------------------+ +-------------------+ 47 + # | | | | 48 + # | hs-1 netns | | hs-2 netns | 49 + # | | | | 50 + # | +-------------+ | | +-------------+ | 51 + # | | veth0 | | | | veth0 | | 52 + # | | 10.0.0.1/24 | | | | 10.0.0.2/24 | | 53 + # | +-------------+ | | +-------------+ | 54 + # | . | | . | 55 + # +-------------------+ +-------------------+ 56 + # . . 57 + # . . 58 + # . . 59 + # +-----------------------------------+ +-----------------------------------+ 60 + # | . | | . | 61 + # | +---------------+ | | +---------------- | 62 + # | | veth-t100 | | | | veth-t100 | | 63 + # | | 10.0.0.11/24 | +----------+ | | +----------+ | 10.0.0.22/24 | | 64 + # | +-------+-------+ | route | | | | route | +-------+-------- | 65 + # | | table | | | | table | | 66 + # | +----------+ | | +----------+ | 67 + # | +--------------+ | | +--------------+ | 68 + # | | veth0 | | | | veth0 | | 69 + # | | 2001:11::1/64 |.|...|.| 2001:11::2/64 | | 70 + # | +--------------+ | | +--------------+ | 71 + # | | | | 72 + # | rt-1 netns | | rt-2 netns | 73 + # | | | | 74 + # +-----------------------------------+ +-----------------------------------+ 75 + # 76 + # ~~~~~~~~~~~~~~~~~~~~~~~~~ 77 + # | Network configuration | 78 + # ~~~~~~~~~~~~~~~~~~~~~~~~~ 79 + # 80 + # rt-1: localsid table 81 + # +----------------------------------------------------------------+ 82 + # |SID |Action | 83 + # +----------------------------------------------------------------+ 84 + # |fc00:21:100::6004|apply SRv6 End.DX4 nh4 10.0.0.1 dev veth-t100 | 85 + # +----------------------------------------------------------------+ 86 + # 87 + # rt-1: route table 88 + # +---------------------------------------------------+ 89 + # |host |Action | 90 + # +---------------------------------------------------+ 91 + 
# |10.0.0.2 |apply seg6 encap segs fc00:12:100::6004| 92 + # +---------------------------------------------------+ 93 + # |10.0.0.0/24|forward to dev veth_t100 | 94 + # +---------------------------------------------------+ 95 + # 96 + # 97 + # rt-2: localsid table 98 + # +---------------------------------------------------------------+ 99 + # |SID |Action | 100 + # +---------------------------------------------------------------+ 101 + # |fc00:12:100::6004|apply SRv6 End.DX4 nh4 10.0.0.2 dev veth-t100| 102 + # +---------------------------------------------------------------+ 103 + # 104 + # rt-2: route table 105 + # +---------------------------------------------------+ 106 + # |host |Action | 107 + # +---------------------------------------------------+ 108 + # |10.0.0.1 |apply seg6 encap segs fc00:21:100::6004| 109 + # +---------------------------------------------------+ 110 + # |10.0.0.0/24|forward to dev veth_t100 | 111 + # +---------------------------------------------------+ 112 + # 113 + 114 + # Kselftest framework requirement - SKIP code is 4. 
115 + ksft_skip=4 116 + 117 + readonly IPv6_RT_NETWORK=2001:11 118 + readonly IPv4_HS_NETWORK=10.0.0 119 + readonly SID_LOCATOR=fc00 120 + 121 + PING_TIMEOUT_SEC=4 122 + 123 + ret=0 124 + 125 + PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no} 126 + 127 + log_test() 128 + { 129 + local rc=$1 130 + local expected=$2 131 + local msg="$3" 132 + 133 + if [ ${rc} -eq ${expected} ]; then 134 + nsuccess=$((nsuccess+1)) 135 + printf "\n TEST: %-60s [ OK ]\n" "${msg}" 136 + else 137 + ret=1 138 + nfail=$((nfail+1)) 139 + printf "\n TEST: %-60s [FAIL]\n" "${msg}" 140 + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then 141 + echo 142 + echo "hit enter to continue, 'q' to quit" 143 + read a 144 + [ "$a" = "q" ] && exit 1 145 + fi 146 + fi 147 + } 148 + 149 + print_log_test_results() 150 + { 151 + if [ "$TESTS" != "none" ]; then 152 + printf "\nTests passed: %3d\n" ${nsuccess} 153 + printf "Tests failed: %3d\n" ${nfail} 154 + fi 155 + } 156 + 157 + log_section() 158 + { 159 + echo 160 + echo "################################################################################" 161 + echo "TEST SECTION: $*" 162 + echo "################################################################################" 163 + } 164 + 165 + cleanup() 166 + { 167 + ip link del veth-rt-1 2>/dev/null || true 168 + ip link del veth-rt-2 2>/dev/null || true 169 + 170 + # destroy routers rt-* and hosts hs-* 171 + for ns in $(ip netns show | grep -E 'rt-*|hs-*'); do 172 + ip netns del ${ns} || true 173 + done 174 + } 175 + 176 + # Setup the basic networking for the routers 177 + setup_rt_networking() 178 + { 179 + local rt=$1 180 + local nsname=rt-${rt} 181 + 182 + ip netns add ${nsname} 183 + 184 + ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0 185 + ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0 186 + 187 + ip link set veth-rt-${rt} netns ${nsname} 188 + ip -netns ${nsname} link set veth-rt-${rt} name veth0 189 + 190 + ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad 
191 + ip -netns ${nsname} link set veth0 up 192 + ip -netns ${nsname} link set lo up 193 + 194 + ip netns exec ${nsname} sysctl -wq net.ipv4.ip_forward=1 195 + ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.forwarding=1 196 + } 197 + 198 + setup_rt_netfilter() 199 + { 200 + local rt=$1 201 + local nsname=rt-${rt} 202 + 203 + ip netns exec ${nsname} sysctl -wq net.netfilter.nf_hooks_lwtunnel=1 204 + ip netns exec ${nsname} iptables -t raw -A PREROUTING -m rpfilter --invert -j DROP 205 + } 206 + 207 + setup_hs() 208 + { 209 + local hs=$1 210 + local rt=$2 211 + local tid=$3 212 + local hsname=hs-${hs} 213 + local rtname=rt-${rt} 214 + local rtveth=veth-t${tid} 215 + 216 + # set the networking for the host 217 + ip netns add ${hsname} 218 + 219 + ip -netns ${hsname} link add veth0 type veth peer name ${rtveth} 220 + ip -netns ${hsname} link set ${rtveth} netns ${rtname} 221 + ip -netns ${hsname} addr add ${IPv4_HS_NETWORK}.${hs}/24 dev veth0 222 + ip -netns ${hsname} link set veth0 up 223 + ip -netns ${hsname} link set lo up 224 + 225 + ip -netns ${rtname} addr add ${IPv4_HS_NETWORK}.${rt}${hs}/24 dev ${rtveth} 226 + ip -netns ${rtname} link set ${rtveth} up 227 + 228 + ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1 229 + } 230 + 231 + setup_vpn_config() 232 + { 233 + local hssrc=$1 234 + local rtsrc=$2 235 + local hsdst=$3 236 + local rtdst=$4 237 + local tid=$5 238 + 239 + local hssrc_name=hs-t${tid}-${hssrc} 240 + local hsdst_name=hs-t${tid}-${hsdst} 241 + local rtsrc_name=rt-${rtsrc} 242 + local rtdst_name=rt-${rtdst} 243 + local vpn_sid=${SID_LOCATOR}:${hssrc}${hsdst}:${tid}::6004 244 + 245 + # set the encap route for encapsulating packets which arrive from the 246 + # host hssrc and destined to the access router rtsrc. 
247 + ip -netns ${rtsrc_name} -4 route add ${IPv4_HS_NETWORK}.${hsdst}/32 \ 248 + encap seg6 mode encap segs ${vpn_sid} dev veth0 249 + ip -netns ${rtsrc_name} -6 route add ${vpn_sid}/128 \ 250 + via 2001:11::${rtdst} dev veth0 251 + 252 + # set the decap route for decapsulating packets which arrive from 253 + # the rtdst router and destined to the hsdst host. 254 + ip -netns ${rtdst_name} -6 route add ${vpn_sid}/128 \ 255 + encap seg6local action End.DX4 nh4 ${IPv4_HS_NETWORK}.${hsdst} dev veth-t${tid} 256 + } 257 + 258 + setup() 259 + { 260 + ip link add veth-rt-1 type veth peer name veth-rt-2 261 + # setup the networking for router rt-1 and router rt-2 262 + setup_rt_networking 1 263 + setup_rt_networking 2 264 + 265 + # setup two hosts for the tenant 100. 266 + # - host hs-1 is directly connected to the router rt-1; 267 + # - host hs-2 is directly connected to the router rt-2. 268 + setup_hs 1 1 100 269 + setup_hs 2 2 100 270 + 271 + # setup the IPv4 L3 VPN which connects the host hs-1 and host hs-2. 272 + setup_vpn_config 1 1 2 2 100 #args: src_host src_router dst_host dst_router tenant 273 + setup_vpn_config 2 2 1 1 100 274 + } 275 + 276 + check_hs_connectivity() 277 + { 278 + local hssrc=$1 279 + local hsdst=$2 280 + local tid=$3 281 + 282 + ip netns exec hs-${hssrc} ping -c 1 -W ${PING_TIMEOUT_SEC} \ 283 + ${IPv4_HS_NETWORK}.${hsdst} >/dev/null 2>&1 284 + } 285 + 286 + check_and_log_hs_connectivity() 287 + { 288 + local hssrc=$1 289 + local hsdst=$2 290 + local tid=$3 291 + 292 + check_hs_connectivity ${hssrc} ${hsdst} ${tid} 293 + log_test $? 
0 "Hosts connectivity: hs-${hssrc} -> hs-${hsdst} (tenant ${tid})" 294 + } 295 + 296 + host_tests() 297 + { 298 + log_section "SRv6 VPN connectivity test among hosts in the same tenant" 299 + 300 + check_and_log_hs_connectivity 1 2 100 301 + check_and_log_hs_connectivity 2 1 100 302 + } 303 + 304 + router_netfilter_tests() 305 + { 306 + log_section "SRv6 VPN connectivity test with netfilter enabled in routers" 307 + setup_rt_netfilter 1 308 + setup_rt_netfilter 2 309 + 310 + check_and_log_hs_connectivity 1 2 100 311 + check_and_log_hs_connectivity 2 1 100 312 + } 313 + 314 + if [ "$(id -u)" -ne 0 ];then 315 + echo "SKIP: Need root privileges" 316 + exit $ksft_skip 317 + fi 318 + 319 + if [ ! -x "$(command -v ip)" ]; then 320 + echo "SKIP: Could not run test without ip tool" 321 + exit $ksft_skip 322 + fi 323 + 324 + cleanup &>/dev/null 325 + 326 + setup 327 + 328 + host_tests 329 + router_netfilter_tests 330 + 331 + print_log_test_results 332 + 333 + cleanup &>/dev/null 334 + 335 + exit ${ret}
+340
tools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + # 4 + # author: Jianguo Wu <wujianguo@chinatelecom.cn> 5 + # 6 + # Mostly copied from tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh. 7 + # 8 + # This script is designed for testing the support of netfilter hooks for 9 + # SRv6 End.DX6 behavior. 10 + # 11 + # Hereafter a network diagram is shown, where one tenant (named 100) offers 12 + # IPv6 L3 VPN services allowing hosts to communicate with each other across 13 + # an IPv6 network. 14 + # 15 + # Routers rt-1 and rt-2 implement IPv6 L3 VPN services leveraging the SRv6 16 + # architecture. The key components for such VPNs are: a) SRv6 Encap behavior, 17 + # b) SRv6 End.DX6 behavior. 18 + # 19 + # To explain how an IPv6 L3 VPN based on SRv6 works, let us briefly consider an 20 + # example where, within the same domain of tenant 100, the host hs-1 pings 21 + # the host hs-2. 22 + # 23 + # First of all, L2 reachability of the host hs-2 is taken into account by 24 + # the router rt-1 which acts as an arp proxy. 25 + # 26 + # When the host hs-1 sends an IPv6 packet destined to hs-2, the router rt-1 27 + # receives the packet on the internal veth-t100 interface; rt-1 contains the 28 + # SRv6 Encap route for encapsulating the IPv6 packet in an IPv6 plus the Segment 29 + # Routing Header (SRH) packet. This packet is sent through the (IPv6) core 30 + # network up to the router rt-2 that receives it on veth0 interface. 31 + # 32 + # The rt-2 router uses the 'localsid' routing table to process incoming 33 + # IPv6+SRH packets which belong to the VPN of the tenant 100. For each of these 34 + # packets, the SRv6 End.DX6 behavior removes the outer IPv6+SRH headers and 35 + # routes the packet to the specified nexthop. Afterwards, the packet is sent to 36 + # the host hs-2 through the veth-t100 interface. 37 + # 38 + # The ping response follows the same processing but this time the roles of rt-1 39 + # and rt-2 are swapped. 
40 + # 41 + # And when net.netfilter.nf_hooks_lwtunnel is set to 1 in rt-1 or rt-2, and a 42 + # rpfilter iptables rule is added, SRv6 packets will go through netfilter PREROUTING 43 + # hooks. 44 + # 45 + # 46 + # +-------------------+ +-------------------+ 47 + # | | | | 48 + # | hs-1 netns | | hs-2 netns | 49 + # | | | | 50 + # | +-------------+ | | +-------------+ | 51 + # | | veth0 | | | | veth0 | | 52 + # | | cafe::1/64 | | | | cafe::2/64 | | 53 + # | +-------------+ | | +-------------+ | 54 + # | . | | . | 55 + # +-------------------+ +-------------------+ 56 + # . . 57 + # . . 58 + # . . 59 + # +-----------------------------------+ +-----------------------------------+ 60 + # | . | | . | 61 + # | +---------------+ | | +---------------- | 62 + # | | veth-t100 | | | | veth-t100 | | 63 + # | | cafe::11/64 | +----------+ | | +----------+ | cafe::22/64 | | 64 + # | +-------+-------+ | route | | | | route | +-------+-------- | 65 + # | | table | | | | table | | 66 + # | +----------+ | | +----------+ | 67 + # | +--------------+ | | +--------------+ | 68 + # | | veth0 | | | | veth0 | | 69 + # | | 2001:11::1/64 |.|...|.| 2001:11::2/64 | | 70 + # | +--------------+ | | +--------------+ | 71 + # | | | | 72 + # | rt-1 netns | | rt-2 netns | 73 + # | | | | 74 + # +-----------------------------------+ +-----------------------------------+ 75 + # 76 + # ~~~~~~~~~~~~~~~~~~~~~~~~~ 77 + # | Network configuration | 78 + # ~~~~~~~~~~~~~~~~~~~~~~~~~ 79 + # 80 + # rt-1: localsid table 81 + # +----------------------------------------------------------------+ 82 + # |SID |Action | 83 + # +----------------------------------------------------------------+ 84 + # |fc00:21:100::6004|apply SRv6 End.DX6 nh6 cafe::1 dev veth-t100 | 85 + # +----------------------------------------------------------------+ 86 + # 87 + # rt-1: route table 88 + # +---------------------------------------------------+ 89 + # |host |Action | 90 + # +---------------------------------------------------+ 91 + # 
|cafe::2 |apply seg6 encap segs fc00:12:100::6004| 92 + # +---------------------------------------------------+ 93 + # |cafe::/64 |forward to dev veth_t100 | 94 + # +---------------------------------------------------+ 95 + # 96 + # 97 + # rt-2: localsid table 98 + # +---------------------------------------------------------------+ 99 + # |SID |Action | 100 + # +---------------------------------------------------------------+ 101 + # |fc00:12:100::6004|apply SRv6 End.DX6 nh6 cafe::2 dev veth-t100 | 102 + # +---------------------------------------------------------------+ 103 + # 104 + # rt-2: route table 105 + # +---------------------------------------------------+ 106 + # |host |Action | 107 + # +---------------------------------------------------+ 108 + # |cafe::1 |apply seg6 encap segs fc00:21:100::6004| 109 + # +---------------------------------------------------+ 110 + # |cafe::/64 |forward to dev veth_t100 | 111 + # +---------------------------------------------------+ 112 + # 113 + 114 + # Kselftest framework requirement - SKIP code is 4. 
115 + ksft_skip=4 116 + 117 + readonly IPv6_RT_NETWORK=2001:11 118 + readonly IPv6_HS_NETWORK=cafe 119 + readonly SID_LOCATOR=fc00 120 + 121 + PING_TIMEOUT_SEC=4 122 + 123 + ret=0 124 + 125 + PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no} 126 + 127 + log_test() 128 + { 129 + local rc=$1 130 + local expected=$2 131 + local msg="$3" 132 + 133 + if [ ${rc} -eq ${expected} ]; then 134 + nsuccess=$((nsuccess+1)) 135 + printf "\n TEST: %-60s [ OK ]\n" "${msg}" 136 + else 137 + ret=1 138 + nfail=$((nfail+1)) 139 + printf "\n TEST: %-60s [FAIL]\n" "${msg}" 140 + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then 141 + echo 142 + echo "hit enter to continue, 'q' to quit" 143 + read a 144 + [ "$a" = "q" ] && exit 1 145 + fi 146 + fi 147 + } 148 + 149 + print_log_test_results() 150 + { 151 + if [ "$TESTS" != "none" ]; then 152 + printf "\nTests passed: %3d\n" ${nsuccess} 153 + printf "Tests failed: %3d\n" ${nfail} 154 + fi 155 + } 156 + 157 + log_section() 158 + { 159 + echo 160 + echo "################################################################################" 161 + echo "TEST SECTION: $*" 162 + echo "################################################################################" 163 + } 164 + 165 + cleanup() 166 + { 167 + ip link del veth-rt-1 2>/dev/null || true 168 + ip link del veth-rt-2 2>/dev/null || true 169 + 170 + # destroy routers rt-* and hosts hs-* 171 + for ns in $(ip netns show | grep -E 'rt-*|hs-*'); do 172 + ip netns del ${ns} || true 173 + done 174 + } 175 + 176 + # Setup the basic networking for the routers 177 + setup_rt_networking() 178 + { 179 + local rt=$1 180 + local nsname=rt-${rt} 181 + 182 + ip netns add ${nsname} 183 + 184 + ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0 185 + ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0 186 + 187 + ip link set veth-rt-${rt} netns ${nsname} 188 + ip -netns ${nsname} link set veth-rt-${rt} name veth0 189 + 190 + ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad 
191 + ip -netns ${nsname} link set veth0 up 192 + ip -netns ${nsname} link set lo up 193 + 194 + ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.forwarding=1 195 + } 196 + 197 + setup_rt_netfilter() 198 + { 199 + local rt=$1 200 + local nsname=rt-${rt} 201 + 202 + ip netns exec ${nsname} sysctl -wq net.netfilter.nf_hooks_lwtunnel=1 203 + ip netns exec ${nsname} ip6tables -t raw -A PREROUTING -m rpfilter --invert -j DROP 204 + } 205 + 206 + setup_hs() 207 + { 208 + local hs=$1 209 + local rt=$2 210 + local tid=$3 211 + local hsname=hs-${hs} 212 + local rtname=rt-${rt} 213 + local rtveth=veth-t${tid} 214 + 215 + # set the networking for the host 216 + ip netns add ${hsname} 217 + 218 + ip -netns ${hsname} link add veth0 type veth peer name ${rtveth} 219 + ip -netns ${hsname} link set ${rtveth} netns ${rtname} 220 + ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad 221 + ip -netns ${hsname} link set veth0 up 222 + ip -netns ${hsname} link set lo up 223 + 224 + ip -netns ${rtname} addr add ${IPv6_HS_NETWORK}::${rt}${hs}/64 dev ${rtveth} 225 + ip -netns ${rtname} link set ${rtveth} up 226 + 227 + ip netns exec ${rtname} sysctl -wq net.ipv6.conf.all.accept_dad=0 228 + ip netns exec ${rtname} sysctl -wq net.ipv6.conf.default.accept_dad=0 229 + 230 + ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1 231 + } 232 + 233 + setup_vpn_config() 234 + { 235 + local hssrc=$1 236 + local rtsrc=$2 237 + local hsdst=$3 238 + local rtdst=$4 239 + local tid=$5 240 + 241 + local hssrc_name=hs-t${tid}-${hssrc} 242 + local hsdst_name=hs-t${tid}-${hsdst} 243 + local rtsrc_name=rt-${rtsrc} 244 + local rtdst_name=rt-${rtdst} 245 + local rtveth=veth-t${tid} 246 + local vpn_sid=${SID_LOCATOR}:${hssrc}${hsdst}:${tid}::6004 247 + 248 + ip -netns ${rtsrc_name} -6 neigh add proxy ${IPv6_HS_NETWORK}::${hsdst} dev ${rtveth} 249 + 250 + # set the encap route for encapsulating packets which arrive from the 251 + # host hssrc and destined to the access 
router rtsrc. 252 + ip -netns ${rtsrc_name} -6 route add ${IPv6_HS_NETWORK}::${hsdst}/128 \ 253 + encap seg6 mode encap segs ${vpn_sid} dev veth0 254 + ip -netns ${rtsrc_name} -6 route add ${vpn_sid}/128 \ 255 + via 2001:11::${rtdst} dev veth0 256 + 257 + # set the decap route for decapsulating packets which arrive from 258 + # the rtdst router and destined to the hsdst host. 259 + ip -netns ${rtdst_name} -6 route add ${vpn_sid}/128 \ 260 + encap seg6local action End.DX6 nh6 ${IPv6_HS_NETWORK}::${hsdst} dev veth-t${tid} 261 + } 262 + 263 + setup() 264 + { 265 + ip link add veth-rt-1 type veth peer name veth-rt-2 266 + # setup the networking for router rt-1 and router rt-2 267 + setup_rt_networking 1 268 + setup_rt_networking 2 269 + 270 + # setup two hosts for the tenant 100. 271 + # - host hs-1 is directly connected to the router rt-1; 272 + # - host hs-2 is directly connected to the router rt-2. 273 + setup_hs 1 1 100 274 + setup_hs 2 2 100 275 + 276 + # setup the IPv6 L3 VPN which connects the host hs-1 and host hs-2. 277 + setup_vpn_config 1 1 2 2 100 #args: src_host src_router dst_host dst_router tenant 278 + setup_vpn_config 2 2 1 1 100 279 + } 280 + 281 + check_hs_connectivity() 282 + { 283 + local hssrc=$1 284 + local hsdst=$2 285 + local tid=$3 286 + 287 + ip netns exec hs-${hssrc} ping -6 -c 1 -W ${PING_TIMEOUT_SEC} \ 288 + ${IPv6_HS_NETWORK}::${hsdst} >/dev/null 2>&1 289 + } 290 + 291 + check_and_log_hs_connectivity() 292 + { 293 + local hssrc=$1 294 + local hsdst=$2 295 + local tid=$3 296 + 297 + check_hs_connectivity ${hssrc} ${hsdst} ${tid} 298 + log_test $? 
0 "Hosts connectivity: hs-${hssrc} -> hs-${hsdst} (tenant ${tid})" 299 + } 300 + 301 + host_tests() 302 + { 303 + log_section "SRv6 VPN connectivity test among hosts in the same tenant" 304 + 305 + check_and_log_hs_connectivity 1 2 100 306 + check_and_log_hs_connectivity 2 1 100 307 + } 308 + 309 + router_netfilter_tests() 310 + { 311 + log_section "SRv6 VPN connectivity test with netfilter enabled in routers" 312 + setup_rt_netfilter 1 313 + setup_rt_netfilter 2 314 + 315 + check_and_log_hs_connectivity 1 2 100 316 + check_and_log_hs_connectivity 2 1 100 317 + } 318 + 319 + if [ "$(id -u)" -ne 0 ];then 320 + echo "SKIP: Need root privileges" 321 + exit $ksft_skip 322 + fi 323 + 324 + if [ ! -x "$(command -v ip)" ]; then 325 + echo "SKIP: Could not run test without ip tool" 326 + exit $ksft_skip 327 + fi 328 + 329 + cleanup &>/dev/null 330 + 331 + setup 332 + 333 + host_tests 334 + router_netfilter_tests 335 + 336 + print_log_test_results 337 + 338 + cleanup &>/dev/null 339 + 340 + exit ${ret}
+12 -2
tools/testing/selftests/openat2/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 - CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan 3 + CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined 4 4 TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test 5 + 6 + # gcc requires -static-libasan in order to ensure that Address Sanitizer's 7 + # library is the first one loaded. However, clang already statically links the 8 + # Address Sanitizer if -fsanitize is specified. Therefore, simply omit 9 + # -static-libasan for clang builds. 10 + ifeq ($(LLVM),) 11 + CFLAGS += -static-libasan 12 + endif 13 + 14 + LOCAL_HDRS += helpers.h 5 15 6 16 include ../lib.mk 7 17 8 - $(TEST_GEN_PROGS): helpers.c helpers.h 18 + $(TEST_GEN_PROGS): helpers.c
+3 -3
tools/testing/selftests/seccomp/seccomp_benchmark.c
··· 194 194 ksft_set_plan(7); 195 195 196 196 ksft_print_msg("Running on:\n"); 197 - ksft_print_msg(""); 197 + ksft_print_msg("%s", ""); 198 198 system("uname -a"); 199 199 200 200 ksft_print_msg("Current BPF sysctl settings:\n"); 201 201 /* Avoid using "sysctl" which may not be installed. */ 202 - ksft_print_msg(""); 202 + ksft_print_msg("%s", ""); 203 203 system("grep -H . /proc/sys/net/core/bpf_jit_enable"); 204 - ksft_print_msg(""); 204 + ksft_print_msg("%s", ""); 205 205 system("grep -H . /proc/sys/net/core/bpf_jit_harden"); 206 206 207 207 affinity();
+3
virt/kvm/dirty_ring.c
··· 55 55 struct kvm_memory_slot *memslot; 56 56 int as_id, id; 57 57 58 + if (!mask) 59 + return; 60 + 58 61 as_id = slot >> 16; 59 62 id = (u16)slot; 60 63
+3 -2
virt/kvm/guest_memfd.c
··· 510 510 } 511 511 512 512 if (folio_test_hwpoison(folio)) { 513 + folio_unlock(folio); 514 + folio_put(folio); 513 515 r = -EHWPOISON; 514 - goto out_unlock; 516 + goto out_fput; 515 517 } 516 518 517 519 page = folio_file_page(folio, index); ··· 524 522 525 523 r = 0; 526 524 527 - out_unlock: 528 525 folio_unlock(folio); 529 526 out_fput: 530 527 fput(file);
+10 -9
virt/kvm/kvm_main.c
··· 651 651 range->on_lock(kvm); 652 652 653 653 if (IS_KVM_NULL_FN(range->handler)) 654 - break; 654 + goto mmu_unlock; 655 655 } 656 656 r.ret |= range->handler(kvm, &gfn_range); 657 657 } ··· 660 660 if (range->flush_on_ret && r.ret) 661 661 kvm_flush_remote_tlbs(kvm); 662 662 663 + mmu_unlock: 663 664 if (r.found_memslot) 664 665 KVM_MMU_UNLOCK(kvm); 665 666 ··· 4026 4025 { 4027 4026 struct kvm *kvm = me->kvm; 4028 4027 struct kvm_vcpu *vcpu; 4029 - int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 4028 + int last_boosted_vcpu; 4030 4029 unsigned long i; 4031 4030 int yielded = 0; 4032 4031 int try = 3; 4033 4032 int pass; 4034 4033 4034 + last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu); 4035 4035 kvm_vcpu_set_in_spin_loop(me, true); 4036 4036 /* 4037 4037 * We boost the priority of a VCPU that is runnable but not ··· 4070 4068 4071 4069 yielded = kvm_vcpu_yield_to(vcpu); 4072 4070 if (yielded > 0) { 4073 - kvm->last_boosted_vcpu = i; 4071 + WRITE_ONCE(kvm->last_boosted_vcpu, i); 4074 4072 break; 4075 4073 } else if (yielded < 0) { 4076 4074 try--; ··· 4429 4427 struct kvm_regs *kvm_regs; 4430 4428 4431 4429 r = -ENOMEM; 4432 - kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 4430 + kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 4433 4431 if (!kvm_regs) 4434 4432 goto out; 4435 4433 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); ··· 4456 4454 break; 4457 4455 } 4458 4456 case KVM_GET_SREGS: { 4459 - kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 4460 - GFP_KERNEL_ACCOUNT); 4457 + kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 4461 4458 r = -ENOMEM; 4462 4459 if (!kvm_sregs) 4463 4460 goto out; ··· 4548 4547 break; 4549 4548 } 4550 4549 case KVM_GET_FPU: { 4551 - fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 4550 + fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 4552 4551 r = -ENOMEM; 4553 4552 if (!fpu) 4554 4553 goto out; ··· 6211 6210 active = kvm_active_vms; 6212 6211 mutex_unlock(&kvm_lock); 
6213 6212 6214 - env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 6213 + env = kzalloc(sizeof(*env), GFP_KERNEL); 6215 6214 if (!env) 6216 6215 return; 6217 6216 ··· 6227 6226 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 6228 6227 6229 6228 if (!IS_ERR(kvm->debugfs_dentry)) { 6230 - char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 6229 + char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL); 6231 6230 6232 6231 if (p) { 6233 6232 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);