Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

net/core/dev.c
9f30831390ed ("net: add rcu safety to rtnl_prop_list_size()")
723de3ebef03 ("net: free altname using an RCU callback")

net/unix/garbage.c
11498715f266 ("af_unix: Remove io_uring code for GC.")
25236c91b5ab ("af_unix: Fix task hung while purging oob_skb in GC.")

drivers/net/ethernet/renesas/ravb_main.c
ed4adc07207d ("net: ravb: Count packets instead of descriptors in GbEth RX path")
c2da9408579d ("ravb: Add Rx checksum offload support for GbEth")

net/mptcp/protocol.c
bdd70eb68913 ("mptcp: drop the push_pending field")
28e5c1380506 ("mptcp: annotate lockless accesses around read-mostly fields")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2943 -1872
+7 -4
.mailmap
··· 191 191 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com> 192 192 Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com> 193 193 Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com> 194 - Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com> 195 - Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com> 196 - Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com> 197 - Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com> 194 + Geliang Tang <geliang@kernel.org> <geliang.tang@linux.dev> 195 + Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com> 196 + Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com> 197 + Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com> 198 + Geliang Tang <geliang@kernel.org> <geliangtang@163.com> 198 199 Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org> 199 200 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com> 200 201 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com> ··· 290 289 John Crispin <john@phrozen.org> <blogic@openwrt.org> 291 290 John Fastabend <john.fastabend@gmail.com> <john.r.fastabend@intel.com> 292 291 John Keeping <john@keeping.me.uk> <john@metanate.com> 292 + John Moon <john@jmoon.dev> <quic_johmoo@quicinc.com> 293 293 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> 294 294 John Stultz <johnstul@us.ibm.com> 295 295 <jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com> ··· 346 344 Leon Romanovsky <leon@kernel.org> <leon@leon.nu> 347 345 Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com> 348 346 Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com> 347 + Leo Yan <leo.yan@linux.dev> <leo.yan@linaro.org> 349 348 Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org> 350 349 Linas Vepstas <linas@austin.ibm.com> 351 350 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
+24 -24
Documentation/ABI/testing/sysfs-class-net-statistics
··· 1 - What: /sys/class/<iface>/statistics/collisions 1 + What: /sys/class/net/<iface>/statistics/collisions 2 2 Date: April 2005 3 3 KernelVersion: 2.6.12 4 4 Contact: netdev@vger.kernel.org ··· 6 6 Indicates the number of collisions seen by this network device. 7 7 This value might not be relevant with all MAC layers. 8 8 9 - What: /sys/class/<iface>/statistics/multicast 9 + What: /sys/class/net/<iface>/statistics/multicast 10 10 Date: April 2005 11 11 KernelVersion: 2.6.12 12 12 Contact: netdev@vger.kernel.org ··· 14 14 Indicates the number of multicast packets received by this 15 15 network device. 16 16 17 - What: /sys/class/<iface>/statistics/rx_bytes 17 + What: /sys/class/net/<iface>/statistics/rx_bytes 18 18 Date: April 2005 19 19 KernelVersion: 2.6.12 20 20 Contact: netdev@vger.kernel.org ··· 23 23 See the network driver for the exact meaning of when this 24 24 value is incremented. 25 25 26 - What: /sys/class/<iface>/statistics/rx_compressed 26 + What: /sys/class/net/<iface>/statistics/rx_compressed 27 27 Date: April 2005 28 28 KernelVersion: 2.6.12 29 29 Contact: netdev@vger.kernel.org ··· 32 32 network device. This value might only be relevant for interfaces 33 33 that support packet compression (e.g: PPP). 34 34 35 - What: /sys/class/<iface>/statistics/rx_crc_errors 35 + What: /sys/class/net/<iface>/statistics/rx_crc_errors 36 36 Date: April 2005 37 37 KernelVersion: 2.6.12 38 38 Contact: netdev@vger.kernel.org ··· 41 41 by this network device. Note that the specific meaning might 42 42 depend on the MAC layer used by the interface. 43 43 44 - What: /sys/class/<iface>/statistics/rx_dropped 44 + What: /sys/class/net/<iface>/statistics/rx_dropped 45 45 Date: April 2005 46 46 KernelVersion: 2.6.12 47 47 Contact: netdev@vger.kernel.org ··· 51 51 packet processing. See the network driver for the exact 52 52 meaning of this value. 
53 53 54 - What: /sys/class/<iface>/statistics/rx_errors 54 + What: /sys/class/net/<iface>/statistics/rx_errors 55 55 Date: April 2005 56 56 KernelVersion: 2.6.12 57 57 Contact: netdev@vger.kernel.org ··· 59 59 Indicates the number of receive errors on this network device. 60 60 See the network driver for the exact meaning of this value. 61 61 62 - What: /sys/class/<iface>/statistics/rx_fifo_errors 62 + What: /sys/class/net/<iface>/statistics/rx_fifo_errors 63 63 Date: April 2005 64 64 KernelVersion: 2.6.12 65 65 Contact: netdev@vger.kernel.org ··· 68 68 network device. See the network driver for the exact 69 69 meaning of this value. 70 70 71 - What: /sys/class/<iface>/statistics/rx_frame_errors 71 + What: /sys/class/net/<iface>/statistics/rx_frame_errors 72 72 Date: April 2005 73 73 KernelVersion: 2.6.12 74 74 Contact: netdev@vger.kernel.org ··· 78 78 on the MAC layer protocol used. See the network driver for 79 79 the exact meaning of this value. 80 80 81 - What: /sys/class/<iface>/statistics/rx_length_errors 81 + What: /sys/class/net/<iface>/statistics/rx_length_errors 82 82 Date: April 2005 83 83 KernelVersion: 2.6.12 84 84 Contact: netdev@vger.kernel.org ··· 87 87 error, oversized or undersized. See the network driver for the 88 88 exact meaning of this value. 89 89 90 - What: /sys/class/<iface>/statistics/rx_missed_errors 90 + What: /sys/class/net/<iface>/statistics/rx_missed_errors 91 91 Date: April 2005 92 92 KernelVersion: 2.6.12 93 93 Contact: netdev@vger.kernel.org ··· 96 96 due to lack of capacity in the receive side. See the network 97 97 driver for the exact meaning of this value. 98 98 99 - What: /sys/class/<iface>/statistics/rx_nohandler 99 + What: /sys/class/net/<iface>/statistics/rx_nohandler 100 100 Date: February 2016 101 101 KernelVersion: 4.6 102 102 Contact: netdev@vger.kernel.org ··· 104 104 Indicates the number of received packets that were dropped on 105 105 an inactive device by the network core. 
106 106 107 - What: /sys/class/<iface>/statistics/rx_over_errors 107 + What: /sys/class/net/<iface>/statistics/rx_over_errors 108 108 Date: April 2005 109 109 KernelVersion: 2.6.12 110 110 Contact: netdev@vger.kernel.org ··· 114 114 (e.g: larger than MTU). See the network driver for the exact 115 115 meaning of this value. 116 116 117 - What: /sys/class/<iface>/statistics/rx_packets 117 + What: /sys/class/net/<iface>/statistics/rx_packets 118 118 Date: April 2005 119 119 KernelVersion: 2.6.12 120 120 Contact: netdev@vger.kernel.org ··· 122 122 Indicates the total number of good packets received by this 123 123 network device. 124 124 125 - What: /sys/class/<iface>/statistics/tx_aborted_errors 125 + What: /sys/class/net/<iface>/statistics/tx_aborted_errors 126 126 Date: April 2005 127 127 KernelVersion: 2.6.12 128 128 Contact: netdev@vger.kernel.org ··· 132 132 a medium collision). See the network driver for the exact 133 133 meaning of this value. 134 134 135 - What: /sys/class/<iface>/statistics/tx_bytes 135 + What: /sys/class/net/<iface>/statistics/tx_bytes 136 136 Date: April 2005 137 137 KernelVersion: 2.6.12 138 138 Contact: netdev@vger.kernel.org ··· 143 143 transmitted packets or all packets that have been queued for 144 144 transmission. 145 145 146 - What: /sys/class/<iface>/statistics/tx_carrier_errors 146 + What: /sys/class/net/<iface>/statistics/tx_carrier_errors 147 147 Date: April 2005 148 148 KernelVersion: 2.6.12 149 149 Contact: netdev@vger.kernel.org ··· 152 152 because of carrier errors (e.g: physical link down). See the 153 153 network driver for the exact meaning of this value. 154 154 155 - What: /sys/class/<iface>/statistics/tx_compressed 155 + What: /sys/class/net/<iface>/statistics/tx_compressed 156 156 Date: April 2005 157 157 KernelVersion: 2.6.12 158 158 Contact: netdev@vger.kernel.org ··· 161 161 this might only be relevant for devices that support 162 162 compression (e.g: PPP). 
163 163 164 - What: /sys/class/<iface>/statistics/tx_dropped 164 + What: /sys/class/net/<iface>/statistics/tx_dropped 165 165 Date: April 2005 166 166 KernelVersion: 2.6.12 167 167 Contact: netdev@vger.kernel.org ··· 170 170 See the driver for the exact reasons as to why the packets were 171 171 dropped. 172 172 173 - What: /sys/class/<iface>/statistics/tx_errors 173 + What: /sys/class/net/<iface>/statistics/tx_errors 174 174 Date: April 2005 175 175 KernelVersion: 2.6.12 176 176 Contact: netdev@vger.kernel.org ··· 179 179 a network device. See the driver for the exact reasons as to 180 180 why the packets were dropped. 181 181 182 - What: /sys/class/<iface>/statistics/tx_fifo_errors 182 + What: /sys/class/net/<iface>/statistics/tx_fifo_errors 183 183 Date: April 2005 184 184 KernelVersion: 2.6.12 185 185 Contact: netdev@vger.kernel.org ··· 188 188 FIFO error. See the driver for the exact reasons as to why the 189 189 packets were dropped. 190 190 191 - What: /sys/class/<iface>/statistics/tx_heartbeat_errors 191 + What: /sys/class/net/<iface>/statistics/tx_heartbeat_errors 192 192 Date: April 2005 193 193 KernelVersion: 2.6.12 194 194 Contact: netdev@vger.kernel.org ··· 197 197 reported as heartbeat errors. See the driver for the exact 198 198 reasons as to why the packets were dropped. 199 199 200 - What: /sys/class/<iface>/statistics/tx_packets 200 + What: /sys/class/net/<iface>/statistics/tx_packets 201 201 Date: April 2005 202 202 KernelVersion: 2.6.12 203 203 Contact: netdev@vger.kernel.org ··· 206 206 device. See the driver for whether this reports the number of all 207 207 attempted or successful transmissions. 208 208 209 - What: /sys/class/<iface>/statistics/tx_window_errors 209 + What: /sys/class/net/<iface>/statistics/tx_window_errors 210 210 Date: April 2005 211 211 KernelVersion: 2.6.12 212 212 Contact: netdev@vger.kernel.org
+4 -1
Documentation/devicetree/bindings/Makefile
··· 28 28 find_all_cmd = find $(srctree)/$(src) \( -name '*.yaml' ! \ 29 29 -name 'processed-schema*' \) 30 30 31 - find_cmd = $(find_all_cmd) | sed 's|^$(srctree)/$(src)/||' | grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | sed 's|^|$(srctree)/$(src)/|' 31 + find_cmd = $(find_all_cmd) | \ 32 + sed 's|^$(srctree)/||' | \ 33 + grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | \ 34 + sed 's|^|$(srctree)/|' 32 35 CHK_DT_DOCS := $(shell $(find_cmd)) 33 36 34 37 quiet_cmd_yamllint = LINT $(src)
+2 -1
Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
··· 7 7 title: Ceva AHCI SATA Controller 8 8 9 9 maintainers: 10 - - Piyush Mehta <piyush.mehta@amd.com> 10 + - Mubin Sayyed <mubin.sayyed@amd.com> 11 + - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com> 11 12 12 13 description: | 13 14 The Ceva SATA controller mostly conforms to the AHCI interface with some
+5 -2
Documentation/devicetree/bindings/display/bridge/nxp,tda998x.yaml
··· 29 29 30 30 audio-ports: 31 31 description: 32 - Array of 8-bit values, 2 values per DAI (Documentation/sound/soc/dai.rst). 32 + Array of 2 values per DAI (Documentation/sound/soc/dai.rst). 33 33 The implementation allows one or two DAIs. 34 34 If two DAIs are defined, they must be of different type. 35 35 $ref: /schemas/types.yaml#/definitions/uint32-matrix 36 + minItems: 1 37 + maxItems: 2 36 38 items: 37 - minItems: 1 38 39 items: 39 40 - description: | 40 41 The first value defines the DAI type: TDA998x_SPDIF or TDA998x_I2S 41 42 (see include/dt-bindings/display/tda998x.h). 43 + enum: [ 1, 2 ] 42 44 - description: 43 45 The second value defines the tda998x AP_ENA reg content when the 44 46 DAI in question is used. 47 + maximum: 0xff 45 48 46 49 '#sound-dai-cells': 47 50 enum: [ 0, 1 ]
+2 -1
Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
··· 12 12 PS_MODE). Every pin can be configured as input/output. 13 13 14 14 maintainers: 15 - - Piyush Mehta <piyush.mehta@amd.com> 15 + - Mubin Sayyed <mubin.sayyed@amd.com> 16 + - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com> 16 17 17 18 properties: 18 19 compatible:
+2 -2
Documentation/devicetree/bindings/net/marvell,prestera.yaml
··· 78 78 pcie@0 { 79 79 #address-cells = <3>; 80 80 #size-cells = <2>; 81 - ranges = <0x0 0x0 0x0 0x0 0x0 0x0>; 82 - reg = <0x0 0x0 0x0 0x0 0x0 0x0>; 81 + ranges = <0x02000000 0x0 0x100000 0x10000000 0x0 0x0>; 82 + reg = <0x0 0x1000>; 83 83 device_type = "pci"; 84 84 85 85 switch@0,0 {
+2 -1
Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
··· 7 7 title: Zynq UltraScale+ MPSoC and Versal reset 8 8 9 9 maintainers: 10 - - Piyush Mehta <piyush.mehta@amd.com> 10 + - Mubin Sayyed <mubin.sayyed@amd.com> 11 + - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com> 11 12 12 13 description: | 13 14 The Zynq UltraScale+ MPSoC and Versal has several different resets.
+1 -1
Documentation/devicetree/bindings/tpm/tpm-common.yaml
··· 42 42 43 43 resets: 44 44 description: Reset controller to reset the TPM 45 - $ref: /schemas/types.yaml#/definitions/phandle 45 + maxItems: 1 46 46 47 47 reset-gpios: 48 48 description: Output GPIO pin to reset the TPM
+6 -3
Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
··· 55 55 56 56 samsung,sysreg: 57 57 $ref: /schemas/types.yaml#/definitions/phandle-array 58 - description: Should be phandle/offset pair. The phandle to the syscon node 59 - which indicates the FSYSx sysreg interface and the offset of 60 - the control register for UFS io coherency setting. 58 + items: 59 + - items: 60 + - description: phandle to FSYSx sysreg node 61 + - description: offset of the control register for UFS io coherency setting 62 + description: 63 + Phandle and offset to the FSYSx sysreg for UFS io coherency setting. 61 64 62 65 dma-coherent: true 63 66
+2 -1
Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
··· 7 7 title: Xilinx SuperSpeed DWC3 USB SoC controller 8 8 9 9 maintainers: 10 - - Piyush Mehta <piyush.mehta@amd.com> 10 + - Mubin Sayyed <mubin.sayyed@amd.com> 11 + - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com> 11 12 12 13 properties: 13 14 compatible:
+2 -1
Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
··· 16 16 USB 2.0 traffic. 17 17 18 18 maintainers: 19 - - Piyush Mehta <piyush.mehta@amd.com> 20 19 - Michal Simek <michal.simek@amd.com> 20 + - Mubin Sayyed <mubin.sayyed@amd.com> 21 + - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com> 21 22 22 23 properties: 23 24 compatible:
+2 -1
Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
··· 7 7 title: Xilinx udc controller 8 8 9 9 maintainers: 10 - - Piyush Mehta <piyush.mehta@amd.com> 10 + - Mubin Sayyed <mubin.sayyed@amd.com> 11 + - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com> 11 12 12 13 properties: 13 14 compatible:
-4
Documentation/netlink/specs/dpll.yaml
··· 423 423 - type 424 424 425 425 dump: 426 - pre: dpll-lock-dumpit 427 - post: dpll-unlock-dumpit 428 426 reply: *dev-attrs 429 427 430 428 - ··· 510 512 - fractional-frequency-offset 511 513 512 514 dump: 513 - pre: dpll-lock-dumpit 514 - post: dpll-unlock-dumpit 515 515 request: 516 516 attributes: 517 517 - id
+2 -2
Documentation/networking/net_cachelines/net_device.rst
··· 136 136 possible_net_t nd_net - read_mostly (dev_net)napi_busy_loop,tcp_v(4/6)_rcv,ip(v6)_rcv,ip(6)_input,ip(6)_input_finish 137 137 void* ml_priv 138 138 enum_netdev_ml_priv_type ml_priv_type 139 - struct_pcpu_lstats__percpu* lstats 140 - struct_pcpu_sw_netstats__percpu* tstats 139 + struct_pcpu_lstats__percpu* lstats read_mostly dev_lstats_add() 140 + struct_pcpu_sw_netstats__percpu* tstats read_mostly dev_sw_netstats_tx_add() 141 141 struct_pcpu_dstats__percpu* dstats 142 142 struct_garp_port* garp_port 143 143 struct_mrp_port* mrp_port
+2 -2
Documentation/networking/net_cachelines/tcp_sock.rst
··· 38 38 u32 mss_cache read_mostly read_mostly tcp_rate_check_app_limited,tcp_current_mss,tcp_sync_mss,tcp_sndbuf_expand,tcp_tso_should_defer(tx);tcp_update_pacing_rate,tcp_clean_rtx_queue(rx) 39 39 u32 window_clamp read_mostly read_write tcp_rcv_space_adjust,__tcp_select_window 40 40 u32 rcv_ssthresh read_mostly - __tcp_select_window 41 - u82 scaling_ratio 41 + u8 scaling_ratio read_mostly read_mostly tcp_win_from_space 42 42 struct tcp_rack 43 43 u16 advmss - read_mostly tcp_rcv_space_adjust 44 44 u8 compressed_ack 45 45 u8:2 dup_ack_counter 46 46 u8:1 tlp_retrans 47 - u8:1 tcp_usec_ts 47 + u8:1 tcp_usec_ts read_mostly read_mostly 48 48 u32 chrono_start read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data) 49 49 u32[3] chrono_stat read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data) 50 50 u8:2 chrono_type read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)
+1 -1
Documentation/sphinx/kernel_feat.py
··· 109 109 else: 110 110 out_lines += line + "\n" 111 111 112 - nodeList = self.nestedParse(out_lines, fname) 112 + nodeList = self.nestedParse(out_lines, self.arguments[0]) 113 113 return nodeList 114 114 115 115 def nestedParse(self, lines, fname):
+4 -4
MAINTAINERS
··· 10811 10811 10812 10812 INTEL GVT-g DRIVERS (Intel GPU Virtualization) 10813 10813 M: Zhenyu Wang <zhenyuw@linux.intel.com> 10814 - M: Zhi Wang <zhi.a.wang@intel.com> 10814 + M: Zhi Wang <zhi.wang.linux@gmail.com> 10815 10815 L: intel-gvt-dev@lists.freedesktop.org 10816 10816 L: intel-gfx@lists.freedesktop.org 10817 10817 S: Supported 10818 - W: https://01.org/igvt-g 10818 + W: https://github.com/intel/gvt-linux/wiki 10819 10819 T: git https://github.com/intel/gvt-linux.git 10820 10820 F: drivers/gpu/drm/i915/gvt/ 10821 10821 ··· 15344 15344 NETWORKING [MPTCP] 15345 15345 M: Matthieu Baerts <matttbe@kernel.org> 15346 15346 M: Mat Martineau <martineau@kernel.org> 15347 - R: Geliang Tang <geliang.tang@linux.dev> 15347 + R: Geliang Tang <geliang@kernel.org> 15348 15348 L: netdev@vger.kernel.org 15349 15349 L: mptcp@lists.linux.dev 15350 15350 S: Maintained ··· 17202 17202 R: Will Deacon <will@kernel.org> 17203 17203 R: James Clark <james.clark@arm.com> 17204 17204 R: Mike Leach <mike.leach@linaro.org> 17205 - R: Leo Yan <leo.yan@linaro.org> 17205 + R: Leo Yan <leo.yan@linux.dev> 17206 17206 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 17207 17207 S: Supported 17208 17208 F: tools/build/feature/test-libopencsd.c
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 8 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc4 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION*
+2 -2
arch/arc/include/asm/jump_label.h
··· 31 31 static __always_inline bool arch_static_branch(struct static_key *key, 32 32 bool branch) 33 33 { 34 - asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n" 34 + asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n" 35 35 "1: \n" 36 36 "nop \n" 37 37 ".pushsection __jump_table, \"aw\" \n" ··· 47 47 static __always_inline bool arch_static_branch_jump(struct static_key *key, 48 48 bool branch) 49 49 { 50 - asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n" 50 + asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n" 51 51 "1: \n" 52 52 "b %l[l_yes] \n" 53 53 ".pushsection __jump_table, \"aw\" \n"
+2 -2
arch/arm/include/asm/jump_label.h
··· 11 11 12 12 static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 13 13 { 14 - asm_volatile_goto("1:\n\t" 14 + asm goto("1:\n\t" 15 15 WASM(nop) "\n\t" 16 16 ".pushsection __jump_table, \"aw\"\n\t" 17 17 ".word 1b, %l[l_yes], %c0\n\t" ··· 25 25 26 26 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) 27 27 { 28 - asm_volatile_goto("1:\n\t" 28 + asm goto("1:\n\t" 29 29 WASM(b) " %l[l_yes]\n\t" 30 30 ".pushsection __jump_table, \"aw\"\n\t" 31 31 ".word 1b, %l[l_yes], %c0\n\t"
+2
arch/arm/mm/fault.c
··· 298 298 goto done; 299 299 } 300 300 count_vm_vma_lock_event(VMA_LOCK_RETRY); 301 + if (fault & VM_FAULT_MAJOR) 302 + flags |= FAULT_FLAG_TRIED; 301 303 302 304 /* Quick path to respond to signals */ 303 305 if (fault_signal_pending(fault, regs)) {
+2 -2
arch/arm64/include/asm/alternative-macros.h
··· 229 229 if (!cpucap_is_possible(cpucap)) 230 230 return false; 231 231 232 - asm_volatile_goto( 232 + asm goto( 233 233 ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops) 234 234 : 235 235 : [cpucap] "i" (cpucap) ··· 247 247 if (!cpucap_is_possible(cpucap)) 248 248 return false; 249 249 250 - asm_volatile_goto( 250 + asm goto( 251 251 ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap]) 252 252 : 253 253 : [cpucap] "i" (cpucap)
+2 -2
arch/arm64/include/asm/jump_label.h
··· 18 18 static __always_inline bool arch_static_branch(struct static_key * const key, 19 19 const bool branch) 20 20 { 21 - asm_volatile_goto( 21 + asm goto( 22 22 "1: nop \n\t" 23 23 " .pushsection __jump_table, \"aw\" \n\t" 24 24 " .align 3 \n\t" ··· 35 35 static __always_inline bool arch_static_branch_jump(struct static_key * const key, 36 36 const bool branch) 37 37 { 38 - asm_volatile_goto( 38 + asm goto( 39 39 "1: b %l[l_yes] \n\t" 40 40 " .pushsection __jump_table, \"aw\" \n\t" 41 41 " .align 3 \n\t"
+2 -2
arch/csky/include/asm/jump_label.h
··· 12 12 static __always_inline bool arch_static_branch(struct static_key *key, 13 13 bool branch) 14 14 { 15 - asm_volatile_goto( 15 + asm goto( 16 16 "1: nop32 \n" 17 17 " .pushsection __jump_table, \"aw\" \n" 18 18 " .align 2 \n" ··· 29 29 static __always_inline bool arch_static_branch_jump(struct static_key *key, 30 30 bool branch) 31 31 { 32 - asm_volatile_goto( 32 + asm goto( 33 33 "1: bsr32 %l[label] \n" 34 34 " .pushsection __jump_table, \"aw\" \n" 35 35 " .align 2 \n"
+2 -2
arch/loongarch/include/asm/jump_label.h
··· 22 22 23 23 static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) 24 24 { 25 - asm_volatile_goto( 25 + asm goto( 26 26 "1: nop \n\t" 27 27 JUMP_TABLE_ENTRY 28 28 : : "i"(&((char *)key)[branch]) : : l_yes); ··· 35 35 36 36 static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch) 37 37 { 38 - asm_volatile_goto( 38 + asm goto( 39 39 "1: b %l[l_yes] \n\t" 40 40 JUMP_TABLE_ENTRY 41 41 : : "i"(&((char *)key)[branch]) : : l_yes);
+2 -1
arch/mips/include/asm/checksum.h
··· 241 241 " .set pop" 242 242 : "=&r" (sum), "=&r" (tmp) 243 243 : "r" (saddr), "r" (daddr), 244 - "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)); 244 + "0" (htonl(len)), "r" (htonl(proto)), "r" (sum) 245 + : "memory"); 245 246 246 247 return csum_fold(sum); 247 248 }
+2 -2
arch/mips/include/asm/jump_label.h
··· 39 39 40 40 static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 41 41 { 42 - asm_volatile_goto("1:\t" B_INSN " 2f\n\t" 42 + asm goto("1:\t" B_INSN " 2f\n\t" 43 43 "2:\t.insn\n\t" 44 44 ".pushsection __jump_table, \"aw\"\n\t" 45 45 WORD_INSN " 1b, %l[l_yes], %0\n\t" ··· 53 53 54 54 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) 55 55 { 56 - asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t" 56 + asm goto("1:\t" J_INSN " %l[l_yes]\n\t" 57 57 ".pushsection __jump_table, \"aw\"\n\t" 58 58 WORD_INSN " 1b, %l[l_yes], %0\n\t" 59 59 ".popsection\n\t"
+3
arch/mips/include/asm/ptrace.h
··· 60 60 unsigned long val) 61 61 { 62 62 regs->cp0_epc = val; 63 + regs->cp0_cause &= ~CAUSEF_BD; 63 64 } 64 65 65 66 /* Query offset/name of register from its name/offset */ ··· 155 154 } 156 155 157 156 #define instruction_pointer(regs) ((regs)->cp0_epc) 157 + extern unsigned long exception_ip(struct pt_regs *regs); 158 + #define exception_ip(regs) exception_ip(regs) 158 159 #define profile_pc(regs) instruction_pointer(regs) 159 160 160 161 extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
+7
arch/mips/kernel/ptrace.c
··· 31 31 #include <linux/seccomp.h> 32 32 #include <linux/ftrace.h> 33 33 34 + #include <asm/branch.h> 34 35 #include <asm/byteorder.h> 35 36 #include <asm/cpu.h> 36 37 #include <asm/cpu-info.h> ··· 48 47 49 48 #define CREATE_TRACE_POINTS 50 49 #include <trace/events/syscalls.h> 50 + 51 + unsigned long exception_ip(struct pt_regs *regs) 52 + { 53 + return exception_epc(regs); 54 + } 55 + EXPORT_SYMBOL(exception_ip); 51 56 52 57 /* 53 58 * Called by kernel/ptrace.c when detaching..
+2 -2
arch/parisc/include/asm/jump_label.h
··· 12 12 13 13 static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 14 14 { 15 - asm_volatile_goto("1:\n\t" 15 + asm goto("1:\n\t" 16 16 "nop\n\t" 17 17 ".pushsection __jump_table, \"aw\"\n\t" 18 18 ".align %1\n\t" ··· 29 29 30 30 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) 31 31 { 32 - asm_volatile_goto("1:\n\t" 32 + asm goto("1:\n\t" 33 33 "b,n %l[l_yes]\n\t" 34 34 ".pushsection __jump_table, \"aw\"\n\t" 35 35 ".align %1\n\t"
+2 -2
arch/powerpc/include/asm/jump_label.h
··· 17 17 18 18 static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 19 19 { 20 - asm_volatile_goto("1:\n\t" 20 + asm goto("1:\n\t" 21 21 "nop # arch_static_branch\n\t" 22 22 ".pushsection __jump_table, \"aw\"\n\t" 23 23 ".long 1b - ., %l[l_yes] - .\n\t" ··· 32 32 33 33 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) 34 34 { 35 - asm_volatile_goto("1:\n\t" 35 + asm goto("1:\n\t" 36 36 "b %l[l_yes] # arch_static_branch_jump\n\t" 37 37 ".pushsection __jump_table, \"aw\"\n\t" 38 38 ".long 1b - ., %l[l_yes] - .\n\t"
+6 -6
arch/powerpc/include/asm/uaccess.h
··· 74 74 /* -mprefixed can generate offsets beyond range, fall back hack */ 75 75 #ifdef CONFIG_PPC_KERNEL_PREFIXED 76 76 #define __put_user_asm_goto(x, addr, label, op) \ 77 - asm_volatile_goto( \ 77 + asm goto( \ 78 78 "1: " op " %0,0(%1) # put_user\n" \ 79 79 EX_TABLE(1b, %l2) \ 80 80 : \ ··· 83 83 : label) 84 84 #else 85 85 #define __put_user_asm_goto(x, addr, label, op) \ 86 - asm_volatile_goto( \ 86 + asm goto( \ 87 87 "1: " op "%U1%X1 %0,%1 # put_user\n" \ 88 88 EX_TABLE(1b, %l2) \ 89 89 : \ ··· 97 97 __put_user_asm_goto(x, ptr, label, "std") 98 98 #else /* __powerpc64__ */ 99 99 #define __put_user_asm2_goto(x, addr, label) \ 100 - asm_volatile_goto( \ 100 + asm goto( \ 101 101 "1: stw%X1 %0, %1\n" \ 102 102 "2: stw%X1 %L0, %L1\n" \ 103 103 EX_TABLE(1b, %l2) \ ··· 146 146 /* -mprefixed can generate offsets beyond range, fall back hack */ 147 147 #ifdef CONFIG_PPC_KERNEL_PREFIXED 148 148 #define __get_user_asm_goto(x, addr, label, op) \ 149 - asm_volatile_goto( \ 149 + asm_goto_output( \ 150 150 "1: "op" %0,0(%1) # get_user\n" \ 151 151 EX_TABLE(1b, %l2) \ 152 152 : "=r" (x) \ ··· 155 155 : label) 156 156 #else 157 157 #define __get_user_asm_goto(x, addr, label, op) \ 158 - asm_volatile_goto( \ 158 + asm_goto_output( \ 159 159 "1: "op"%U1%X1 %0, %1 # get_user\n" \ 160 160 EX_TABLE(1b, %l2) \ 161 161 : "=r" (x) \ ··· 169 169 __get_user_asm_goto(x, addr, label, "ld") 170 170 #else /* __powerpc64__ */ 171 171 #define __get_user_asm2_goto(x, addr, label) \ 172 - asm_volatile_goto( \ 172 + asm_goto_output( \ 173 173 "1: lwz%X1 %0, %1\n" \ 174 174 "2: lwz%X1 %L0, %L1\n" \ 175 175 EX_TABLE(1b, %l2) \
+1 -1
arch/powerpc/kernel/irq_64.c
··· 230 230 * This allows interrupts to be unmasked without hard disabling, and 231 231 * also without new hard interrupts coming in ahead of pending ones. 232 232 */ 233 - asm_volatile_goto( 233 + asm goto( 234 234 "1: \n" 235 235 " lbz 9,%0(13) \n" 236 236 " cmpwi 9,0 \n"
+2 -2
arch/riscv/include/asm/arch_hweight.h
··· 20 20 static __always_inline unsigned int __arch_hweight32(unsigned int w) 21 21 { 22 22 #ifdef CONFIG_RISCV_ISA_ZBB 23 - asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 23 + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 24 24 RISCV_ISA_EXT_ZBB, 1) 25 25 : : : : legacy); 26 26 ··· 51 51 static __always_inline unsigned long __arch_hweight64(__u64 w) 52 52 { 53 53 # ifdef CONFIG_RISCV_ISA_ZBB 54 - asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 54 + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 55 55 RISCV_ISA_EXT_ZBB, 1) 56 56 : : : : legacy); 57 57
+4 -4
arch/riscv/include/asm/bitops.h
··· 39 39 { 40 40 int num; 41 41 42 - asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 42 + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 43 43 RISCV_ISA_EXT_ZBB, 1) 44 44 : : : : legacy); 45 45 ··· 95 95 { 96 96 int num; 97 97 98 - asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 98 + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 99 99 RISCV_ISA_EXT_ZBB, 1) 100 100 : : : : legacy); 101 101 ··· 154 154 if (!x) 155 155 return 0; 156 156 157 - asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 157 + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 158 158 RISCV_ISA_EXT_ZBB, 1) 159 159 : : : : legacy); 160 160 ··· 209 209 if (!x) 210 210 return 0; 211 211 212 - asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 212 + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, 213 213 RISCV_ISA_EXT_ZBB, 1) 214 214 : : : : legacy); 215 215
+1 -1
arch/riscv/include/asm/checksum.h
··· 53 53 IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { 54 54 unsigned long fold_temp; 55 55 56 - asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, 56 + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, 57 57 RISCV_ISA_EXT_ZBB, 1) 58 58 : 59 59 :
+2 -2
arch/riscv/include/asm/cpufeature.h
··· 80 80 "ext must be < RISCV_ISA_EXT_MAX"); 81 81 82 82 if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { 83 - asm_volatile_goto( 83 + asm goto( 84 84 ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) 85 85 : 86 86 : [ext] "i" (ext) ··· 103 103 "ext must be < RISCV_ISA_EXT_MAX"); 104 104 105 105 if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { 106 - asm_volatile_goto( 106 + asm goto( 107 107 ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) 108 108 : 109 109 : [ext] "i" (ext)
+3
arch/riscv/include/asm/hugetlb.h
··· 11 11 } 12 12 #define arch_clear_hugepage_flags arch_clear_hugepage_flags 13 13 14 + bool arch_hugetlb_migration_supported(struct hstate *h); 15 + #define arch_hugetlb_migration_supported arch_hugetlb_migration_supported 16 + 14 17 #ifdef CONFIG_RISCV_ISA_SVNAPOT 15 18 #define __HAVE_ARCH_HUGE_PTE_CLEAR 16 19 void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+2 -2
arch/riscv/include/asm/jump_label.h
··· 17 17 static __always_inline bool arch_static_branch(struct static_key * const key, 18 18 const bool branch) 19 19 { 20 - asm_volatile_goto( 20 + asm goto( 21 21 " .align 2 \n\t" 22 22 " .option push \n\t" 23 23 " .option norelax \n\t" ··· 39 39 static __always_inline bool arch_static_branch_jump(struct static_key * const key, 40 40 const bool branch) 41 41 { 42 - asm_volatile_goto( 42 + asm goto( 43 43 " .align 2 \n\t" 44 44 " .option push \n\t" 45 45 " .option norelax \n\t"
+5
arch/riscv/include/asm/stacktrace.h
··· 21 21 return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); 22 22 } 23 23 24 + 25 + #ifdef CONFIG_VMAP_STACK 26 + DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); 27 + #endif /* CONFIG_VMAP_STACK */ 28 + 24 29 #endif /* _ASM_RISCV_STACKTRACE_H */
+1 -1
arch/riscv/include/asm/tlb.h
··· 16 16 static inline void tlb_flush(struct mmu_gather *tlb) 17 17 { 18 18 #ifdef CONFIG_MMU 19 - if (tlb->fullmm || tlb->need_flush_all) 19 + if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables) 20 20 flush_tlb_mm(tlb->mm); 21 21 else 22 22 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+1
arch/riscv/include/asm/tlbflush.h
··· 75 75 76 76 #define flush_tlb_mm(mm) flush_tlb_all() 77 77 #define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all() 78 + #define local_flush_tlb_kernel_range(start, end) flush_tlb_all() 78 79 #endif /* !CONFIG_SMP || !CONFIG_MMU */ 79 80 80 81 #endif /* _ASM_RISCV_TLBFLUSH_H */
+5 -5
arch/riscv/lib/csum.c
··· 53 53 * support, so nop when Zbb is available and jump when Zbb is 54 54 * not available. 55 55 */ 56 - asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, 56 + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, 57 57 RISCV_ISA_EXT_ZBB, 1) 58 58 : 59 59 : ··· 170 170 * support, so nop when Zbb is available and jump when Zbb is 171 171 * not available. 172 172 */ 173 - asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, 173 + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, 174 174 RISCV_ISA_EXT_ZBB, 1) 175 175 : 176 176 : ··· 178 178 : no_zbb); 179 179 180 180 #ifdef CONFIG_32BIT 181 - asm_volatile_goto(".option push \n\ 181 + asm_goto_output(".option push \n\ 182 182 .option arch,+zbb \n\ 183 183 rori %[fold_temp], %[csum], 16 \n\ 184 184 andi %[offset], %[offset], 1 \n\ ··· 193 193 194 194 return (unsigned short)csum; 195 195 #else /* !CONFIG_32BIT */ 196 - asm_volatile_goto(".option push \n\ 196 + asm_goto_output(".option push \n\ 197 197 .option arch,+zbb \n\ 198 198 rori %[fold_temp], %[csum], 32 \n\ 199 199 add %[csum], %[fold_temp], %[csum] \n\ ··· 257 257 * support, so nop when Zbb is available and jump when Zbb is 258 258 * not available. 259 259 */ 260 - asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, 260 + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, 261 261 RISCV_ISA_EXT_ZBB, 1) 262 262 : 263 263 :
+73 -5
arch/riscv/mm/hugetlbpage.c
··· 125 125 return pte; 126 126 } 127 127 128 + unsigned long hugetlb_mask_last_page(struct hstate *h) 129 + { 130 + unsigned long hp_size = huge_page_size(h); 131 + 132 + switch (hp_size) { 133 + #ifndef __PAGETABLE_PMD_FOLDED 134 + case PUD_SIZE: 135 + return P4D_SIZE - PUD_SIZE; 136 + #endif 137 + case PMD_SIZE: 138 + return PUD_SIZE - PMD_SIZE; 139 + case napot_cont_size(NAPOT_CONT64KB_ORDER): 140 + return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER); 141 + default: 142 + break; 143 + } 144 + 145 + return 0UL; 146 + } 147 + 128 148 static pte_t get_clear_contig(struct mm_struct *mm, 129 149 unsigned long addr, 130 150 pte_t *ptep, ··· 197 177 return entry; 198 178 } 199 179 180 + static void clear_flush(struct mm_struct *mm, 181 + unsigned long addr, 182 + pte_t *ptep, 183 + unsigned long pgsize, 184 + unsigned long ncontig) 185 + { 186 + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); 187 + unsigned long i, saddr = addr; 188 + 189 + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) 190 + ptep_get_and_clear(mm, addr, ptep); 191 + 192 + flush_tlb_range(&vma, saddr, addr); 193 + } 194 + 195 + /* 196 + * When dealing with NAPOT mappings, the privileged specification indicates that 197 + * "if an update needs to be made, the OS generally should first mark all of the 198 + * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions 199 + * within the range, [...] then update the PTE(s), as described in Section 200 + * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by 201 + * arm64. 
202 + */ 200 203 void set_huge_pte_at(struct mm_struct *mm, 201 204 unsigned long addr, 202 205 pte_t *ptep, 203 206 pte_t pte, 204 207 unsigned long sz) 205 208 { 206 - unsigned long hugepage_shift; 209 + unsigned long hugepage_shift, pgsize; 207 210 int i, pte_num; 208 211 209 212 if (sz >= PGDIR_SIZE) ··· 241 198 hugepage_shift = PAGE_SHIFT; 242 199 243 200 pte_num = sz >> hugepage_shift; 244 - for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift)) 201 + pgsize = 1 << hugepage_shift; 202 + 203 + if (!pte_present(pte)) { 204 + for (i = 0; i < pte_num; i++, ptep++, addr += pgsize) 205 + set_ptes(mm, addr, ptep, pte, 1); 206 + return; 207 + } 208 + 209 + if (!pte_napot(pte)) { 210 + set_ptes(mm, addr, ptep, pte, 1); 211 + return; 212 + } 213 + 214 + clear_flush(mm, addr, ptep, pgsize, pte_num); 215 + 216 + for (i = 0; i < pte_num; i++, ptep++, addr += pgsize) 245 217 set_pte_at(mm, addr, ptep, pte); 246 218 } 247 219 ··· 364 306 pte_clear(mm, addr, ptep); 365 307 } 366 308 367 - static __init bool is_napot_size(unsigned long size) 309 + static bool is_napot_size(unsigned long size) 368 310 { 369 311 unsigned long order; 370 312 ··· 392 334 393 335 #else 394 336 395 - static __init bool is_napot_size(unsigned long size) 337 + static bool is_napot_size(unsigned long size) 396 338 { 397 339 return false; 398 340 } ··· 409 351 return pmd_leaf(pmd); 410 352 } 411 353 412 - bool __init arch_hugetlb_valid_size(unsigned long size) 354 + static bool __hugetlb_valid_size(unsigned long size) 413 355 { 414 356 if (size == HPAGE_SIZE) 415 357 return true; ··· 419 361 return true; 420 362 else 421 363 return false; 364 + } 365 + 366 + bool __init arch_hugetlb_valid_size(unsigned long size) 367 + { 368 + return __hugetlb_valid_size(size); 369 + } 370 + 371 + bool arch_hugetlb_migration_supported(struct hstate *h) 372 + { 373 + return __hugetlb_valid_size(huge_page_size(h)); 422 374 } 423 375 424 376 #ifdef CONFIG_CONTIG_ALLOC
+4
arch/riscv/mm/init.c
··· 1385 1385 early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); 1386 1386 arch_numa_init(); 1387 1387 sparse_init(); 1388 + #ifdef CONFIG_SPARSEMEM_VMEMMAP 1389 + /* The entire VMEMMAP region has been populated. Flush TLB for this region */ 1390 + local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END); 1391 + #endif 1388 1392 zone_sizes_init(); 1389 1393 arch_reserve_crashkernel(); 1390 1394 memblock_dump_all();
+2
arch/riscv/mm/tlbflush.c
··· 66 66 local_flush_tlb_range_threshold_asid(start, size, stride, asid); 67 67 } 68 68 69 + /* Flush a range of kernel pages without broadcasting */ 69 70 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) 70 71 { 71 72 local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID); ··· 234 233 { 235 234 __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0, 236 235 FLUSH_TLB_MAX_SIZE, PAGE_SIZE); 236 + cpumask_clear(&batch->cpumask); 237 237 }
+2 -2
arch/s390/include/asm/jump_label.h
··· 25 25 */ 26 26 static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 27 27 { 28 - asm_volatile_goto("0: brcl 0,%l[label]\n" 28 + asm goto("0: brcl 0,%l[label]\n" 29 29 ".pushsection __jump_table,\"aw\"\n" 30 30 ".balign 8\n" 31 31 ".long 0b-.,%l[label]-.\n" ··· 39 39 40 40 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) 41 41 { 42 - asm_volatile_goto("0: brcl 15,%l[label]\n" 42 + asm goto("0: brcl 15,%l[label]\n" 43 43 ".pushsection __jump_table,\"aw\"\n" 44 44 ".balign 8\n" 45 45 ".long 0b-.,%l[label]-.\n"
+2 -2
arch/sparc/include/asm/jump_label.h
··· 10 10 11 11 static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 12 12 { 13 - asm_volatile_goto("1:\n\t" 13 + asm goto("1:\n\t" 14 14 "nop\n\t" 15 15 "nop\n\t" 16 16 ".pushsection __jump_table, \"aw\"\n\t" ··· 26 26 27 27 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) 28 28 { 29 - asm_volatile_goto("1:\n\t" 29 + asm goto("1:\n\t" 30 30 "b %l[l_yes]\n\t" 31 31 "nop\n\t" 32 32 ".pushsection __jump_table, \"aw\"\n\t"
+1 -1
arch/um/include/asm/cpufeature.h
··· 75 75 */ 76 76 static __always_inline bool _static_cpu_has(u16 bit) 77 77 { 78 - asm_volatile_goto("1: jmp 6f\n" 78 + asm goto("1: jmp 6f\n" 79 79 "2:\n" 80 80 ".skip -(((5f-4f) - (2b-1b)) > 0) * " 81 81 "((5f-4f) - (2b-1b)),0x90\n"
+1 -1
arch/x86/Kconfig.cpu
··· 379 379 config X86_MINIMUM_CPU_FAMILY 380 380 int 381 381 default "64" if X86_64 382 - default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8) 382 + default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8) 383 383 default "5" if X86_32 && X86_CMPXCHG64 384 384 default "4" 385 385
+6 -8
arch/x86/boot/header.S
··· 106 106 .word 0 # MinorSubsystemVersion 107 107 .long 0 # Win32VersionValue 108 108 109 - .long setup_size + ZO__end + pecompat_vsize 110 - # SizeOfImage 109 + .long setup_size + ZO__end # SizeOfImage 111 110 112 111 .long salign # SizeOfHeaders 113 112 .long 0 # CheckSum ··· 142 143 .ascii ".setup" 143 144 .byte 0 144 145 .byte 0 145 - .long setup_size - salign # VirtualSize 146 + .long pecompat_fstart - salign # VirtualSize 146 147 .long salign # VirtualAddress 147 148 .long pecompat_fstart - salign # SizeOfRawData 148 149 .long salign # PointerToRawData ··· 155 156 #ifdef CONFIG_EFI_MIXED 156 157 .asciz ".compat" 157 158 158 - .long 8 # VirtualSize 159 - .long setup_size + ZO__end # VirtualAddress 159 + .long pecompat_fsize # VirtualSize 160 + .long pecompat_fstart # VirtualAddress 160 161 .long pecompat_fsize # SizeOfRawData 161 162 .long pecompat_fstart # PointerToRawData 162 163 ··· 171 172 * modes this image supports. 172 173 */ 173 174 .pushsection ".pecompat", "a", @progbits 174 - .balign falign 175 - .set pecompat_vsize, salign 175 + .balign salign 176 176 .globl pecompat_fstart 177 177 pecompat_fstart: 178 178 .byte 0x1 # Version 179 179 .byte 8 # Size 180 180 .word IMAGE_FILE_MACHINE_I386 # PE machine type 181 181 .long setup_size + ZO_efi32_pe_entry # Entrypoint 182 + .byte 0x0 # Sentinel 182 183 .popsection 183 184 #else 184 - .set pecompat_vsize, 0 185 185 .set pecompat_fstart, setup_size 186 186 #endif 187 187 .ascii ".text"
+3 -3
arch/x86/boot/setup.ld
··· 24 24 .text : { *(.text .text.*) } 25 25 .text32 : { *(.text32) } 26 26 27 + .pecompat : { *(.pecompat) } 28 + PROVIDE(pecompat_fsize = setup_size - pecompat_fstart); 29 + 27 30 . = ALIGN(16); 28 31 .rodata : { *(.rodata*) } 29 32 ··· 38 35 39 36 . = ALIGN(16); 40 37 .data : { *(.data*) } 41 - 42 - .pecompat : { *(.pecompat) } 43 - PROVIDE(pecompat_fsize = setup_size - pecompat_fstart); 44 38 45 39 .signature : { 46 40 setup_sig = .;
+1 -1
arch/x86/include/asm/cpufeature.h
··· 168 168 */ 169 169 static __always_inline bool _static_cpu_has(u16 bit) 170 170 { 171 - asm_volatile_goto( 171 + asm goto( 172 172 ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]") 173 173 ".pushsection .altinstr_aux,\"ax\"\n" 174 174 "6:\n"
+3 -3
arch/x86/include/asm/jump_label.h
··· 24 24 25 25 static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 26 26 { 27 - asm_volatile_goto("1:" 27 + asm goto("1:" 28 28 "jmp %l[l_yes] # objtool NOPs this \n\t" 29 29 JUMP_TABLE_ENTRY 30 30 : : "i" (key), "i" (2 | branch) : : l_yes); ··· 38 38 39 39 static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) 40 40 { 41 - asm_volatile_goto("1:" 41 + asm goto("1:" 42 42 ".byte " __stringify(BYTES_NOP5) "\n\t" 43 43 JUMP_TABLE_ENTRY 44 44 : : "i" (key), "i" (branch) : : l_yes); ··· 52 52 53 53 static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch) 54 54 { 55 - asm_volatile_goto("1:" 55 + asm goto("1:" 56 56 "jmp %l[l_yes]\n\t" 57 57 JUMP_TABLE_ENTRY 58 58 : : "i" (key), "i" (branch) : : l_yes);
+1 -1
arch/x86/include/asm/rmwcc.h
··· 13 13 #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ 14 14 ({ \ 15 15 bool c = false; \ 16 - asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \ 16 + asm goto (fullop "; j" #cc " %l[cc_label]" \ 17 17 : : [var] "m" (_var), ## __VA_ARGS__ \ 18 18 : clobbers : cc_label); \ 19 19 if (0) { \
+1 -1
arch/x86/include/asm/special_insns.h
··· 205 205 #ifdef CONFIG_X86_USER_SHADOW_STACK 206 206 static inline int write_user_shstk_64(u64 __user *addr, u64 val) 207 207 { 208 - asm_volatile_goto("1: wrussq %[val], (%[addr])\n" 208 + asm goto("1: wrussq %[val], (%[addr])\n" 209 209 _ASM_EXTABLE(1b, %l[fail]) 210 210 :: [addr] "r" (addr), [val] "r" (val) 211 211 :: fail);
+5 -5
arch/x86/include/asm/uaccess.h
··· 133 133 134 134 #ifdef CONFIG_X86_32 135 135 #define __put_user_goto_u64(x, addr, label) \ 136 - asm_volatile_goto("\n" \ 136 + asm goto("\n" \ 137 137 "1: movl %%eax,0(%1)\n" \ 138 138 "2: movl %%edx,4(%1)\n" \ 139 139 _ASM_EXTABLE_UA(1b, %l2) \ ··· 295 295 } while (0) 296 296 297 297 #define __get_user_asm(x, addr, itype, ltype, label) \ 298 - asm_volatile_goto("\n" \ 298 + asm_goto_output("\n" \ 299 299 "1: mov"itype" %[umem],%[output]\n" \ 300 300 _ASM_EXTABLE_UA(1b, %l2) \ 301 301 : [output] ltype(x) \ ··· 375 375 __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ 376 376 __typeof__(*(_ptr)) __old = *_old; \ 377 377 __typeof__(*(_ptr)) __new = (_new); \ 378 - asm_volatile_goto("\n" \ 378 + asm_goto_output("\n" \ 379 379 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 380 380 _ASM_EXTABLE_UA(1b, %l[label]) \ 381 381 : CC_OUT(z) (success), \ ··· 394 394 __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ 395 395 __typeof__(*(_ptr)) __old = *_old; \ 396 396 __typeof__(*(_ptr)) __new = (_new); \ 397 - asm_volatile_goto("\n" \ 397 + asm_goto_output("\n" \ 398 398 "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \ 399 399 _ASM_EXTABLE_UA(1b, %l[label]) \ 400 400 : CC_OUT(z) (success), \ ··· 477 477 * aliasing issues. 478 478 */ 479 479 #define __put_user_goto(x, addr, itype, ltype, label) \ 480 - asm_volatile_goto("\n" \ 480 + asm goto("\n" \ 481 481 "1: mov"itype" %0,%1\n" \ 482 482 _ASM_EXTABLE_UA(1b, %l2) \ 483 483 : : ltype(x), "m" (__m(addr)) \
+5 -8
arch/x86/kernel/fpu/signal.c
··· 274 274 * Attempt to restore the FPU registers directly from user memory. 275 275 * Pagefaults are handled and any errors returned are fatal. 276 276 */ 277 - static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, 278 - bool fx_only, unsigned int size) 277 + static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only) 279 278 { 280 279 struct fpu *fpu = &current->thread.fpu; 281 280 int ret; 282 281 282 + /* Restore enabled features only. */ 283 + xrestore &= fpu->fpstate->user_xfeatures; 283 284 retry: 284 285 fpregs_lock(); 285 286 /* Ensure that XFD is up to date */ ··· 310 309 if (ret != X86_TRAP_PF) 311 310 return false; 312 311 313 - if (!fault_in_readable(buf, size)) 312 + if (!fault_in_readable(buf, fpu->fpstate->user_size)) 314 313 goto retry; 315 314 return false; 316 315 } ··· 340 339 struct user_i387_ia32_struct env; 341 340 bool success, fx_only = false; 342 341 union fpregs_state *fpregs; 343 - unsigned int state_size; 344 342 u64 user_xfeatures = 0; 345 343 346 344 if (use_xsave()) { ··· 349 349 return false; 350 350 351 351 fx_only = !fx_sw_user.magic1; 352 - state_size = fx_sw_user.xstate_size; 353 352 user_xfeatures = fx_sw_user.xfeatures; 354 353 } else { 355 354 user_xfeatures = XFEATURE_MASK_FPSSE; 356 - state_size = fpu->fpstate->user_size; 357 355 } 358 356 359 357 if (likely(!ia32_fxstate)) { 360 358 /* Restore the FPU registers directly from user memory. */ 361 - return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only, 362 - state_size); 359 + return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only); 363 360 } 364 361 365 362 /*
+3 -3
arch/x86/kvm/svm/svm_ops.h
··· 8 8 9 9 #define svm_asm(insn, clobber...) \ 10 10 do { \ 11 - asm_volatile_goto("1: " __stringify(insn) "\n\t" \ 11 + asm goto("1: " __stringify(insn) "\n\t" \ 12 12 _ASM_EXTABLE(1b, %l[fault]) \ 13 13 ::: clobber : fault); \ 14 14 return; \ ··· 18 18 19 19 #define svm_asm1(insn, op1, clobber...) \ 20 20 do { \ 21 - asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \ 21 + asm goto("1: " __stringify(insn) " %0\n\t" \ 22 22 _ASM_EXTABLE(1b, %l[fault]) \ 23 23 :: op1 : clobber : fault); \ 24 24 return; \ ··· 28 28 29 29 #define svm_asm2(insn, op1, op2, clobber...) \ 30 30 do { \ 31 - asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \ 31 + asm goto("1: " __stringify(insn) " %1, %0\n\t" \ 32 32 _ASM_EXTABLE(1b, %l[fault]) \ 33 33 :: op1, op2 : clobber : fault); \ 34 34 return; \
+2 -2
arch/x86/kvm/vmx/vmx.c
··· 738 738 */ 739 739 static int kvm_cpu_vmxoff(void) 740 740 { 741 - asm_volatile_goto("1: vmxoff\n\t" 741 + asm goto("1: vmxoff\n\t" 742 742 _ASM_EXTABLE(1b, %l[fault]) 743 743 ::: "cc", "memory" : fault); 744 744 ··· 2784 2784 2785 2785 cr4_set_bits(X86_CR4_VMXE); 2786 2786 2787 - asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t" 2787 + asm goto("1: vmxon %[vmxon_pointer]\n\t" 2788 2788 _ASM_EXTABLE(1b, %l[fault]) 2789 2789 : : [vmxon_pointer] "m"(vmxon_pointer) 2790 2790 : : fault);
+3 -3
arch/x86/kvm/vmx/vmx_ops.h
··· 94 94 95 95 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT 96 96 97 - asm_volatile_goto("1: vmread %[field], %[output]\n\t" 97 + asm_goto_output("1: vmread %[field], %[output]\n\t" 98 98 "jna %l[do_fail]\n\t" 99 99 100 100 _ASM_EXTABLE(1b, %l[do_exception]) ··· 188 188 189 189 #define vmx_asm1(insn, op1, error_args...) \ 190 190 do { \ 191 - asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \ 191 + asm goto("1: " __stringify(insn) " %0\n\t" \ 192 192 ".byte 0x2e\n\t" /* branch not taken hint */ \ 193 193 "jna %l[error]\n\t" \ 194 194 _ASM_EXTABLE(1b, %l[fault]) \ ··· 205 205 206 206 #define vmx_asm2(insn, op1, op2, error_args...) \ 207 207 do { \ 208 - asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \ 208 + asm goto("1: " __stringify(insn) " %1, %0\n\t" \ 209 209 ".byte 0x2e\n\t" /* branch not taken hint */ \ 210 210 "jna %l[error]\n\t" \ 211 211 _ASM_EXTABLE(1b, %l[fault]) \
+12 -12
arch/x86/lib/getuser.S
··· 163 163 #endif 164 164 165 165 /* get_user */ 166 - _ASM_EXTABLE(1b, __get_user_handle_exception) 167 - _ASM_EXTABLE(2b, __get_user_handle_exception) 168 - _ASM_EXTABLE(3b, __get_user_handle_exception) 166 + _ASM_EXTABLE_UA(1b, __get_user_handle_exception) 167 + _ASM_EXTABLE_UA(2b, __get_user_handle_exception) 168 + _ASM_EXTABLE_UA(3b, __get_user_handle_exception) 169 169 #ifdef CONFIG_X86_64 170 - _ASM_EXTABLE(4b, __get_user_handle_exception) 170 + _ASM_EXTABLE_UA(4b, __get_user_handle_exception) 171 171 #else 172 - _ASM_EXTABLE(4b, __get_user_8_handle_exception) 173 - _ASM_EXTABLE(5b, __get_user_8_handle_exception) 172 + _ASM_EXTABLE_UA(4b, __get_user_8_handle_exception) 173 + _ASM_EXTABLE_UA(5b, __get_user_8_handle_exception) 174 174 #endif 175 175 176 176 /* __get_user */ 177 - _ASM_EXTABLE(6b, __get_user_handle_exception) 178 - _ASM_EXTABLE(7b, __get_user_handle_exception) 179 - _ASM_EXTABLE(8b, __get_user_handle_exception) 177 + _ASM_EXTABLE_UA(6b, __get_user_handle_exception) 178 + _ASM_EXTABLE_UA(7b, __get_user_handle_exception) 179 + _ASM_EXTABLE_UA(8b, __get_user_handle_exception) 180 180 #ifdef CONFIG_X86_64 181 - _ASM_EXTABLE(9b, __get_user_handle_exception) 181 + _ASM_EXTABLE_UA(9b, __get_user_handle_exception) 182 182 #else 183 - _ASM_EXTABLE(9b, __get_user_8_handle_exception) 184 - _ASM_EXTABLE(10b, __get_user_8_handle_exception) 183 + _ASM_EXTABLE_UA(9b, __get_user_8_handle_exception) 184 + _ASM_EXTABLE_UA(10b, __get_user_8_handle_exception) 185 185 #endif
+10 -10
arch/x86/lib/putuser.S
··· 133 133 RET 134 134 SYM_CODE_END(__put_user_handle_exception) 135 135 136 - _ASM_EXTABLE(1b, __put_user_handle_exception) 137 - _ASM_EXTABLE(2b, __put_user_handle_exception) 138 - _ASM_EXTABLE(3b, __put_user_handle_exception) 139 - _ASM_EXTABLE(4b, __put_user_handle_exception) 140 - _ASM_EXTABLE(5b, __put_user_handle_exception) 141 - _ASM_EXTABLE(6b, __put_user_handle_exception) 142 - _ASM_EXTABLE(7b, __put_user_handle_exception) 143 - _ASM_EXTABLE(9b, __put_user_handle_exception) 136 + _ASM_EXTABLE_UA(1b, __put_user_handle_exception) 137 + _ASM_EXTABLE_UA(2b, __put_user_handle_exception) 138 + _ASM_EXTABLE_UA(3b, __put_user_handle_exception) 139 + _ASM_EXTABLE_UA(4b, __put_user_handle_exception) 140 + _ASM_EXTABLE_UA(5b, __put_user_handle_exception) 141 + _ASM_EXTABLE_UA(6b, __put_user_handle_exception) 142 + _ASM_EXTABLE_UA(7b, __put_user_handle_exception) 143 + _ASM_EXTABLE_UA(9b, __put_user_handle_exception) 144 144 #ifdef CONFIG_X86_32 145 - _ASM_EXTABLE(8b, __put_user_handle_exception) 146 - _ASM_EXTABLE(10b, __put_user_handle_exception) 145 + _ASM_EXTABLE_UA(8b, __put_user_handle_exception) 146 + _ASM_EXTABLE_UA(10b, __put_user_handle_exception) 147 147 #endif
+12
arch/x86/xen/smp.c
··· 65 65 char *resched_name, *callfunc_name, *debug_name; 66 66 67 67 resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); 68 + if (!resched_name) 69 + goto fail_mem; 68 70 per_cpu(xen_resched_irq, cpu).name = resched_name; 69 71 rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, 70 72 cpu, ··· 79 77 per_cpu(xen_resched_irq, cpu).irq = rc; 80 78 81 79 callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); 80 + if (!callfunc_name) 81 + goto fail_mem; 82 82 per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; 83 83 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, 84 84 cpu, ··· 94 90 95 91 if (!xen_fifo_events) { 96 92 debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); 93 + if (!debug_name) 94 + goto fail_mem; 95 + 97 96 per_cpu(xen_debug_irq, cpu).name = debug_name; 98 97 rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, 99 98 xen_debug_interrupt, ··· 108 101 } 109 102 110 103 callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); 104 + if (!callfunc_name) 105 + goto fail_mem; 106 + 111 107 per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; 112 108 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, 113 109 cpu, ··· 124 114 125 115 return 0; 126 116 117 + fail_mem: 118 + rc = -ENOMEM; 127 119 fail: 128 120 xen_smp_intr_free(cpu); 129 121 return rc;
+2 -2
arch/xtensa/include/asm/jump_label.h
··· 13 13 static __always_inline bool arch_static_branch(struct static_key *key, 14 14 bool branch) 15 15 { 16 - asm_volatile_goto("1:\n\t" 16 + asm goto("1:\n\t" 17 17 "_nop\n\t" 18 18 ".pushsection __jump_table, \"aw\"\n\t" 19 19 ".word 1b, %l[l_yes], %c0\n\t" ··· 38 38 * make it reachable and wrap both into a no-transform block 39 39 * to avoid any assembler interference with this. 40 40 */ 41 - asm_volatile_goto("1:\n\t" 41 + asm goto("1:\n\t" 42 42 ".begin no-transform\n\t" 43 43 "_j %l[l_yes]\n\t" 44 44 "2:\n\t"
+7
block/blk-iocost.c
··· 1353 1353 1354 1354 lockdep_assert_held(&iocg->waitq.lock); 1355 1355 1356 + /* 1357 + * If the delay is set by another CPU, we may be in the past. No need to 1358 + * change anything if so. This avoids decay calculation underflow. 1359 + */ 1360 + if (time_before64(now->now, iocg->delay_at)) 1361 + return false; 1362 + 1356 1363 /* calculate the current delay in effect - 1/2 every second */ 1357 1364 tdelta = now->now - iocg->delay_at; 1358 1365 if (iocg->delay)
+2 -2
block/blk-wbt.c
··· 163 163 */ 164 164 static bool wb_recent_wait(struct rq_wb *rwb) 165 165 { 166 - struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb; 166 + struct backing_dev_info *bdi = rwb->rqos.disk->bdi; 167 167 168 - return time_before(jiffies, wb->dirty_sleep + HZ); 168 + return time_before(jiffies, bdi->last_bdp_sleep + HZ); 169 169 } 170 170 171 171 static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
+2 -3
drivers/accel/ivpu/ivpu_drv.c
··· 480 480 /* Clear any pending errors */ 481 481 pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f); 482 482 483 - /* VPU 37XX does not require 10m D3hot delay */ 484 - if (ivpu_hw_gen(vdev) == IVPU_HW_37XX) 485 - pdev->d3hot_delay = 0; 483 + /* NPU does not require 10m D3hot delay */ 484 + pdev->d3hot_delay = 0; 486 485 487 486 ret = pcim_enable_device(pdev); 488 487 if (ret) {
-1
drivers/accel/ivpu/ivpu_fw.c
··· 222 222 const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data; 223 223 224 224 if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) || 225 - (ivpu_hw_gen(vdev) > IVPU_HW_37XX) || 226 225 (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE)) 227 226 vdev->wa.disable_d0i3_msg = true; 228 227
+1 -1
drivers/accel/ivpu/ivpu_hw_37xx.c
··· 525 525 u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES); 526 526 527 527 val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val); 528 - val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); 528 + val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); 529 529 val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val); 530 530 531 531 REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+1 -6
drivers/accel/ivpu/ivpu_hw_40xx.c
··· 530 530 u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES); 531 531 532 532 val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val); 533 - val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); 533 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); 534 534 val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); 535 535 536 536 REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val); ··· 704 704 { 705 705 struct ivpu_hw_info *hw = vdev->hw; 706 706 u32 tile_disable; 707 - u32 tile_enable; 708 707 u32 fuse; 709 708 710 709 fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE); ··· 724 725 else 725 726 ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM); 726 727 727 - tile_enable = (~tile_disable) & TILE_MAX_MASK; 728 - 729 - hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku); 730 - hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku); 731 728 hw->tile_fuse = tile_disable; 732 729 hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 733 730
+2 -2
drivers/accel/ivpu/ivpu_job.c
··· 294 294 return -ENOENT; 295 295 296 296 if (job->file_priv->has_mmu_faults) 297 - job_status = VPU_JSM_STATUS_ABORTED; 297 + job_status = DRM_IVPU_JOB_STATUS_ABORTED; 298 298 299 299 job->bos[CMD_BUF_IDX]->job_status = job_status; 300 300 dma_fence_signal(job->done_fence); ··· 315 315 unsigned long id; 316 316 317 317 xa_for_each(&vdev->submitted_jobs_xa, id, job) 318 - ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED); 318 + ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED); 319 319 } 320 320 321 321 static int ivpu_job_submit(struct ivpu_job *job)
+22 -14
drivers/accel/ivpu/ivpu_mmu.c
··· 72 72 73 73 #define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */ 74 74 #define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2) 75 - #define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1) 76 - #define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1) 77 - #define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1) 75 + #define IVPU_MMU_Q_WRAP_MASK GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0) 76 + #define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1) 78 77 #define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK) 78 + #define IVPU_MMU_Q_WRP(val) ((val) & IVPU_MMU_Q_COUNT) 79 79 80 80 #define IVPU_MMU_CMDQ_CMD_SIZE 16 81 81 #define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE) ··· 475 475 return 0; 476 476 } 477 477 478 + static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q) 479 + { 480 + return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && 481 + (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons))); 482 + } 483 + 484 + static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q) 485 + { 486 + return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && 487 + (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons))); 488 + } 489 + 478 490 static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1) 479 491 { 480 - struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; 481 - u64 *queue_buffer = q->base; 482 - int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); 492 + struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; 493 + u64 *queue_buffer = cmdq->base; 494 + int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); 483 495 484 - if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) { 496 + if (ivpu_mmu_queue_is_full(cmdq)) { 485 497 ivpu_err(vdev, "Failed to write MMU CMD %s\n", name); 486 498 return -EBUSY; 487 499 } 488 500 489 501 queue_buffer[idx] = data0; 490 502 queue_buffer[idx + 1] = data1; 491 - q->prod = (q->prod + 1) & 
IVPU_MMU_Q_WRAP_MASK; 503 + cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK; 492 504 493 505 ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1); 494 506 ··· 572 560 mmu->cmdq.cons = 0; 573 561 574 562 memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE); 575 - clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE); 576 563 mmu->evtq.prod = 0; 577 564 mmu->evtq.cons = 0; 578 565 ··· 885 874 u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); 886 875 887 876 evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC); 888 - if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT)) 877 + if (ivpu_mmu_queue_is_empty(evtq)) 889 878 return NULL; 890 879 891 - clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE); 892 - 893 880 evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; 894 - REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons); 895 - 896 881 return evt; 897 882 } 898 883 ··· 909 902 } 910 903 911 904 ivpu_mmu_user_context_mark_invalid(vdev, ssid); 905 + REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons); 912 906 } 913 907 } 914 908
-26
drivers/acpi/apei/ghes.c
··· 680 680 static DECLARE_RWSEM(cxl_cper_rw_sem); 681 681 static cxl_cper_callback cper_callback; 682 682 683 - /* CXL Event record UUIDs are formatted as GUIDs and reported in section type */ 684 - 685 - /* 686 - * General Media Event Record 687 - * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 688 - */ 689 - #define CPER_SEC_CXL_GEN_MEDIA_GUID \ 690 - GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \ 691 - 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6) 692 - 693 - /* 694 - * DRAM Event Record 695 - * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 696 - */ 697 - #define CPER_SEC_CXL_DRAM_GUID \ 698 - GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \ 699 - 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24) 700 - 701 - /* 702 - * Memory Module Event Record 703 - * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45 704 - */ 705 - #define CPER_SEC_CXL_MEM_MODULE_GUID \ 706 - GUID_INIT(0xfe927475, 0xdd59, 0x4339, \ 707 - 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74) 708 - 709 683 static void cxl_cper_post_event(enum cxl_event_type event_type, 710 684 struct cxl_cper_event_rec *rec) 711 685 {
+40 -17
drivers/base/regmap/regmap-kunit.c
··· 9 9 10 10 #define BLOCK_TEST_SIZE 12 11 11 12 + static void get_changed_bytes(void *orig, void *new, size_t size) 13 + { 14 + char *o = orig; 15 + char *n = new; 16 + int i; 17 + 18 + get_random_bytes(new, size); 19 + 20 + /* 21 + * This could be nicer and more efficient but we shouldn't 22 + * super care. 23 + */ 24 + for (i = 0; i < size; i++) 25 + while (n[i] == o[i]) 26 + get_random_bytes(&n[i], 1); 27 + } 28 + 12 29 static const struct regmap_config test_regmap_config = { 13 30 .max_register = BLOCK_TEST_SIZE, 14 31 .reg_stride = 1, ··· 1219 1202 struct regmap *map; 1220 1203 struct regmap_config config; 1221 1204 struct regmap_ram_data *data; 1222 - unsigned int val, val_test, val_last; 1205 + unsigned int val; 1206 + u16 val_test, val_last; 1223 1207 u16 val_array[BLOCK_TEST_SIZE]; 1224 1208 1225 1209 config = raw_regmap_config; ··· 1269 1251 struct regmap *map; 1270 1252 struct regmap_config config; 1271 1253 struct regmap_ram_data *data; 1272 - u16 val[2]; 1254 + u16 val[3]; 1273 1255 u16 *hw_buf; 1274 1256 unsigned int rval; 1275 1257 int i; ··· 1283 1265 1284 1266 hw_buf = (u16 *)data->vals; 1285 1267 1286 - get_random_bytes(&val, sizeof(val)); 1268 + get_changed_bytes(&hw_buf[2], &val[0], sizeof(val)); 1287 1269 1288 1270 /* Do a regular write and a raw write in cache only mode */ 1289 1271 regcache_cache_only(map, true); 1290 - KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val))); 1291 - if (config.val_format_endian == REGMAP_ENDIAN_BIG) 1292 - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6, 1293 - be16_to_cpu(val[0]))); 1294 - else 1295 - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6, 1296 - le16_to_cpu(val[0]))); 1272 + KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, 1273 + sizeof(u16) * 2)); 1274 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2])); 1297 1275 1298 1276 /* We should read back the new values, and defaults for the rest */ 1299 1277 for (i = 0; i < config.max_register + 1; i++) { ··· 1298 1284 switch (i) { 
1299 1285 case 2: 1300 1286 case 3: 1301 - case 6: 1302 1287 if (config.val_format_endian == REGMAP_ENDIAN_BIG) { 1303 1288 KUNIT_EXPECT_EQ(test, rval, 1304 - be16_to_cpu(val[i % 2])); 1289 + be16_to_cpu(val[i - 2])); 1305 1290 } else { 1306 1291 KUNIT_EXPECT_EQ(test, rval, 1307 - le16_to_cpu(val[i % 2])); 1292 + le16_to_cpu(val[i - 2])); 1308 1293 } 1294 + break; 1295 + case 4: 1296 + KUNIT_EXPECT_EQ(test, rval, val[i - 2]); 1309 1297 break; 1310 1298 default: 1311 1299 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); 1312 1300 break; 1313 1301 } 1314 1302 } 1303 + 1304 + /* 1305 + * The value written via _write() was translated by the core, 1306 + * translate the original copy for comparison purposes. 1307 + */ 1308 + if (config.val_format_endian == REGMAP_ENDIAN_BIG) 1309 + val[2] = cpu_to_be16(val[2]); 1310 + else 1311 + val[2] = cpu_to_le16(val[2]); 1315 1312 1316 1313 /* The values should not appear in the "hardware" */ 1317 - KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val)); 1318 - KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16)); 1314 + KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val)); 1319 1315 1320 1316 for (i = 0; i < config.max_register + 1; i++) 1321 1317 data->written[i] = false; ··· 1336 1312 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1337 1313 1338 1314 /* The values should now appear in the "hardware" */ 1339 - KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val)); 1340 - KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16)); 1315 + KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val)); 1341 1316 1342 1317 regmap_exit(map); 1343 1318 }
+4 -3
drivers/block/virtio_blk.c
··· 1593 1593 { 1594 1594 struct virtio_blk *vblk = vdev->priv; 1595 1595 1596 + /* Ensure no requests in virtqueues before deleting vqs. */ 1597 + blk_mq_freeze_queue(vblk->disk->queue); 1598 + 1596 1599 /* Ensure we don't receive any more interrupts */ 1597 1600 virtio_reset_device(vdev); 1598 1601 1599 1602 /* Make sure no work handler is accessing the device. */ 1600 1603 flush_work(&vblk->config_work); 1601 - 1602 - blk_mq_quiesce_queue(vblk->disk->queue); 1603 1604 1604 1605 vdev->config->del_vqs(vdev); 1605 1606 kfree(vblk->vqs); ··· 1619 1618 1620 1619 virtio_device_ready(vdev); 1621 1620 1622 - blk_mq_unquiesce_queue(vblk->disk->queue); 1621 + blk_mq_unfreeze_queue(vblk->disk->queue); 1623 1622 return 0; 1624 1623 } 1625 1624 #endif
+2 -3
drivers/connector/cn_proc.c
··· 108 108 filter_data[1] = 0; 109 109 } 110 110 111 - if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, 112 - cn_filter, (void *)filter_data) == -ESRCH) 113 - atomic_set(&proc_event_num_listeners, 0); 111 + cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, 112 + cn_filter, (void *)filter_data); 114 113 115 114 local_unlock(&local_event.lock); 116 115 }
+3 -3
drivers/cxl/core/trace.h
··· 338 338 339 339 TP_fast_assign( 340 340 CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); 341 - memcpy(&__entry->hdr_uuid, &CXL_EVENT_GEN_MEDIA_UUID, sizeof(uuid_t)); 341 + __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID; 342 342 343 343 /* General Media */ 344 344 __entry->dpa = le64_to_cpu(rec->phys_addr); ··· 425 425 426 426 TP_fast_assign( 427 427 CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); 428 - memcpy(&__entry->hdr_uuid, &CXL_EVENT_DRAM_UUID, sizeof(uuid_t)); 428 + __entry->hdr_uuid = CXL_EVENT_DRAM_UUID; 429 429 430 430 /* DRAM */ 431 431 __entry->dpa = le64_to_cpu(rec->phys_addr); ··· 573 573 574 574 TP_fast_assign( 575 575 CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); 576 - memcpy(&__entry->hdr_uuid, &CXL_EVENT_MEM_MODULE_UUID, sizeof(uuid_t)); 576 + __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID; 577 577 578 578 /* Memory Module Event */ 579 579 __entry->event_type = rec->event_type;
+6 -14
drivers/dpll/dpll_netlink.c
··· 1206 1206 unsigned long i; 1207 1207 int ret = 0; 1208 1208 1209 + mutex_lock(&dpll_lock); 1209 1210 xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED, 1210 1211 ctx->idx) { 1211 1212 if (!dpll_pin_available(pin)) ··· 1226 1225 } 1227 1226 genlmsg_end(skb, hdr); 1228 1227 } 1228 + mutex_unlock(&dpll_lock); 1229 + 1229 1230 if (ret == -EMSGSIZE) { 1230 1231 ctx->idx = i; 1231 1232 return skb->len; ··· 1383 1380 unsigned long i; 1384 1381 int ret = 0; 1385 1382 1383 + mutex_lock(&dpll_lock); 1386 1384 xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED, 1387 1385 ctx->idx) { 1388 1386 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, ··· 1400 1396 } 1401 1397 genlmsg_end(skb, hdr); 1402 1398 } 1399 + mutex_unlock(&dpll_lock); 1400 + 1403 1401 if (ret == -EMSGSIZE) { 1404 1402 ctx->idx = i; 1405 1403 return skb->len; ··· 1450 1444 struct genl_info *info) 1451 1445 { 1452 1446 mutex_unlock(&dpll_lock); 1453 - } 1454 - 1455 - int dpll_lock_dumpit(struct netlink_callback *cb) 1456 - { 1457 - mutex_lock(&dpll_lock); 1458 - 1459 - return 0; 1460 - } 1461 - 1462 - int dpll_unlock_dumpit(struct netlink_callback *cb) 1463 - { 1464 - mutex_unlock(&dpll_lock); 1465 - 1466 - return 0; 1467 1447 } 1468 1448 1469 1449 int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
-4
drivers/dpll/dpll_nl.c
··· 95 95 }, 96 96 { 97 97 .cmd = DPLL_CMD_DEVICE_GET, 98 - .start = dpll_lock_dumpit, 99 98 .dumpit = dpll_nl_device_get_dumpit, 100 - .done = dpll_unlock_dumpit, 101 99 .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP, 102 100 }, 103 101 { ··· 127 129 }, 128 130 { 129 131 .cmd = DPLL_CMD_PIN_GET, 130 - .start = dpll_lock_dumpit, 131 132 .dumpit = dpll_nl_pin_get_dumpit, 132 - .done = dpll_unlock_dumpit, 133 133 .policy = dpll_pin_get_dump_nl_policy, 134 134 .maxattr = DPLL_A_PIN_ID, 135 135 .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
-2
drivers/dpll/dpll_nl.h
··· 30 30 void 31 31 dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, 32 32 struct genl_info *info); 33 - int dpll_lock_dumpit(struct netlink_callback *cb); 34 - int dpll_unlock_dumpit(struct netlink_callback *cb); 35 33 36 34 int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info); 37 35 int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info);
+17 -1
drivers/firewire/core-card.c
··· 429 429 */ 430 430 card->bm_generation = generation; 431 431 432 - if (root_device == NULL) { 432 + if (card->gap_count == 0) { 433 + /* 434 + * If self IDs have inconsistent gap counts, do a 435 + * bus reset ASAP. The config rom read might never 436 + * complete, so don't wait for it. However, still 437 + * send a PHY configuration packet prior to the 438 + * bus reset. The PHY configuration packet might 439 + * fail, but 1394-2008 8.4.5.2 explicitly permits 440 + * it in this case, so it should be safe to try. 441 + */ 442 + new_root_id = local_id; 443 + /* 444 + * We must always send a bus reset if the gap count 445 + * is inconsistent, so bypass the 5-reset limit. 446 + */ 447 + card->bm_retries = 0; 448 + } else if (root_device == NULL) { 433 449 /* 434 450 * Either link_on is false, or we failed to read the 435 451 * config rom. In either case, pick another root.
+1 -1
drivers/firmware/efi/arm-runtime.c
··· 107 107 efi_memory_desc_t *md; 108 108 109 109 for_each_efi_memory_desc(md) { 110 - int md_size = md->num_pages << EFI_PAGE_SHIFT; 110 + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; 111 111 struct resource *res; 112 112 113 113 if (!(md->attribute & EFI_MEMORY_SP))
+19
drivers/firmware/efi/cper.c
··· 523 523 } 524 524 } 525 525 526 + struct ignore_section { 527 + guid_t guid; 528 + const char *name; 529 + }; 530 + 531 + static const struct ignore_section ignore_sections[] = { 532 + { .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" }, 533 + { .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" }, 534 + { .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" }, 535 + }; 536 + 526 537 static void 527 538 cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata, 528 539 int sec_no) ··· 554 543 printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text); 555 544 556 545 snprintf(newpfx, sizeof(newpfx), "%s ", pfx); 546 + 547 + for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) { 548 + if (guid_equal(sec_type, &ignore_sections[i].guid)) { 549 + printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name); 550 + return; 551 + } 552 + } 553 + 557 554 if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { 558 555 struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata); 559 556
+10 -9
drivers/firmware/efi/efi-init.c
··· 144 144 case EFI_CONVENTIONAL_MEMORY: 145 145 case EFI_PERSISTENT_MEMORY: 146 146 /* 147 - * Special purpose memory is 'soft reserved', which means it 148 - * is set aside initially, but can be hotplugged back in or 149 - * be assigned to the dax driver after boot. 150 - */ 151 - if (efi_soft_reserve_enabled() && 152 - (md->attribute & EFI_MEMORY_SP)) 153 - return false; 154 - 155 - /* 156 147 * According to the spec, these regions are no longer reserved 157 148 * after calling ExitBootServices(). However, we can only use 158 149 * them as System RAM if they can be mapped writeback cacheable. ··· 187 196 size = npages << PAGE_SHIFT; 188 197 189 198 if (is_memory(md)) { 199 + /* 200 + * Special purpose memory is 'soft reserved', which 201 + * means it is set aside initially. Don't add a memblock 202 + * for it now so that it can be hotplugged back in or 203 + * be assigned to the dax driver after boot. 204 + */ 205 + if (efi_soft_reserve_enabled() && 206 + (md->attribute & EFI_MEMORY_SP)) 207 + continue; 208 + 190 209 early_init_dt_add_memory_arch(paddr, size); 191 210 192 211 if (!is_usable_memory(md))
+2 -2
drivers/firmware/efi/libstub/Makefile
··· 28 28 -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ 29 29 -DEFI_HAVE_STRCMP -fno-builtin -fpic \ 30 30 $(call cc-option,-mno-single-pic-base) 31 - cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE 31 + cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax 32 32 cflags-$(CONFIG_LOONGARCH) += -fpie 33 33 34 34 cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt ··· 143 143 # exist. 144 144 STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \ 145 145 --prefix-symbols=__efistub_ 146 - STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20 146 + STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX 147 147 148 148 # For LoongArch, keep all the symbols in .init section and make sure that no 149 149 # absolute symbols references exist.
+1
drivers/firmware/efi/libstub/alignedmem.c
··· 14 14 * @max: the address that the last allocated memory page shall not 15 15 * exceed 16 16 * @align: minimum alignment of the base of the allocation 17 + * @memory_type: the type of memory to allocate 17 18 * 18 19 * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according 19 20 * to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
+2 -1
drivers/firmware/efi/libstub/efistub.h
··· 956 956 957 957 efi_status_t efi_random_alloc(unsigned long size, unsigned long align, 958 958 unsigned long *addr, unsigned long random_seed, 959 - int memory_type, unsigned long alloc_limit); 959 + int memory_type, unsigned long alloc_min, 960 + unsigned long alloc_max); 960 961 961 962 efi_status_t efi_random_get_seed(void); 962 963
+1 -1
drivers/firmware/efi/libstub/kaslr.c
··· 119 119 */ 120 120 status = efi_random_alloc(*reserve_size, min_kimg_align, 121 121 reserve_addr, phys_seed, 122 - EFI_LOADER_CODE, EFI_ALLOC_LIMIT); 122 + EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); 123 123 if (status != EFI_SUCCESS) 124 124 efi_warn("efi_random_alloc() failed: 0x%lx\n", status); 125 125 } else {
+7 -5
drivers/firmware/efi/libstub/randomalloc.c
··· 17 17 static unsigned long get_entry_num_slots(efi_memory_desc_t *md, 18 18 unsigned long size, 19 19 unsigned long align_shift, 20 - u64 alloc_limit) 20 + u64 alloc_min, u64 alloc_max) 21 21 { 22 22 unsigned long align = 1UL << align_shift; 23 23 u64 first_slot, last_slot, region_end; ··· 30 30 return 0; 31 31 32 32 region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, 33 - alloc_limit); 33 + alloc_max); 34 34 if (region_end < size) 35 35 return 0; 36 36 37 - first_slot = round_up(md->phys_addr, align); 37 + first_slot = round_up(max(md->phys_addr, alloc_min), align); 38 38 last_slot = round_down(region_end - size + 1, align); 39 39 40 40 if (first_slot > last_slot) ··· 56 56 unsigned long *addr, 57 57 unsigned long random_seed, 58 58 int memory_type, 59 - unsigned long alloc_limit) 59 + unsigned long alloc_min, 60 + unsigned long alloc_max) 60 61 { 61 62 unsigned long total_slots = 0, target_slot; 62 63 unsigned long total_mirrored_slots = 0; ··· 79 78 efi_memory_desc_t *md = (void *)map->map + map_offset; 80 79 unsigned long slots; 81 80 82 - slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit); 81 + slots = get_entry_num_slots(md, size, ilog2(align), alloc_min, 82 + alloc_max); 83 83 MD_NUM_SLOTS(md) = slots; 84 84 total_slots += slots; 85 85 if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
+15 -10
drivers/firmware/efi/libstub/x86-stub.c
··· 223 223 } 224 224 } 225 225 226 - void efi_adjust_memory_range_protection(unsigned long start, 227 - unsigned long size) 226 + efi_status_t efi_adjust_memory_range_protection(unsigned long start, 227 + unsigned long size) 228 228 { 229 229 efi_status_t status; 230 230 efi_gcd_memory_space_desc_t desc; ··· 236 236 rounded_end = roundup(start + size, EFI_PAGE_SIZE); 237 237 238 238 if (memattr != NULL) { 239 - efi_call_proto(memattr, clear_memory_attributes, rounded_start, 240 - rounded_end - rounded_start, EFI_MEMORY_XP); 241 - return; 239 + status = efi_call_proto(memattr, clear_memory_attributes, 240 + rounded_start, 241 + rounded_end - rounded_start, 242 + EFI_MEMORY_XP); 243 + if (status != EFI_SUCCESS) 244 + efi_warn("Failed to clear EFI_MEMORY_XP attribute\n"); 245 + return status; 242 246 } 243 247 244 248 if (efi_dxe_table == NULL) 245 - return; 249 + return EFI_SUCCESS; 246 250 247 251 /* 248 252 * Don't modify memory region attributes, they are ··· 259 255 status = efi_dxe_call(get_memory_space_descriptor, start, &desc); 260 256 261 257 if (status != EFI_SUCCESS) 262 - return; 258 + break; 263 259 264 260 next = desc.base_address + desc.length; 265 261 ··· 284 280 unprotect_start, 285 281 unprotect_start + unprotect_size, 286 282 status); 283 + break; 287 284 } 288 285 } 286 + return EFI_SUCCESS; 289 287 } 290 288 291 289 static void setup_unaccepted_memory(void) ··· 799 793 800 794 status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr, 801 795 seed[0], EFI_LOADER_CODE, 796 + LOAD_PHYSICAL_ADDR, 802 797 EFI_X86_KERNEL_ALLOC_LIMIT); 803 798 if (status != EFI_SUCCESS) 804 799 return status; ··· 812 805 813 806 *kernel_entry = addr + entry; 814 807 815 - efi_adjust_memory_range_protection(addr, kernel_total_size); 816 - 817 - return EFI_SUCCESS; 808 + return efi_adjust_memory_range_protection(addr, kernel_total_size); 818 809 } 819 810 820 811 static void __noreturn enter_kernel(unsigned long kernel_addr,
+2 -2
drivers/firmware/efi/libstub/x86-stub.h
··· 5 5 extern void trampoline_32bit_src(void *, bool); 6 6 extern const u16 trampoline_ljmp_imm_offset; 7 7 8 - void efi_adjust_memory_range_protection(unsigned long start, 9 - unsigned long size); 8 + efi_status_t efi_adjust_memory_range_protection(unsigned long start, 9 + unsigned long size); 10 10 11 11 #ifdef CONFIG_X86_64 12 12 efi_status_t efi_setup_5level_paging(void);
+1 -1
drivers/firmware/efi/libstub/zboot.c
··· 119 119 } 120 120 121 121 status = efi_random_alloc(alloc_size, min_kimg_align, &image_base, 122 - seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT); 122 + seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); 123 123 if (status != EFI_SUCCESS) { 124 124 efi_err("Failed to allocate memory\n"); 125 125 goto free_cmdline;
+1 -1
drivers/firmware/efi/riscv-runtime.c
··· 85 85 efi_memory_desc_t *md; 86 86 87 87 for_each_efi_memory_desc(md) { 88 - int md_size = md->num_pages << EFI_PAGE_SHIFT; 88 + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; 89 89 struct resource *res; 90 90 91 91 if (!(md->attribute & EFI_MEMORY_SP))
+4 -4
drivers/gpio/gpiolib.c
··· 1005 1005 err_free_gpiochip_mask: 1006 1006 gpiochip_remove_pin_ranges(gc); 1007 1007 gpiochip_free_valid_mask(gc); 1008 + err_remove_from_list: 1009 + spin_lock_irqsave(&gpio_lock, flags); 1010 + list_del(&gdev->list); 1011 + spin_unlock_irqrestore(&gpio_lock, flags); 1008 1012 if (gdev->dev.release) { 1009 1013 /* release() has been registered by gpiochip_setup_dev() */ 1010 1014 gpio_device_put(gdev); 1011 1015 goto err_print_message; 1012 1016 } 1013 - err_remove_from_list: 1014 - spin_lock_irqsave(&gpio_lock, flags); 1015 - list_del(&gdev->list); 1016 - spin_unlock_irqrestore(&gpio_lock, flags); 1017 1017 err_free_label: 1018 1018 kfree_const(gdev->label); 1019 1019 err_free_descs:
+2
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1078 1078 bool in_s3; 1079 1079 bool in_s4; 1080 1080 bool in_s0ix; 1081 + /* indicate amdgpu suspension status */ 1082 + bool suspend_complete; 1081 1083 1082 1084 enum pp_mp1_state mp1_state; 1083 1085 struct amdgpu_doorbell_index doorbell_index;
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2476 2476 struct drm_device *drm_dev = dev_get_drvdata(dev); 2477 2477 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2478 2478 2479 + adev->suspend_complete = false; 2479 2480 if (amdgpu_acpi_is_s0ix_active(adev)) 2480 2481 adev->in_s0ix = true; 2481 2482 else if (amdgpu_acpi_is_s3_active(adev)) ··· 2491 2490 struct drm_device *drm_dev = dev_get_drvdata(dev); 2492 2491 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2493 2492 2493 + adev->suspend_complete = true; 2494 2494 if (amdgpu_acpi_should_gpu_reset(adev)) 2495 2495 return amdgpu_asic_reset(adev); 2496 2496
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
··· 362 362 } 363 363 } 364 364 365 - if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len)) 365 + if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len)) 366 366 ret = -EFAULT; 367 367 368 368 err_free_shared_buf:
+8
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 3034 3034 3035 3035 gfx_v9_0_cp_gfx_enable(adev, true); 3036 3036 3037 + /* Now only limit the quirk on the APU gfx9 series and already 3038 + * confirmed that the APU gfx10/gfx11 needn't such update. 3039 + */ 3040 + if (adev->flags & AMD_IS_APU && 3041 + adev->in_s3 && !adev->suspend_complete) { 3042 + DRM_INFO(" Will skip the CSB packet resubmit\n"); 3043 + return 0; 3044 + } 3037 3045 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); 3038 3046 if (r) { 3039 3047 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
-8
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1947 1947 1948 1948 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) 1949 1949 { 1950 - static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; 1951 - u32 vram_info; 1952 - 1953 - /* Only for dGPU, vendor informaton is reliable */ 1954 - if (!amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)) { 1955 - vram_info = RREG32(regBIF_BIOS_SCRATCH_4); 1956 - adev->gmc.vram_vendor = vram_info & 0xF; 1957 - } 1958 1950 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1959 1951 adev->gmc.vram_width = 128 * 64; 1960 1952 }
-9
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
··· 674 674 return ret; 675 675 } 676 676 677 - static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, 678 - struct amdgpu_irq_src *source, 679 - unsigned type, 680 - enum amdgpu_interrupt_state state) 681 - { 682 - return 0; 683 - } 684 - 685 677 static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, 686 678 struct amdgpu_irq_src *source, 687 679 unsigned int type, ··· 757 765 } 758 766 759 767 static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { 760 - .set = jpeg_v4_0_set_interrupt_state, 761 768 .process = jpeg_v4_0_process_interrupt, 762 769 }; 763 770
-10
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
··· 181 181 RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) 182 182 jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); 183 183 } 184 - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); 185 184 186 185 return 0; 187 186 } ··· 515 516 return ret; 516 517 } 517 518 518 - static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, 519 - struct amdgpu_irq_src *source, 520 - unsigned type, 521 - enum amdgpu_interrupt_state state) 522 - { 523 - return 0; 524 - } 525 - 526 519 static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, 527 520 struct amdgpu_irq_src *source, 528 521 struct amdgpu_iv_entry *entry) ··· 594 603 } 595 604 596 605 static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = { 597 - .set = jpeg_v4_0_5_set_interrupt_state, 598 606 .process = jpeg_v4_0_5_process_interrupt, 599 607 }; 600 608
+6
drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
··· 431 431 u32 inst_mask; 432 432 int i; 433 433 434 + if (amdgpu_sriov_vf(adev)) 435 + adev->rmmio_remap.reg_offset = 436 + SOC15_REG_OFFSET( 437 + NBIO, 0, 438 + regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) 439 + << 2; 434 440 WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE, 435 441 0xff & ~(adev->gfx.xcc_mask)); 436 442
+22
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 1298 1298 return soc15_common_hw_fini(adev); 1299 1299 } 1300 1300 1301 + static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) 1302 + { 1303 + u32 sol_reg; 1304 + 1305 + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); 1306 + 1307 + /* Will reset for the following suspend abort cases. 1308 + * 1) Only reset limit on APU side, dGPU hasn't checked yet. 1309 + * 2) S3 suspend abort and TOS already launched. 1310 + */ 1311 + if (adev->flags & AMD_IS_APU && adev->in_s3 && 1312 + !adev->suspend_complete && 1313 + sol_reg) 1314 + return true; 1315 + 1316 + return false; 1317 + } 1318 + 1301 1319 static int soc15_common_resume(void *handle) 1302 1320 { 1303 1321 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1304 1322 1323 + if (soc15_need_reset_on_resume(adev)) { 1324 + dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); 1325 + soc15_asic_reset(adev); 1326 + } 1305 1327 return soc15_common_hw_init(adev); 1306 1328 } 1307 1329
+7 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 10731 10731 goto fail; 10732 10732 } 10733 10733 10734 - ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); 10735 - if (ret) { 10736 - DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); 10737 - ret = -EINVAL; 10738 - goto fail; 10734 + if (dc_resource_is_dsc_encoding_supported(dc)) { 10735 + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); 10736 + if (ret) { 10737 + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); 10738 + ret = -EINVAL; 10739 + goto fail; 10740 + } 10739 10741 } 10740 10742 10741 10743 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
+3 -1
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 3817 3817 * programming has completed (we turn on phantom OTG in order 3818 3818 * to complete the plane disable for phantom pipes). 3819 3819 */ 3820 - dc->hwss.apply_ctx_to_hw(dc, context); 3820 + 3821 + if (dc->hwss.disable_phantom_streams) 3822 + dc->hwss.disable_phantom_streams(dc, context); 3821 3823 } 3822 3824 3823 3825 if (update_type != UPDATE_TYPE_FAST)
+3
drivers/gpu/drm/amd/display/dc/core/dc_state.c
··· 291 291 dc_stream_release(state->phantom_streams[i]); 292 292 state->phantom_streams[i] = NULL; 293 293 } 294 + state->phantom_stream_count = 0; 294 295 295 296 for (i = 0; i < state->phantom_plane_count; i++) { 296 297 dc_plane_state_release(state->phantom_planes[i]); 297 298 state->phantom_planes[i] = NULL; 298 299 } 300 + state->phantom_plane_count = 0; 301 + 299 302 state->stream_mask = 0; 300 303 memset(&state->res_ctx, 0, sizeof(state->res_ctx)); 301 304 memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
+3 -3
drivers/gpu/drm/amd/display/dc/dml/Makefile
··· 72 72 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) 73 73 CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags) 74 74 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) 75 - CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) 75 + CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag) 76 76 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) 77 - CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) 77 + CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag) 78 78 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) 79 - CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) 79 + CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag) 80 80 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) 81 81 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag) 82 82 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
+11 -4
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 1288 1288 return updated; 1289 1289 } 1290 1290 1291 - static bool should_allow_odm_power_optimization(struct dc *dc, 1291 + static bool should_apply_odm_power_optimization(struct dc *dc, 1292 1292 struct dc_state *context, struct vba_vars_st *v, int *split, 1293 1293 bool *merge) 1294 1294 { ··· 1392 1392 { 1393 1393 int i; 1394 1394 unsigned int new_vlevel; 1395 + unsigned int cur_policy[MAX_PIPES]; 1395 1396 1396 - for (i = 0; i < pipe_cnt; i++) 1397 + for (i = 0; i < pipe_cnt; i++) { 1398 + cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy; 1397 1399 pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; 1400 + } 1398 1401 1399 1402 new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); 1400 1403 ··· 1406 1403 memset(merge, 0, MAX_PIPES * sizeof(bool)); 1407 1404 *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge); 1408 1405 context->bw_ctx.dml.vba.VoltageLevel = *vlevel; 1406 + } else { 1407 + for (i = 0; i < pipe_cnt; i++) 1408 + pipes[i].pipe.dest.odm_combine_policy = cur_policy[i]; 1409 1409 } 1410 1410 } 1411 1411 ··· 1586 1580 } 1587 1581 } 1588 1582 1589 - if (should_allow_odm_power_optimization(dc, context, vba, split, merge)) 1583 + if (should_apply_odm_power_optimization(dc, context, vba, split, merge)) 1590 1584 try_odm_power_optimization_and_revalidate( 1591 1585 dc, context, pipes, split, merge, vlevel, *pipe_cnt); 1592 1586 ··· 2215 2209 int i; 2216 2210 2217 2211 pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); 2218 - dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); 2212 + if (!dc->config.enable_windowed_mpo_odm) 2213 + dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); 2219 2214 2220 2215 /* repopulate_pipes = 1 means the pipes were either split or merged. In this case 2221 2216 * we have to re-calculate the DET allocation and run through DML once more to
+2 -2
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 1476 1476 return DC_OK; 1477 1477 } 1478 1478 1479 - static enum dc_status apply_single_controller_ctx_to_hw( 1479 + enum dc_status dce110_apply_single_controller_ctx_to_hw( 1480 1480 struct pipe_ctx *pipe_ctx, 1481 1481 struct dc_state *context, 1482 1482 struct dc *dc) ··· 2302 2302 if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) 2303 2303 continue; 2304 2304 2305 - status = apply_single_controller_ctx_to_hw( 2305 + status = dce110_apply_single_controller_ctx_to_hw( 2306 2306 pipe_ctx, 2307 2307 context, 2308 2308 dc);
+4
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
··· 39 39 struct dc *dc, 40 40 struct dc_state *context); 41 41 42 + enum dc_status dce110_apply_single_controller_ctx_to_hw( 43 + struct pipe_ctx *pipe_ctx, 44 + struct dc_state *context, 45 + struct dc *dc); 42 46 43 47 void dce110_enable_stream(struct pipe_ctx *pipe_ctx); 44 48
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 2561 2561 tg->funcs->setup_vertical_interrupt2(tg, start_line); 2562 2562 } 2563 2563 2564 - static void dcn20_reset_back_end_for_pipe( 2564 + void dcn20_reset_back_end_for_pipe( 2565 2565 struct dc *dc, 2566 2566 struct pipe_ctx *pipe_ctx, 2567 2567 struct dc_state *context)
+4
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
··· 84 84 void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); 85 85 void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); 86 86 void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); 87 + void dcn20_reset_back_end_for_pipe( 88 + struct dc *dc, 89 + struct pipe_ctx *pipe_ctx, 90 + struct dc_state *context); 87 91 void dcn20_init_blank( 88 92 struct dc *dc, 89 93 struct timing_generator *tg);
+34 -29
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
··· 206 206 void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) 207 207 { 208 208 struct abm *abm = pipe_ctx->stream_res.abm; 209 - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; 209 + struct timing_generator *tg = pipe_ctx->stream_res.tg; 210 210 struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; 211 211 struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; 212 + uint32_t otg_inst; 213 + 214 + if (!abm && !tg && !panel_cntl) 215 + return; 216 + 217 + otg_inst = tg->inst; 212 218 213 219 if (dmcu) { 214 220 dce110_set_pipe(pipe_ctx); 215 221 return; 216 222 } 217 223 218 - if (abm && panel_cntl) { 219 - if (abm->funcs && abm->funcs->set_pipe_ex) { 220 - abm->funcs->set_pipe_ex(abm, 224 + if (abm->funcs && abm->funcs->set_pipe_ex) { 225 + abm->funcs->set_pipe_ex(abm, 221 226 otg_inst, 222 227 SET_ABM_PIPE_NORMAL, 223 228 panel_cntl->inst, 224 229 panel_cntl->pwrseq_inst); 225 - } else { 226 - dmub_abm_set_pipe(abm, otg_inst, 227 - SET_ABM_PIPE_NORMAL, 228 - panel_cntl->inst, 229 - panel_cntl->pwrseq_inst); 230 - } 230 + } else { 231 + dmub_abm_set_pipe(abm, otg_inst, 232 + SET_ABM_PIPE_NORMAL, 233 + panel_cntl->inst, 234 + panel_cntl->pwrseq_inst); 231 235 } 232 236 } 233 237 ··· 241 237 { 242 238 struct dc_context *dc = pipe_ctx->stream->ctx; 243 239 struct abm *abm = pipe_ctx->stream_res.abm; 240 + struct timing_generator *tg = pipe_ctx->stream_res.tg; 244 241 struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; 242 + uint32_t otg_inst; 243 + 244 + if (!abm && !tg && !panel_cntl) 245 + return false; 246 + 247 + otg_inst = tg->inst; 245 248 246 249 if (dc->dc->res_pool->dmcu) { 247 250 dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp); 248 251 return true; 249 252 } 250 253 251 - if (abm != NULL) { 252 - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; 253 - 254 - if (abm && panel_cntl) { 255 - if (abm->funcs && abm->funcs->set_pipe_ex) { 256 - abm->funcs->set_pipe_ex(abm, 257 - otg_inst, 258 - 
SET_ABM_PIPE_NORMAL, 259 - panel_cntl->inst, 260 - panel_cntl->pwrseq_inst); 261 - } else { 262 - dmub_abm_set_pipe(abm, 263 - otg_inst, 264 - SET_ABM_PIPE_NORMAL, 265 - panel_cntl->inst, 266 - panel_cntl->pwrseq_inst); 267 - } 268 - } 254 + if (abm->funcs && abm->funcs->set_pipe_ex) { 255 + abm->funcs->set_pipe_ex(abm, 256 + otg_inst, 257 + SET_ABM_PIPE_NORMAL, 258 + panel_cntl->inst, 259 + panel_cntl->pwrseq_inst); 260 + } else { 261 + dmub_abm_set_pipe(abm, 262 + otg_inst, 263 + SET_ABM_PIPE_NORMAL, 264 + panel_cntl->inst, 265 + panel_cntl->pwrseq_inst); 269 266 } 270 267 271 - if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm) 268 + if (abm->funcs && abm->funcs->set_backlight_level_pwm) 272 269 abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16, 273 270 frame_ramp, 0, panel_cntl->inst); 274 271 else
+66 -8
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 1474 1474 } 1475 1475 } 1476 1476 1477 + void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context) 1478 + { 1479 + struct dce_hwseq *hws = dc->hwseq; 1480 + int i; 1481 + 1482 + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { 1483 + struct pipe_ctx *pipe_ctx_old = 1484 + &dc->current_state->res_ctx.pipe_ctx[i]; 1485 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1486 + 1487 + if (!pipe_ctx_old->stream) 1488 + continue; 1489 + 1490 + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM) 1491 + continue; 1492 + 1493 + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) 1494 + continue; 1495 + 1496 + if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) || 1497 + (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) { 1498 + struct clock_source *old_clk = pipe_ctx_old->clock_source; 1499 + 1500 + if (hws->funcs.reset_back_end_for_pipe) 1501 + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); 1502 + if (hws->funcs.enable_stream_gating) 1503 + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); 1504 + if (old_clk) 1505 + old_clk->funcs->cs_power_down(old_clk); 1506 + } 1507 + } 1508 + } 1509 + 1477 1510 void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) 1478 1511 { 1479 1512 unsigned int i; 1513 + enum dc_status status = DC_OK; 1514 + struct dce_hwseq *hws = dc->hwseq; 1480 1515 1481 1516 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1482 1517 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; ··· 1532 1497 } 1533 1498 } 1534 1499 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1535 - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 1500 + struct pipe_ctx *pipe_ctx_old = 1501 + &dc->current_state->res_ctx.pipe_ctx[i]; 1502 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1536 1503 1537 - if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == 
SUBVP_PHANTOM) { 1538 - // If old context or new context has phantom pipes, apply 1539 - // the phantom timings now. We can't change the phantom 1540 - // pipe configuration safely without driver acquiring 1541 - // the DMCUB lock first. 1542 - dc->hwss.apply_ctx_to_hw(dc, context); 1543 - break; 1504 + if (pipe_ctx->stream == NULL) 1505 + continue; 1506 + 1507 + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) 1508 + continue; 1509 + 1510 + if (pipe_ctx->stream == pipe_ctx_old->stream && 1511 + pipe_ctx->stream->link->link_state_valid) { 1512 + continue; 1544 1513 } 1514 + 1515 + if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) 1516 + continue; 1517 + 1518 + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) 1519 + continue; 1520 + 1521 + if (hws->funcs.apply_single_controller_ctx_to_hw) 1522 + status = hws->funcs.apply_single_controller_ctx_to_hw( 1523 + pipe_ctx, 1524 + context, 1525 + dc); 1526 + 1527 + ASSERT(status == DC_OK); 1528 + 1529 + #ifdef CONFIG_DRM_AMD_DC_FP 1530 + if (hws->funcs.resync_fifo_dccg_dio) 1531 + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); 1532 + #endif 1545 1533 } 1546 1534 } 1547 1535
+2
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
··· 111 111 112 112 void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); 113 113 114 + void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context); 115 + 114 116 void dcn32_init_blank( 115 117 struct dc *dc, 116 118 struct timing_generator *tg);
+3
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
··· 109 109 .get_dcc_en_bits = dcn10_get_dcc_en_bits, 110 110 .commit_subvp_config = dcn32_commit_subvp_config, 111 111 .enable_phantom_streams = dcn32_enable_phantom_streams, 112 + .disable_phantom_streams = dcn32_disable_phantom_streams, 112 113 .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, 113 114 .update_visual_confirm_color = dcn10_update_visual_confirm_color, 114 115 .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, ··· 160 159 .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, 161 160 .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, 162 161 .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, 162 + .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, 163 + .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, 163 164 }; 164 165 165 166 void dcn32_hw_sequencer_init_functions(struct dc *dc)
+1
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
··· 379 379 struct dc_cursor_attributes *cursor_attr); 380 380 void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); 381 381 void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); 382 + void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context); 382 383 void (*subvp_pipe_control_lock)(struct dc *dc, 383 384 struct dc_state *context, 384 385 bool lock,
+7
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
··· 165 165 void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); 166 166 void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, 167 167 struct dc_state *context); 168 + enum dc_status (*apply_single_controller_ctx_to_hw)( 169 + struct pipe_ctx *pipe_ctx, 170 + struct dc_state *context, 171 + struct dc *dc); 168 172 bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); 169 173 #endif 174 + void (*reset_back_end_for_pipe)(struct dc *dc, 175 + struct pipe_ctx *pipe_ctx, 176 + struct dc_state *context); 170 177 }; 171 178 172 179 struct dce_hwseq {
+8 -12
drivers/gpu/drm/amd/display/dc/inc/resource.h
··· 427 427 int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe); 428 428 429 429 /* 430 - * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice 431 - * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it 432 - * will have 4 pieces of slice. 433 - * return - 0 if pipe is not used for a plane with MPCC combine. otherwise 434 - * the number of MPC "cuts" for the plane. 430 + * Get the number of MPC slices associated with the pipe. 431 + * The function returns 0 if the pipe is not associated with an MPC combine 432 + * pipe topology. 435 433 */ 436 - int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head); 434 + int resource_get_mpc_slice_count(const struct pipe_ctx *pipe); 437 435 438 436 /* 439 - * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice 440 - * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it 441 - * will have 4 pieces of slice. 442 - * return - 0 if pipe is not used for ODM combine. otherwise 443 - * the number of ODM "cuts" for the timing. 437 + * Get the number of ODM slices associated with the pipe. 438 + * The function returns 0 if the pipe is not associated with an ODM combine 439 + * pipe topology. 444 440 */ 445 - int resource_get_odm_slice_count(const struct pipe_ctx *otg_master); 441 + int resource_get_odm_slice_count(const struct pipe_ctx *pipe); 446 442 447 443 /* Get the ODM slice index counting from 0 from left most slice */ 448 444 int resource_get_odm_slice_index(const struct pipe_ctx *opp_head);
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
··· 999 999 vpg = dcn301_vpg_create(ctx, vpg_inst); 1000 1000 afmt = dcn301_afmt_create(ctx, afmt_inst); 1001 1001 1002 - if (!enc1 || !vpg || !afmt) { 1002 + if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) { 1003 1003 kfree(enc1); 1004 1004 kfree(vpg); 1005 1005 kfree(afmt);
+15 -1
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 1829 1829 dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); 1830 1830 DC_FP_END(); 1831 1831 pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; 1832 - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; 1832 + if (dc->config.enable_windowed_mpo_odm && 1833 + dc->debug.enable_single_display_2to1_odm_policy) { 1834 + switch (resource_get_odm_slice_count(pipe)) { 1835 + case 2: 1836 + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; 1837 + break; 1838 + case 4: 1839 + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; 1840 + break; 1841 + default: 1842 + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; 1843 + } 1844 + } else { 1845 + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; 1846 + } 1833 1847 pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet 1834 1848 pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; 1835 1849 pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
+3 -2
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 780 780 .disable_z10 = false, 781 781 .ignore_pg = true, 782 782 .psp_disabled_wa = true, 783 - .ips2_eval_delay_us = 200, 784 - .ips2_entry_delay_us = 400, 783 + .ips2_eval_delay_us = 1650, 784 + .ips2_entry_delay_us = 800, 785 785 .static_screen_wait_frames = 2, 786 786 }; 787 787 ··· 2130 2130 dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; 2131 2131 dc->dml2_options.use_native_pstate_optimization = true; 2132 2132 dc->dml2_options.use_native_soc_bb_construction = true; 2133 + dc->dml2_options.minimize_dispclk_using_odm = false; 2133 2134 if (dc->config.EnableMinDispClkODM) 2134 2135 dc->dml2_options.minimize_dispclk_using_odm = true; 2135 2136 dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
+1 -1
drivers/gpu/drm/i915/Kconfig
··· 140 140 141 141 Note that this driver only supports newer device from Broadwell on. 142 142 For further information and setup guide, you can visit: 143 - http://01.org/igvt-g. 143 + https://github.com/intel/gvt-linux/wiki. 144 144 145 145 If in doubt, say "N". 146 146
+1 -2
drivers/gpu/drm/i915/gvt/handlers.c
··· 2849 2849 for (i = start; i < end; i += 4) { 2850 2850 p = intel_gvt_find_mmio_info(gvt, i); 2851 2851 if (p) { 2852 - WARN(1, "dup mmio definition offset %x\n", 2853 - info->offset); 2852 + WARN(1, "dup mmio definition offset %x\n", i); 2854 2853 2855 2854 /* We return -EEXIST here to make GVT-g load fail. 2856 2855 * So duplicated MMIO can be found as soon as
+1 -1
drivers/gpu/drm/i915/intel_gvt.c
··· 41 41 * To virtualize GPU resources GVT-g driver depends on hypervisor technology 42 42 * e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability 43 43 * and be virtualized within GVT-g device module. More architectural design 44 - * doc is available on https://01.org/group/2230/documentation-list. 44 + * doc is available on https://github.com/intel/gvt-linux/wiki. 45 45 */ 46 46 47 47 static LIST_HEAD(intel_gvt_devices);
+2 -6
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 144 144 * to track crtc in the disable() hook which is called 145 145 * _after_ encoder_mask is cleared. 146 146 * @connector: If a mode is set, cached pointer to the active connector 147 - * @crtc_kickoff_cb: Callback into CRTC that will flush & start 148 - * all CTL paths 149 - * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb 150 - * @debugfs_root: Debug file system root file node 151 147 * @enc_lock: Lock around physical encoder 152 148 * create/destroy/enable/disable 153 149 * @frame_busy_mask: Bitmask tracking which phys_enc we are still ··· 2068 2072 } 2069 2073 2070 2074 /* reset the merge 3D HW block */ 2071 - if (phys_enc->hw_pp->merge_3d) { 2075 + if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { 2072 2076 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, 2073 2077 BLEND_3D_NONE); 2074 2078 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) ··· 2099 2103 if (phys_enc->hw_wb) 2100 2104 intf_cfg.wb = phys_enc->hw_wb->idx; 2101 2105 2102 - if (phys_enc->hw_pp->merge_3d) 2106 + if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) 2103 2107 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; 2104 2108 2105 2109 if (ctl->ops.reset_intf_cfg)
+2 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
··· 29 29 /** 30 30 * struct dpu_rm_requirements - Reservation requirements parameter bundle 31 31 * @topology: selected topology for the display 32 - * @hw_res: Hardware resources required as reported by the encoders 33 32 */ 34 33 struct dpu_rm_requirements { 35 34 struct msm_display_topology topology; ··· 203 204 * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary 204 205 * @rm: dpu resource manager handle 205 206 * @primary_idx: index of primary mixer in rm->mixer_blks[] 207 + * 208 + * Returns: lm peer mixed id on success or %-EINVAL on error 206 209 */ 207 210 static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx) 208 211 {
-5
drivers/gpu/drm/msm/dp/dp_ctrl.c
··· 135 135 tbd = dp_link_get_test_bits_depth(ctrl->link, 136 136 ctrl->panel->dp_mode.bpp); 137 137 138 - if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) { 139 - pr_debug("BIT_DEPTH not set. Configure default\n"); 140 - tbd = DP_TEST_BIT_DEPTH_8; 141 - } 142 - 143 138 config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; 144 139 145 140 /* Num of Lanes */
+14 -8
drivers/gpu/drm/msm/dp/dp_link.c
··· 7 7 8 8 #include <drm/drm_print.h> 9 9 10 + #include "dp_reg.h" 10 11 #include "dp_link.h" 11 12 #include "dp_panel.h" 12 13 ··· 1083 1082 1084 1083 int dp_link_get_colorimetry_config(struct dp_link *dp_link) 1085 1084 { 1086 - u32 cc; 1085 + u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB; 1087 1086 struct dp_link_private *link; 1088 1087 1089 1088 if (!dp_link) { ··· 1097 1096 * Unless a video pattern CTS test is ongoing, use RGB_VESA 1098 1097 * Only RGB_VESA and RGB_CEA supported for now 1099 1098 */ 1100 - if (dp_link_is_video_pattern_requested(link)) 1101 - cc = link->dp_link.test_video.test_dyn_range; 1102 - else 1103 - cc = DP_TEST_DYNAMIC_RANGE_VESA; 1099 + if (dp_link_is_video_pattern_requested(link)) { 1100 + if (link->dp_link.test_video.test_dyn_range & 1101 + DP_TEST_DYNAMIC_RANGE_CEA) 1102 + cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB; 1103 + } 1104 1104 1105 1105 return cc; 1106 1106 } ··· 1181 1179 u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) 1182 1180 { 1183 1181 u32 tbd; 1182 + struct dp_link_private *link; 1183 + 1184 + link = container_of(dp_link, struct dp_link_private, dp_link); 1184 1185 1185 1186 /* 1186 1187 * Few simplistic rules and assumptions made here: ··· 1201 1196 tbd = DP_TEST_BIT_DEPTH_10; 1202 1197 break; 1203 1198 default: 1204 - tbd = DP_TEST_BIT_DEPTH_UNKNOWN; 1199 + drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n", 1200 + bpp); 1201 + tbd = DP_TEST_BIT_DEPTH_8; 1205 1202 break; 1206 1203 } 1207 1204 1208 - if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN) 1209 - tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); 1205 + tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); 1210 1206 1211 1207 return tbd; 1212 1208 }
+3
drivers/gpu/drm/msm/dp/dp_reg.h
··· 143 143 #define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001) 144 144 #define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005) 145 145 146 + #define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0) 147 + #define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04) 148 + 146 149 #define REG_DP_VALID_BOUNDARY (0x00000030) 147 150 #define REG_DP_VALID_BOUNDARY_2 (0x00000034) 148 151
+1
drivers/gpu/drm/msm/msm_mdss.c
··· 562 562 .ubwc_enc_version = UBWC_2_0, 563 563 .ubwc_dec_version = UBWC_2_0, 564 564 .highest_bank_bit = 1, 565 + .reg_bus_bw = 76800, 565 566 }; 566 567 567 568 static const struct msm_mdss_data sdm845_data = {
+1 -1
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
··· 9 9 #define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT) 10 10 11 11 struct nvkm_gsp_mem { 12 - u32 size; 12 + size_t size; 13 13 void *data; 14 14 dma_addr_t addr; 15 15 };
+47 -30
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
··· 997 997 return 0; 998 998 } 999 999 1000 + static void 1001 + nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) 1002 + { 1003 + if (mem->data) { 1004 + /* 1005 + * Poison the buffer to catch any unexpected access from 1006 + * GSP-RM if the buffer was prematurely freed. 1007 + */ 1008 + memset(mem->data, 0xFF, mem->size); 1009 + 1010 + dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); 1011 + memset(mem, 0, sizeof(*mem)); 1012 + } 1013 + } 1014 + 1015 + static int 1016 + nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) 1017 + { 1018 + mem->size = size; 1019 + mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); 1020 + if (WARN_ON(!mem->data)) 1021 + return -ENOMEM; 1022 + 1023 + return 0; 1024 + } 1025 + 1000 1026 static int 1001 1027 r535_gsp_postinit(struct nvkm_gsp *gsp) 1002 1028 { ··· 1050 1024 1051 1025 nvkm_inth_allow(&gsp->subdev.inth); 1052 1026 nvkm_wr32(device, 0x110004, 0x00000040); 1027 + 1028 + /* Release the DMA buffers that were needed only for boot and init */ 1029 + nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw); 1030 + nvkm_gsp_mem_dtor(gsp, &gsp->libos); 1031 + nvkm_gsp_mem_dtor(gsp, &gsp->rmargs); 1032 + nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta); 1033 + 1053 1034 return ret; 1054 1035 } 1055 1036 ··· 1565 1532 return 0; 1566 1533 } 1567 1534 1568 - static void 1569 - nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) 1570 - { 1571 - if (mem->data) { 1572 - dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); 1573 - mem->data = NULL; 1574 - } 1575 - } 1576 - 1577 - static int 1578 - nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem) 1579 - { 1580 - mem->size = size; 1581 - mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); 1582 - if (WARN_ON(!mem->data)) 1583 - return -ENOMEM; 1584 - 1585 - return 0; 1586 - } 1587 - 1588 - 1589 1535 static int 1590 
1536 r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) 1591 1537 { ··· 1950 1938 * See kgspCreateRadix3_IMPL 1951 1939 */ 1952 1940 static int 1953 - nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size, 1941 + nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, 1954 1942 struct nvkm_gsp_radix3 *rx3) 1955 1943 { 1956 1944 u64 addr; 1957 1945 1958 1946 for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { 1959 1947 u64 *ptes; 1960 - int idx; 1948 + size_t bufsize; 1949 + int ret, idx; 1961 1950 1962 - rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); 1963 - rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size, 1964 - &rx3->mem[i].addr, GFP_KERNEL); 1965 - if (WARN_ON(!rx3->mem[i].data)) 1966 - return -ENOMEM; 1951 + bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); 1952 + ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]); 1953 + if (ret) 1954 + return ret; 1967 1955 1968 1956 ptes = rx3->mem[i].data; 1969 1957 if (i == 2) { ··· 2003 1991 if (ret) 2004 1992 return ret; 2005 1993 2006 - ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3); 1994 + ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3); 2007 1995 if (ret) 2008 1996 return ret; 2009 1997 ··· 2162 2150 mutex_destroy(&gsp->cmdq.mutex); 2163 2151 2164 2152 r535_gsp_dtor_fws(gsp); 2153 + 2154 + nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem); 2155 + nvkm_gsp_mem_dtor(gsp, &gsp->loginit); 2156 + nvkm_gsp_mem_dtor(gsp, &gsp->logintr); 2157 + nvkm_gsp_mem_dtor(gsp, &gsp->logrm); 2165 2158 } 2166 2159 2167 2160 int ··· 2211 2194 memcpy(gsp->sig.data, data, size); 2212 2195 2213 2196 /* Build radix3 page table for ELF image. */ 2214 - ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); 2197 + ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); 2215 2198 if (ret) 2216 2199 return ret; 2217 2200
+9 -6
drivers/gpu/drm/scheduler/sched_main.c
··· 1178 1178 struct drm_sched_entity *entity; 1179 1179 struct dma_fence *fence; 1180 1180 struct drm_sched_fence *s_fence; 1181 - struct drm_sched_job *sched_job = NULL; 1181 + struct drm_sched_job *sched_job; 1182 1182 int r; 1183 1183 1184 1184 if (READ_ONCE(sched->pause_submit)) 1185 1185 return; 1186 1186 1187 1187 /* Find entity with a ready job */ 1188 - while (!sched_job && (entity = drm_sched_select_entity(sched))) { 1189 - sched_job = drm_sched_entity_pop_job(entity); 1190 - if (!sched_job) 1191 - complete_all(&entity->entity_idle); 1192 - } 1188 + entity = drm_sched_select_entity(sched); 1193 1189 if (!entity) 1194 1190 return; /* No more work */ 1191 + 1192 + sched_job = drm_sched_entity_pop_job(entity); 1193 + if (!sched_job) { 1194 + complete_all(&entity->entity_idle); 1195 + drm_sched_run_job_queue(sched); 1196 + return; 1197 + } 1195 1198 1196 1199 s_fence = sched_job->s_fence; 1197 1200
-6
drivers/gpu/drm/xe/xe_display.c
··· 134 134 135 135 int xe_display_init_nommio(struct xe_device *xe) 136 136 { 137 - int err; 138 - 139 137 if (!xe->info.enable_display) 140 138 return 0; 141 139 ··· 142 144 143 145 /* This must be called before any calls to HAS_PCH_* */ 144 146 intel_detect_pch(xe); 145 - 146 - err = intel_power_domains_init(xe); 147 - if (err) 148 - return err; 149 147 150 148 return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe); 151 149 }
+6 -2
drivers/gpu/drm/xe/xe_exec_queue.c
··· 926 926 * @q: The exec queue 927 927 * @vm: The VM the engine does a bind or exec for 928 928 * 929 - * Get last fence, does not take a ref 929 + * Get last fence, takes a ref 930 930 * 931 931 * Returns: last fence if not signaled, dma fence stub if signaled 932 932 */ 933 933 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, 934 934 struct xe_vm *vm) 935 935 { 936 + struct dma_fence *fence; 937 + 936 938 xe_exec_queue_last_fence_lockdep_assert(q, vm); 937 939 938 940 if (q->last_fence && 939 941 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) 940 942 xe_exec_queue_last_fence_put(q, vm); 941 943 942 - return q->last_fence ? q->last_fence : dma_fence_get_stub(); 944 + fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); 945 + dma_fence_get(fence); 946 + return fence; 943 947 } 944 948 945 949 /**
+4 -1
drivers/gpu/drm/xe/xe_gt.c
··· 437 437 * USM has its only SA pool to non-block behind user operations 438 438 */ 439 439 if (gt_to_xe(gt)->info.has_usm) { 440 - gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16); 440 + struct xe_device *xe = gt_to_xe(gt); 441 + 442 + gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), 443 + IS_DGFX(xe) ? SZ_1M : SZ_512K, 16); 441 444 if (IS_ERR(gt->usm.bb_pool)) { 442 445 err = PTR_ERR(gt->usm.bb_pool); 443 446 goto err_force_wake;
+1 -1
drivers/gpu/drm/xe/xe_gt_pagefault.c
··· 335 335 return -EPROTO; 336 336 337 337 asid = FIELD_GET(PFD_ASID, msg[1]); 338 - pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE]; 338 + pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE); 339 339 340 340 spin_lock_irqsave(&pf_queue->lock, flags); 341 341 full = pf_queue_full(pf_queue);
+22 -6
drivers/gpu/drm/xe/xe_migrate.c
··· 170 170 if (!IS_DGFX(xe)) { 171 171 /* Write out batch too */ 172 172 m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; 173 - if (xe->info.has_usm) { 174 - batch = tile->primary_gt->usm.bb_pool->bo; 175 - m->usm_batch_base_ofs = m->batch_base_ofs; 176 - } 177 - 178 173 for (i = 0; i < batch->size; 179 174 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : 180 175 XE_PAGE_SIZE) { ··· 179 184 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, 180 185 entry); 181 186 level++; 187 + } 188 + if (xe->info.has_usm) { 189 + xe_tile_assert(tile, batch->size == SZ_1M); 190 + 191 + batch = tile->primary_gt->usm.bb_pool->bo; 192 + m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M; 193 + xe_tile_assert(tile, batch->size == SZ_512K); 194 + 195 + for (i = 0; i < batch->size; 196 + i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : 197 + XE_PAGE_SIZE) { 198 + entry = vm->pt_ops->pte_encode_bo(batch, i, 199 + pat_index, 0); 200 + 201 + xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, 202 + entry); 203 + level++; 204 + } 182 205 } 183 206 } else { 184 207 u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); ··· 1217 1204 } 1218 1205 if (q) { 1219 1206 fence = xe_exec_queue_last_fence_get(q, vm); 1220 - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1207 + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 1208 + dma_fence_put(fence); 1221 1209 return false; 1210 + } 1211 + dma_fence_put(fence); 1222 1212 } 1223 1213 1224 1214 return true;
-1
drivers/gpu/drm/xe/xe_sched_job.c
··· 274 274 struct dma_fence *fence; 275 275 276 276 fence = xe_exec_queue_last_fence_get(job->q, vm); 277 - dma_fence_get(fence); 278 277 279 278 return drm_sched_job_add_dependency(&job->drm, fence); 280 279 }
-2
drivers/gpu/drm/xe/xe_sync.c
··· 307 307 /* Easy case... */ 308 308 if (!num_in_fence) { 309 309 fence = xe_exec_queue_last_fence_get(q, vm); 310 - dma_fence_get(fence); 311 310 return fence; 312 311 } 313 312 ··· 321 322 } 322 323 } 323 324 fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm); 324 - dma_fence_get(fences[current_fence - 1]); 325 325 cf = dma_fence_array_create(num_in_fence, fences, 326 326 vm->composite_fence_ctx, 327 327 vm->composite_fence_seqno++,
+24 -38
drivers/gpu/drm/xe/xe_vm.c
··· 37 37 #include "generated/xe_wa_oob.h" 38 38 #include "xe_wa.h" 39 39 40 - #define TEST_VM_ASYNC_OPS_ERROR 41 - 42 40 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) 43 41 { 44 42 return vm->gpuvm.r_obj; ··· 112 114 num_pages - pinned, 113 115 read_only ? 0 : FOLL_WRITE, 114 116 &pages[pinned]); 115 - if (ret < 0) { 116 - if (in_kthread) 117 - ret = 0; 117 + if (ret < 0) 118 118 break; 119 - } 120 119 121 120 pinned += ret; 122 121 ret = 0; ··· 1979 1984 xe_exec_queue_last_fence_get(wait_exec_queue, vm); 1980 1985 1981 1986 xe_sync_entry_signal(&syncs[i], NULL, fence); 1987 + dma_fence_put(fence); 1982 1988 } 1983 1989 } 1984 1990 ··· 2060 2064 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL; 2061 2065 struct drm_gpuva_ops *ops; 2062 2066 struct drm_gpuva_op *__op; 2063 - struct xe_vma_op *op; 2064 2067 struct drm_gpuvm_bo *vm_bo; 2065 2068 int err; 2066 2069 ··· 2105 2110 } 2106 2111 if (IS_ERR(ops)) 2107 2112 return ops; 2108 - 2109 - #ifdef TEST_VM_ASYNC_OPS_ERROR 2110 - if (operation & FORCE_ASYNC_OP_ERROR) { 2111 - op = list_first_entry_or_null(&ops->list, struct xe_vma_op, 2112 - base.entry); 2113 - if (op) 2114 - op->inject_error = true; 2115 - } 2116 - #endif 2117 2113 2118 2114 drm_gpuva_for_each_op(__op, ops) { 2119 2115 struct xe_vma_op *op = gpuva_op_to_vma_op(__op); ··· 2185 2199 return SZ_1G; 2186 2200 else if (vma->gpuva.flags & XE_VMA_PTE_2M) 2187 2201 return SZ_2M; 2202 + else if (vma->gpuva.flags & XE_VMA_PTE_4K) 2203 + return SZ_4K; 2188 2204 2189 - return SZ_4K; 2205 + return SZ_1G; /* Uninitialized, used max size */ 2190 2206 } 2191 2207 2192 2208 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size) ··· 2518 2530 } 2519 2531 drm_exec_fini(&exec); 2520 2532 2521 - if (err == -EAGAIN && xe_vma_is_userptr(vma)) { 2533 + if (err == -EAGAIN) { 2522 2534 lockdep_assert_held_write(&vm->lock); 2523 - err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); 2524 - if (!err) 2525 - goto retry_userptr; 2526 2535 2527 - 
trace_xe_vma_fail(vma); 2536 + if (op->base.op == DRM_GPUVA_OP_REMAP) { 2537 + if (!op->remap.unmap_done) 2538 + vma = gpuva_to_vma(op->base.remap.unmap->va); 2539 + else if (op->remap.prev) 2540 + vma = op->remap.prev; 2541 + else 2542 + vma = op->remap.next; 2543 + } 2544 + 2545 + if (xe_vma_is_userptr(vma)) { 2546 + err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); 2547 + if (!err) 2548 + goto retry_userptr; 2549 + 2550 + trace_xe_vma_fail(vma); 2551 + } 2528 2552 } 2529 2553 2530 2554 return err; ··· 2547 2547 int ret = 0; 2548 2548 2549 2549 lockdep_assert_held_write(&vm->lock); 2550 - 2551 - #ifdef TEST_VM_ASYNC_OPS_ERROR 2552 - if (op->inject_error) { 2553 - op->inject_error = false; 2554 - return -ENOMEM; 2555 - } 2556 - #endif 2557 2550 2558 2551 switch (op->base.op) { 2559 2552 case DRM_GPUVA_OP_MAP: ··· 2662 2669 { 2663 2670 int i; 2664 2671 2665 - for (i = num_ops_list - 1; i; ++i) { 2672 + for (i = num_ops_list - 1; i >= 0; --i) { 2666 2673 struct drm_gpuva_ops *__ops = ops[i]; 2667 2674 struct drm_gpuva_op *__op; 2668 2675 ··· 2707 2714 return 0; 2708 2715 } 2709 2716 2710 - #ifdef TEST_VM_ASYNC_OPS_ERROR 2711 - #define SUPPORTED_FLAGS \ 2712 - (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \ 2713 - DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff) 2714 - #else 2715 2717 #define SUPPORTED_FLAGS \ 2716 2718 (DRM_XE_VM_BIND_FLAG_READONLY | \ 2717 - DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \ 2718 - 0xffff) 2719 - #endif 2719 + DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL) 2720 2720 #define XE_64K_PAGE_MASK 0xffffull 2721 2721 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) 2722 2722
-8
drivers/gpu/drm/xe/xe_vm_types.h
··· 21 21 struct xe_sync_entry; 22 22 struct xe_vm; 23 23 24 - #define TEST_VM_ASYNC_OPS_ERROR 25 - #define FORCE_ASYNC_OP_ERROR BIT(31) 26 - 27 24 #define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS 28 25 #define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1) 29 26 #define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2) ··· 356 359 struct list_head link; 357 360 /** @flags: operation flags */ 358 361 enum xe_vma_op_flags flags; 359 - 360 - #ifdef TEST_VM_ASYNC_OPS_ERROR 361 - /** @inject_error: inject error to test async op error handling */ 362 - bool inject_error; 363 - #endif 364 362 365 363 union { 366 364 /** @map: VMA map operation specific data */
+10 -3
drivers/hid/hid-logitech-hidpp.c
··· 203 203 struct hidpp_scroll_counter vertical_wheel_counter; 204 204 205 205 u8 wireless_feature_index; 206 + 207 + bool connected_once; 206 208 }; 207 209 208 210 /* HID++ 1.0 error codes */ ··· 990 988 hidpp->protocol_minor = response.rap.params[1]; 991 989 992 990 print_version: 993 - hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n", 994 - hidpp->protocol_major, hidpp->protocol_minor); 991 + if (!hidpp->connected_once) { 992 + hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n", 993 + hidpp->protocol_major, hidpp->protocol_minor); 994 + hidpp->connected_once = true; 995 + } else 996 + hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n", 997 + hidpp->protocol_major, hidpp->protocol_minor); 995 998 return 0; 996 999 } 997 1000 ··· 4191 4184 /* Get device version to check if it is connected */ 4192 4185 ret = hidpp_root_get_protocol_version(hidpp); 4193 4186 if (ret) { 4194 - hid_info(hidpp->hid_dev, "Disconnected\n"); 4187 + hid_dbg(hidpp->hid_dev, "Disconnected\n"); 4195 4188 if (hidpp->battery.ps) { 4196 4189 hidpp->battery.online = false; 4197 4190 hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN;
+4
drivers/hid/hid-multitouch.c
··· 2153 2153 2154 2154 { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, 2155 2155 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 2156 + USB_VENDOR_ID_SYNAPTICS, 0xcddc) }, 2157 + 2158 + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, 2159 + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 2156 2160 USB_VENDOR_ID_SYNAPTICS, 0xce08) }, 2157 2161 2158 2162 { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+2
drivers/hid/intel-ish-hid/ishtp/bus.c
··· 722 722 spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags); 723 723 list_for_each_entry(cl, &ishtp_dev->cl_list, link) { 724 724 cl->state = ISHTP_CL_DISCONNECTED; 725 + if (warm_reset && cl->device->reference_count) 726 + continue; 725 727 726 728 /* 727 729 * Wake any pending process. The waiter would check dev->state
+3 -1
drivers/hid/intel-ish-hid/ishtp/client.c
··· 49 49 list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list) 50 50 if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) { 51 51 list_del(&rb->list); 52 - ishtp_io_rb_free(rb); 52 + spin_lock(&cl->free_list_spinlock); 53 + list_add_tail(&rb->list, &cl->free_rb_list.list); 54 + spin_unlock(&cl->free_list_spinlock); 53 55 } 54 56 spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags); 55 57 }
+43 -20
drivers/hid/wacom_sys.c
··· 2087 2087 return 0; 2088 2088 } 2089 2089 2090 - static int wacom_register_inputs(struct wacom *wacom) 2090 + static int wacom_setup_inputs(struct wacom *wacom) 2091 2091 { 2092 2092 struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; 2093 2093 struct wacom_wac *wacom_wac = &(wacom->wacom_wac); ··· 2106 2106 input_free_device(pen_input_dev); 2107 2107 wacom_wac->pen_input = NULL; 2108 2108 pen_input_dev = NULL; 2109 - } else { 2110 - error = input_register_device(pen_input_dev); 2111 - if (error) 2112 - goto fail; 2113 2109 } 2114 2110 2115 2111 error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac); ··· 2114 2118 input_free_device(touch_input_dev); 2115 2119 wacom_wac->touch_input = NULL; 2116 2120 touch_input_dev = NULL; 2117 - } else { 2118 - error = input_register_device(touch_input_dev); 2119 - if (error) 2120 - goto fail; 2121 2121 } 2122 2122 2123 2123 error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); ··· 2122 2130 input_free_device(pad_input_dev); 2123 2131 wacom_wac->pad_input = NULL; 2124 2132 pad_input_dev = NULL; 2125 - } else { 2133 + } 2134 + 2135 + return 0; 2136 + } 2137 + 2138 + static int wacom_register_inputs(struct wacom *wacom) 2139 + { 2140 + struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; 2141 + struct wacom_wac *wacom_wac = &(wacom->wacom_wac); 2142 + int error = 0; 2143 + 2144 + pen_input_dev = wacom_wac->pen_input; 2145 + touch_input_dev = wacom_wac->touch_input; 2146 + pad_input_dev = wacom_wac->pad_input; 2147 + 2148 + if (pen_input_dev) { 2149 + error = input_register_device(pen_input_dev); 2150 + if (error) 2151 + goto fail; 2152 + } 2153 + 2154 + if (touch_input_dev) { 2155 + error = input_register_device(touch_input_dev); 2156 + if (error) 2157 + goto fail; 2158 + } 2159 + 2160 + if (pad_input_dev) { 2126 2161 error = input_register_device(pad_input_dev); 2127 2162 if (error) 2128 2163 goto fail; ··· 2402 2383 if (error) 2403 2384 goto fail; 2404 2385 2386 + 
error = wacom_setup_inputs(wacom); 2387 + if (error) 2388 + goto fail; 2389 + 2390 + if (features->type == HID_GENERIC) 2391 + connect_mask |= HID_CONNECT_DRIVER; 2392 + 2393 + /* Regular HID work starts now */ 2394 + error = hid_hw_start(hdev, connect_mask); 2395 + if (error) { 2396 + hid_err(hdev, "hw start failed\n"); 2397 + goto fail; 2398 + } 2399 + 2405 2400 error = wacom_register_inputs(wacom); 2406 2401 if (error) 2407 2402 goto fail; ··· 2428 2395 error = wacom_initialize_remotes(wacom); 2429 2396 if (error) 2430 2397 goto fail; 2431 - } 2432 - 2433 - if (features->type == HID_GENERIC) 2434 - connect_mask |= HID_CONNECT_DRIVER; 2435 - 2436 - /* Regular HID work starts now */ 2437 - error = hid_hw_start(hdev, connect_mask); 2438 - if (error) { 2439 - hid_err(hdev, "hw start failed\n"); 2440 - goto fail; 2441 2398 } 2442 2399 2443 2400 if (!wireless) {
+8 -1
drivers/hid/wacom_wac.c
··· 2575 2575 wacom_wac->hid_data.tipswitch); 2576 2576 input_report_key(input, wacom_wac->tool[0], sense); 2577 2577 if (wacom_wac->serial[0]) { 2578 - input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); 2578 + /* 2579 + * xf86-input-wacom does not accept a serial number 2580 + * of '0'. Report the low 32 bits if possible, but 2581 + * if they are zero, report the upper ones instead. 2582 + */ 2583 + __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu; 2584 + __u32 serial_hi = wacom_wac->serial[0] >> 32; 2585 + input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi)); 2579 2586 input_report_abs(input, ABS_MISC, sense ? id : 0); 2580 2587 } 2581 2588
+7
drivers/hwmon/aspeed-pwm-tacho.c
··· 195 195 u8 fan_tach_ch_source[MAX_ASPEED_FAN_TACH_CHANNELS]; 196 196 struct aspeed_cooling_device *cdev[8]; 197 197 const struct attribute_group *groups[3]; 198 + /* protects access to shared ASPEED_PTCR_RESULT */ 199 + struct mutex tach_lock; 198 200 }; 199 201 200 202 enum type { TYPEM, TYPEN, TYPEO }; ··· 531 529 u8 fan_tach_ch_source, type, mode, both; 532 530 int ret; 533 531 532 + mutex_lock(&priv->tach_lock); 533 + 534 534 regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0); 535 535 regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch); 536 536 ··· 549 545 (val & RESULT_STATUS_MASK), 550 546 ASPEED_RPM_STATUS_SLEEP_USEC, 551 547 usec); 548 + 549 + mutex_unlock(&priv->tach_lock); 552 550 553 551 /* return -ETIMEDOUT if we didn't get an answer. */ 554 552 if (ret) ··· 921 915 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 922 916 if (!priv) 923 917 return -ENOMEM; 918 + mutex_init(&priv->tach_lock); 924 919 priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs, 925 920 &aspeed_pwm_tacho_regmap_config); 926 921 if (IS_ERR(priv->regmap))
+22 -20
drivers/hwmon/coretemp.c
··· 41 41 42 42 #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */ 43 43 #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 44 - #define NUM_REAL_CORES 128 /* Number of Real cores per cpu */ 44 + #define NUM_REAL_CORES 512 /* Number of Real cores per cpu */ 45 45 #define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */ 46 46 #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 47 47 #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) ··· 419 419 } 420 420 421 421 static int create_core_attrs(struct temp_data *tdata, struct device *dev, 422 - int attr_no) 422 + int index) 423 423 { 424 424 int i; 425 425 static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, ··· 431 431 }; 432 432 433 433 for (i = 0; i < tdata->attr_size; i++) { 434 + /* 435 + * We map the attr number to core id of the CPU 436 + * The attr number is always core id + 2 437 + * The Pkgtemp will always show up as temp1_*, if available 438 + */ 439 + int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2; 440 + 434 441 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, 435 442 "temp%d_%s", attr_no, suffixes[i]); 436 443 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); 437 444 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; 438 445 tdata->sd_attrs[i].dev_attr.attr.mode = 0444; 439 446 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; 440 - tdata->sd_attrs[i].index = attr_no; 447 + tdata->sd_attrs[i].index = index; 441 448 tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr; 442 449 } 443 450 tdata->attr_group.attrs = tdata->attrs; ··· 502 495 struct platform_data *pdata = platform_get_drvdata(pdev); 503 496 struct cpuinfo_x86 *c = &cpu_data(cpu); 504 497 u32 eax, edx; 505 - int err, index, attr_no; 498 + int err, index; 506 499 507 500 if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) 508 501 return 0; 509 502 510 503 /* 511 - * Find attr number for sysfs: 512 - * We map the attr number to core id of the CPU 513 - * The attr number is always core id + 2 
514 - * The Pkgtemp will always show up as temp1_*, if available 504 + * Get the index of tdata in pdata->core_data[] 505 + * tdata for package: pdata->core_data[1] 506 + * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1] 515 507 */ 516 508 if (pkg_flag) { 517 - attr_no = PKG_SYSFS_ATTR_NO; 509 + index = PKG_SYSFS_ATTR_NO; 518 510 } else { 519 - index = ida_alloc(&pdata->ida, GFP_KERNEL); 511 + index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL); 520 512 if (index < 0) 521 513 return index; 522 - pdata->cpu_map[index] = topology_core_id(cpu); 523 - attr_no = index + BASE_SYSFS_ATTR_NO; 524 - } 525 514 526 - if (attr_no > MAX_CORE_DATA - 1) { 527 - err = -ERANGE; 528 - goto ida_free; 515 + pdata->cpu_map[index] = topology_core_id(cpu); 516 + index += BASE_SYSFS_ATTR_NO; 529 517 } 530 518 531 519 tdata = init_temp_data(cpu, pkg_flag); ··· 546 544 if (get_ttarget(tdata, &pdev->dev) >= 0) 547 545 tdata->attr_size++; 548 546 549 - pdata->core_data[attr_no] = tdata; 547 + pdata->core_data[index] = tdata; 550 548 551 549 /* Create sysfs interfaces */ 552 - err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no); 550 + err = create_core_attrs(tdata, pdata->hwmon_dev, index); 553 551 if (err) 554 552 goto exit_free; 555 553 556 554 return 0; 557 555 exit_free: 558 - pdata->core_data[attr_no] = NULL; 556 + pdata->core_data[index] = NULL; 559 557 kfree(tdata); 560 558 ida_free: 561 559 if (!pkg_flag) 562 - ida_free(&pdata->ida, index); 560 + ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO); 563 561 return err; 564 562 } 565 563
+5 -1
drivers/mmc/core/slot-gpio.c
··· 75 75 int mmc_gpio_get_ro(struct mmc_host *host) 76 76 { 77 77 struct mmc_gpio *ctx = host->slot.handler_priv; 78 + int cansleep; 78 79 79 80 if (!ctx || !ctx->ro_gpio) 80 81 return -ENOSYS; 81 82 82 - return gpiod_get_value_cansleep(ctx->ro_gpio); 83 + cansleep = gpiod_cansleep(ctx->ro_gpio); 84 + return cansleep ? 85 + gpiod_get_value_cansleep(ctx->ro_gpio) : 86 + gpiod_get_value(ctx->ro_gpio); 83 87 } 84 88 EXPORT_SYMBOL(mmc_gpio_get_ro); 85 89
+30
drivers/mmc/host/sdhci-pci-o2micro.c
··· 693 693 return 0; 694 694 } 695 695 696 + static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) 697 + { 698 + struct sdhci_pci_chip *chip; 699 + struct sdhci_pci_slot *slot = sdhci_priv(host); 700 + u32 scratch_32 = 0; 701 + u8 scratch_8 = 0; 702 + 703 + chip = slot->chip; 704 + 705 + if (mode == MMC_POWER_OFF) { 706 + /* UnLock WP */ 707 + pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8); 708 + scratch_8 &= 0x7f; 709 + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); 710 + 711 + /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */ 712 + pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32); 713 + scratch_32 &= ~(O2_SD_SEL_DLL); 714 + pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32); 715 + 716 + /* Lock WP */ 717 + pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8); 718 + scratch_8 |= 0x80; 719 + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); 720 + } 721 + 722 + sdhci_set_power(host, mode, vdd); 723 + } 724 + 696 725 static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) 697 726 { 698 727 struct sdhci_pci_chip *chip; ··· 1080 1051 .set_bus_width = sdhci_set_bus_width, 1081 1052 .reset = sdhci_reset, 1082 1053 .set_uhs_signaling = sdhci_set_uhs_signaling, 1054 + .set_power = sdhci_pci_o2_set_power, 1083 1055 }; 1084 1056 1085 1057 const struct sdhci_pci_fixes sdhci_o2 = {
+1
drivers/net/arcnet/arc-rawmode.c
··· 186 186 module_init(arcnet_raw_init); 187 187 module_exit(arcnet_raw_exit); 188 188 189 + MODULE_DESCRIPTION("ARCnet raw mode packet interface module"); 189 190 MODULE_LICENSE("GPL");
+1
drivers/net/arcnet/arc-rimi.c
··· 312 312 module_param(io, int, 0); 313 313 module_param(irq, int, 0); 314 314 module_param_string(device, device, sizeof(device), 0); 315 + MODULE_DESCRIPTION("ARCnet COM90xx RIM I chipset driver"); 315 316 MODULE_LICENSE("GPL"); 316 317 317 318 static struct net_device *my_dev;
+1
drivers/net/arcnet/capmode.c
··· 265 265 module_init(capmode_module_init); 266 266 module_exit(capmode_module_exit); 267 267 268 + MODULE_DESCRIPTION("ARCnet CAP mode packet interface module"); 268 269 MODULE_LICENSE("GPL");
+1
drivers/net/arcnet/com20020-pci.c
··· 61 61 module_param(backplane, int, 0); 62 62 module_param(clockp, int, 0); 63 63 module_param(clockm, int, 0); 64 + MODULE_DESCRIPTION("ARCnet COM20020 chipset PCI driver"); 64 65 MODULE_LICENSE("GPL"); 65 66 66 67 static void led_tx_set(struct led_classdev *led_cdev,
+1
drivers/net/arcnet/com20020.c
··· 399 399 EXPORT_SYMBOL(com20020_netdev_ops); 400 400 #endif 401 401 402 + MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver"); 402 403 MODULE_LICENSE("GPL"); 403 404 404 405 #ifdef MODULE
+1
drivers/net/arcnet/com20020_cs.c
··· 97 97 module_param(clockp, int, 0); 98 98 module_param(clockm, int, 0); 99 99 100 + MODULE_DESCRIPTION("ARCnet COM20020 chipset PCMCIA driver"); 100 101 MODULE_LICENSE("GPL"); 101 102 102 103 /*====================================================================*/
+1
drivers/net/arcnet/com90io.c
··· 350 350 module_param_hw(io, int, ioport, 0); 351 351 module_param_hw(irq, int, irq, 0); 352 352 module_param_string(device, device, sizeof(device), 0); 353 + MODULE_DESCRIPTION("ARCnet COM90xx IO mapped chipset driver"); 353 354 MODULE_LICENSE("GPL"); 354 355 355 356 #ifndef MODULE
+1
drivers/net/arcnet/com90xx.c
··· 645 645 TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count)); 646 646 } 647 647 648 + MODULE_DESCRIPTION("ARCnet COM90xx normal chipset driver"); 648 649 MODULE_LICENSE("GPL"); 649 650 650 651 static int __init com90xx_init(void)
+1
drivers/net/arcnet/rfc1051.c
··· 78 78 module_init(arcnet_rfc1051_init); 79 79 module_exit(arcnet_rfc1051_exit); 80 80 81 + MODULE_DESCRIPTION("ARCNet packet format (RFC 1051) module"); 81 82 MODULE_LICENSE("GPL"); 82 83 83 84 /* Determine a packet's protocol ID.
+1
drivers/net/arcnet/rfc1201.c
··· 35 35 36 36 #include "arcdevice.h" 37 37 38 + MODULE_DESCRIPTION("ARCNet packet format (RFC 1201) module"); 38 39 MODULE_LICENSE("GPL"); 39 40 40 41 static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
+2 -3
drivers/net/bonding/bond_main.c
··· 1819 1819 bond_for_each_slave(bond, slave, iter) 1820 1820 val &= slave->dev->xdp_features; 1821 1821 1822 + val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY; 1823 + 1822 1824 xdp_set_features_flag(bond_dev, val); 1823 1825 } 1824 1826 ··· 5911 5909 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) 5912 5910 bond_dev->features |= BOND_XFRM_FEATURES; 5913 5911 #endif /* CONFIG_XFRM_OFFLOAD */ 5914 - 5915 - if (bond_xdp_check(bond)) 5916 - bond_dev->xdp_features = NETDEV_XDP_ACT_MASK; 5917 5912 } 5918 5913 5919 5914 /* Destroy a bonding device.
+1 -1
drivers/net/can/dev/netlink.c
··· 346 346 /* Neither of TDC parameters nor TDC flags are 347 347 * provided: do calculation 348 348 */ 349 - can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming, 349 + can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt, 350 350 &priv->ctrlmode, priv->ctrlmode_supported); 351 351 } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly 352 352 * turned off. TDC is disabled: do nothing
+1
drivers/net/dsa/dsa_loop_bdinfo.c
··· 32 32 } 33 33 arch_initcall(dsa_loop_bdinfo_init) 34 34 35 + MODULE_DESCRIPTION("DSA mock-up switch driver"); 35 36 MODULE_LICENSE("GPL");
+6
drivers/net/ethernet/amd/pds_core/main.c
··· 451 451 452 452 static void pdsc_stop_health_thread(struct pdsc *pdsc) 453 453 { 454 + if (pdsc->pdev->is_virtfn) 455 + return; 456 + 454 457 timer_shutdown_sync(&pdsc->wdtimer); 455 458 if (pdsc->health_work.func) 456 459 cancel_work_sync(&pdsc->health_work); ··· 461 458 462 459 static void pdsc_restart_health_thread(struct pdsc *pdsc) 463 460 { 461 + if (pdsc->pdev->is_virtfn) 462 + return; 463 + 464 464 timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0); 465 465 mod_timer(&pdsc->wdtimer, jiffies + 1); 466 466 }
+3
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
··· 685 685 686 686 intf->rx_buf_order = get_order(RING_BUFFER_SIZE); 687 687 buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order); 688 + if (!buffer_pg) 689 + return -ENOMEM; 688 690 689 691 dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, 690 692 DMA_FROM_DEVICE); ··· 1095 1093 return 0; 1096 1094 1097 1095 err_reclaim_tx: 1096 + netif_napi_del(&intf->tx_napi); 1098 1097 bcmasp_reclaim_free_all_tx(intf); 1099 1098 err_phy_disconnect: 1100 1099 if (phydev)
+5 -7
drivers/net/ethernet/brocade/bna/bnad.c
··· 1091 1091 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. 1092 1092 */ 1093 1093 static void 1094 - bnad_tx_cleanup(struct delayed_work *work) 1094 + bnad_tx_cleanup(struct work_struct *work) 1095 1095 { 1096 1096 struct bnad_tx_info *tx_info = 1097 - container_of(work, struct bnad_tx_info, tx_cleanup_work); 1097 + container_of(work, struct bnad_tx_info, tx_cleanup_work.work); 1098 1098 struct bnad *bnad = NULL; 1099 1099 struct bna_tcb *tcb; 1100 1100 unsigned long flags; ··· 1170 1170 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. 1171 1171 */ 1172 1172 static void 1173 - bnad_rx_cleanup(void *work) 1173 + bnad_rx_cleanup(struct work_struct *work) 1174 1174 { 1175 1175 struct bnad_rx_info *rx_info = 1176 1176 container_of(work, struct bnad_rx_info, rx_cleanup_work); ··· 1991 1991 } 1992 1992 tx_info->tx = tx; 1993 1993 1994 - INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, 1995 - (work_func_t)bnad_tx_cleanup); 1994 + INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup); 1996 1995 1997 1996 /* Register ISR for the Tx object */ 1998 1997 if (intr_info->intr_type == BNA_INTR_T_MSIX) { ··· 2247 2248 rx_info->rx = rx; 2248 2249 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2249 2250 2250 - INIT_WORK(&rx_info->rx_cleanup_work, 2251 - (work_func_t)(bnad_rx_cleanup)); 2251 + INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup); 2252 2252 2253 2253 /* 2254 2254 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
+1 -1
drivers/net/ethernet/intel/i40e/i40e_dcb.c
··· 1523 1523 reg = rd32(hw, I40E_PRTDCB_RETSTCC(i)); 1524 1524 reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK | 1525 1525 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK | 1526 - I40E_PRTDCB_RETSTCC_ETSTC_SHIFT); 1526 + I40E_PRTDCB_RETSTCC_ETSTC_MASK); 1527 1527 reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK, 1528 1528 bw_share[i]); 1529 1529 reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK,
+10 -14
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 4926 4926 void i40e_vsi_stop_rings(struct i40e_vsi *vsi) 4927 4927 { 4928 4928 struct i40e_pf *pf = vsi->back; 4929 - int pf_q, err, q_end; 4929 + u32 pf_q, tx_q_end, rx_q_end; 4930 4930 4931 4931 /* When port TX is suspended, don't wait */ 4932 4932 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) 4933 4933 return i40e_vsi_stop_rings_no_wait(vsi); 4934 4934 4935 - q_end = vsi->base_queue + vsi->num_queue_pairs; 4936 - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) 4937 - i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); 4935 + tx_q_end = vsi->base_queue + 4936 + vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); 4937 + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) 4938 + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false); 4938 4939 4939 - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { 4940 - err = i40e_control_wait_rx_q(pf, pf_q, false); 4941 - if (err) 4942 - dev_info(&pf->pdev->dev, 4943 - "VSI seid %d Rx ring %d disable timeout\n", 4944 - vsi->seid, pf_q); 4945 - } 4940 + rx_q_end = vsi->base_queue + vsi->num_queue_pairs; 4941 + for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++) 4942 + i40e_control_rx_q(pf, pf_q, false); 4946 4943 4947 4944 msleep(I40E_DISABLE_TX_GAP_MSEC); 4948 - pf_q = vsi->base_queue; 4949 - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) 4945 + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) 4950 4946 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); 4951 4947 4952 4948 i40e_vsi_wait_queues_disabled(vsi); ··· 5356 5360 { 5357 5361 int v, ret = 0; 5358 5362 5359 - for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 5363 + for (v = 0; v < pf->num_alloc_vsi; v++) { 5360 5364 if (pf->vsi[v]) { 5361 5365 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); 5362 5366 if (ret)
+33 -5
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 2848 2848 (u8 *)&stats, sizeof(stats)); 2849 2849 } 2850 2850 2851 + /** 2852 + * i40e_can_vf_change_mac 2853 + * @vf: pointer to the VF info 2854 + * 2855 + * Return true if the VF is allowed to change its MAC filters, false otherwise 2856 + */ 2857 + static bool i40e_can_vf_change_mac(struct i40e_vf *vf) 2858 + { 2859 + /* If the VF MAC address has been set administratively (via the 2860 + * ndo_set_vf_mac command), then deny permission to the VF to 2861 + * add/delete unicast MAC addresses, unless the VF is trusted 2862 + */ 2863 + if (vf->pf_set_mac && !vf->trusted) 2864 + return false; 2865 + 2866 + return true; 2867 + } 2868 + 2851 2869 #define I40E_MAX_MACVLAN_PER_HW 3072 2852 2870 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ 2853 2871 (num_ports)) ··· 2925 2907 * The VF may request to set the MAC address filter already 2926 2908 * assigned to it so do not return an error in that case. 2927 2909 */ 2928 - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2929 - !is_multicast_ether_addr(addr) && vf->pf_set_mac && 2910 + if (!i40e_can_vf_change_mac(vf) && 2911 + !is_multicast_ether_addr(addr) && 2930 2912 !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2931 2913 dev_err(&pf->pdev->dev, 2932 2914 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); ··· 3132 3114 ret = -EINVAL; 3133 3115 goto error_param; 3134 3116 } 3135 - if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) 3136 - was_unimac_deleted = true; 3137 3117 } 3138 3118 vsi = pf->vsi[vf->lan_vsi_idx]; 3139 3119 3140 3120 spin_lock_bh(&vsi->mac_filter_hash_lock); 3141 3121 /* delete addresses from the list */ 3142 - for (i = 0; i < al->num_elements; i++) 3122 + for (i = 0; i < al->num_elements; i++) { 3123 + const u8 *addr = al->list[i].addr; 3124 + 3125 + /* Allow to delete VF primary MAC only if it was not set 3126 + * administratively by PF or if VF is trusted. 
3127 + */ 3128 + if (ether_addr_equal(addr, vf->default_lan_addr.addr) && 3129 + i40e_can_vf_change_mac(vf)) 3130 + was_unimac_deleted = true; 3131 + else 3132 + continue; 3133 + 3143 3134 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 3144 3135 ret = -EINVAL; 3145 3136 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3146 3137 goto error_param; 3147 3138 } 3139 + } 3148 3140 3149 3141 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3150 3142
+23 -2
drivers/net/ethernet/intel/ice/ice_lag.c
··· 152 152 } 153 153 154 154 /** 155 + * ice_pkg_has_lport_extract - check if lport extraction supported 156 + * @hw: HW struct 157 + */ 158 + static bool ice_pkg_has_lport_extract(struct ice_hw *hw) 159 + { 160 + int i; 161 + 162 + for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) { 163 + u16 offset; 164 + u8 fv_prot; 165 + 166 + ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i, 167 + &fv_prot, &offset); 168 + if (fv_prot == ICE_FV_PROT_MDID && 169 + offset == ICE_LP_EXT_BUF_OFFSET) 170 + return true; 171 + } 172 + return false; 173 + } 174 + 175 + /** 155 176 * ice_lag_find_primary - returns pointer to primary interfaces lag struct 156 177 * @lag: local interfaces lag struct 157 178 */ ··· 1227 1206 } 1228 1207 1229 1208 /** 1230 - * ice_lag_init_feature_support_flag - Check for NVM support for LAG 1209 + * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG 1231 1210 * @pf: PF struct 1232 1211 */ 1233 1212 static void ice_lag_init_feature_support_flag(struct ice_pf *pf) ··· 1240 1219 else 1241 1220 ice_clear_feature_support(pf, ICE_F_ROCE_LAG); 1242 1221 1243 - if (caps->sriov_lag) 1222 + if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw)) 1244 1223 ice_set_feature_support(pf, ICE_F_SRIOV_LAG); 1245 1224 else 1246 1225 ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+3
drivers/net/ethernet/intel/ice/ice_lag.h
··· 17 17 #define ICE_LAG_INVALID_PORT 0xFF 18 18 19 19 #define ICE_LAG_RESET_RETRIES 5 20 + #define ICE_SW_DEFAULT_PROFILE 0 21 + #define ICE_FV_PROT_MDID 255 22 + #define ICE_LP_EXT_BUF_OFFSET 32 20 23 21 24 struct ice_pf; 22 25 struct ice_vf;
+1 -1
drivers/net/ethernet/intel/igb/igb.h
··· 637 637 struct timespec64 period; 638 638 } perout[IGB_N_PEROUT]; 639 639 640 - char fw_version[32]; 640 + char fw_version[48]; 641 641 #ifdef CONFIG_IGB_HWMON 642 642 struct hwmon_buff *igb_hwmon_buff; 643 643 bool ets;
+18 -17
drivers/net/ethernet/intel/igb/igb_main.c
··· 3069 3069 { 3070 3070 struct e1000_hw *hw = &adapter->hw; 3071 3071 struct e1000_fw_version fw; 3072 - char *lbuf; 3073 3072 3074 3073 igb_get_fw_version(hw, &fw); 3075 3074 ··· 3076 3077 case e1000_i210: 3077 3078 case e1000_i211: 3078 3079 if (!(igb_get_flash_presence_i210(hw))) { 3079 - lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d", 3080 - fw.invm_major, fw.invm_minor, 3081 - fw.invm_img_type); 3080 + snprintf(adapter->fw_version, 3081 + sizeof(adapter->fw_version), 3082 + "%2d.%2d-%d", 3083 + fw.invm_major, fw.invm_minor, 3084 + fw.invm_img_type); 3082 3085 break; 3083 3086 } 3084 3087 fallthrough; 3085 3088 default: 3086 3089 /* if option rom is valid, display its version too */ 3087 3090 if (fw.or_valid) { 3088 - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d", 3089 - fw.eep_major, fw.eep_minor, 3090 - fw.etrack_id, fw.or_major, fw.or_build, 3091 - fw.or_patch); 3091 + snprintf(adapter->fw_version, 3092 + sizeof(adapter->fw_version), 3093 + "%d.%d, 0x%08x, %d.%d.%d", 3094 + fw.eep_major, fw.eep_minor, fw.etrack_id, 3095 + fw.or_major, fw.or_build, fw.or_patch); 3092 3096 /* no option rom */ 3093 3097 } else if (fw.etrack_id != 0X0000) { 3094 - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x", 3095 - fw.eep_major, fw.eep_minor, 3096 - fw.etrack_id); 3098 + snprintf(adapter->fw_version, 3099 + sizeof(adapter->fw_version), 3100 + "%d.%d, 0x%08x", 3101 + fw.eep_major, fw.eep_minor, fw.etrack_id); 3097 3102 } else { 3098 - lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major, 3099 - fw.eep_minor, fw.eep_build); 3103 + snprintf(adapter->fw_version, 3104 + sizeof(adapter->fw_version), 3105 + "%d.%d.%d", 3106 + fw.eep_major, fw.eep_minor, fw.eep_build); 3100 3107 } 3101 3108 break; 3102 3109 } 3103 - 3104 - /* the truncate happens here if it doesn't fit */ 3105 - strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version)); 3106 - kfree(lbuf); 3107 3110 } 3108 3111 3109 3112 /**
+1 -5
drivers/net/ethernet/intel/igc/igc_phy.c
··· 130 130 /* The PHY will retain its settings across a power down/up cycle */ 131 131 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); 132 132 mii_reg |= MII_CR_POWER_DOWN; 133 - 134 - /* Temporary workaround - should be removed when PHY will implement 135 - * IEEE registers as properly 136 - */ 137 - /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ 133 + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); 138 134 usleep_range(1000, 2000); 139 135 } 140 136
-32
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
··· 61 61 return 0; 62 62 } 63 63 64 - static int npc_mcam_verify_pf_func(struct rvu *rvu, 65 - struct mcam_entry *entry_data, u8 intf, 66 - u16 pcifunc) 67 - { 68 - u16 pf_func, pf_func_mask; 69 - 70 - if (is_npc_intf_rx(intf)) 71 - return 0; 72 - 73 - pf_func_mask = (entry_data->kw_mask[0] >> 32) & 74 - NPC_KEX_PF_FUNC_MASK; 75 - pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK; 76 - 77 - pf_func = be16_to_cpu((__force __be16)pf_func); 78 - if (pf_func_mask != NPC_KEX_PF_FUNC_MASK || 79 - ((pf_func & ~RVU_PFVF_FUNC_MASK) != 80 - (pcifunc & ~RVU_PFVF_FUNC_MASK))) 81 - return -EINVAL; 82 - 83 - return 0; 84 - } 85 - 86 64 void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) 87 65 { 88 66 int blkaddr; ··· 2829 2851 else 2830 2852 nix_intf = pfvf->nix_rx_intf; 2831 2853 2832 - if (!is_pffunc_af(pcifunc) && 2833 - npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { 2834 - rc = NPC_MCAM_INVALID_REQ; 2835 - goto exit; 2836 - } 2837 - 2838 2854 /* For AF installed rules, the nix_intf should be set to target NIX */ 2839 2855 if (is_pffunc_af(req->hdr.pcifunc)) 2840 2856 nix_intf = req->intf; ··· 3178 3206 return NPC_MCAM_INVALID_REQ; 3179 3207 3180 3208 if (!is_npc_interface_valid(rvu, req->intf)) 3181 - return NPC_MCAM_INVALID_REQ; 3182 - 3183 - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, 3184 - req->hdr.pcifunc)) 3185 3209 return NPC_MCAM_INVALID_REQ; 3186 3210 3187 3211 /* Try to allocate a MCAM entry */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
··· 413 413 struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev); 414 414 struct mlx5_core_dev *mdev = mdpll->mdev; 415 415 416 - cancel_delayed_work(&mdpll->work); 416 + cancel_delayed_work_sync(&mdpll->work); 417 417 mlx5_dpll_mdev_netdev_untrack(mdpll, mdev); 418 418 destroy_workqueue(mdpll->wq); 419 419 dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
+7 -2
drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
··· 37 37 38 38 /* Now, set PGIDs for each active LAG */ 39 39 for (lag = 0; lag < lan966x->num_phys_ports; ++lag) { 40 - struct net_device *bond = lan966x->ports[lag]->bond; 40 + struct lan966x_port *port = lan966x->ports[lag]; 41 41 int num_active_ports = 0; 42 + struct net_device *bond; 42 43 unsigned long bond_mask; 43 44 u8 aggr_idx[16]; 44 45 45 - if (!bond || (visited & BIT(lag))) 46 + if (!port || !port->bond || (visited & BIT(lag))) 46 47 continue; 47 48 49 + bond = port->bond; 48 50 bond_mask = lan966x_lag_get_mask(lan966x, bond); 49 51 50 52 for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) { 51 53 struct lan966x_port *port = lan966x->ports[p]; 54 + 55 + if (!port) 56 + continue; 52 57 53 58 lan_wr(ANA_PGID_PGID_SET(bond_mask), 54 59 lan966x, ANA_PGID(p));
+9
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
··· 579 579 work_done = ionic_cq_service(cq, budget, 580 580 ionic_tx_service, NULL, NULL); 581 581 582 + if (unlikely(!budget)) 583 + return budget; 584 + 582 585 if (work_done < budget && napi_complete_done(napi, work_done)) { 583 586 ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR); 584 587 flags |= IONIC_INTR_CRED_UNMASK; ··· 609 606 struct ionic_lif *lif; 610 607 u32 work_done = 0; 611 608 u32 flags = 0; 609 + 610 + if (unlikely(!budget)) 611 + return budget; 612 612 613 613 lif = cq->bound_q->lif; 614 614 idev = &lif->ionic->idev; ··· 661 655 662 656 tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, 663 657 ionic_tx_service, NULL, NULL); 658 + 659 + if (unlikely(!budget)) 660 + return budget; 664 661 665 662 rx_work_done = ionic_cq_service(rxcq, budget, 666 663 ionic_rx_service, NULL, NULL);
+9 -13
drivers/net/ethernet/renesas/ravb_main.c
··· 818 818 struct ravb_rx_desc *desc; 819 819 struct sk_buff *skb; 820 820 dma_addr_t dma_addr; 821 + int rx_packets = 0; 821 822 u8 desc_status; 822 - int boguscnt; 823 823 u16 pkt_len; 824 824 u8 die_dt; 825 825 int entry; 826 826 int limit; 827 + int i; 827 828 828 829 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; 829 - boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; 830 + limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; 830 831 stats = &priv->stats[q]; 831 832 832 - boguscnt = min(boguscnt, *quota); 833 - limit = boguscnt; 834 833 desc = &priv->gbeth_rx_ring[entry]; 835 - while (desc->die_dt != DT_FEMPTY) { 834 + for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) { 836 835 /* Descriptor type must be checked before all other reads */ 837 836 dma_rmb(); 838 837 desc_status = desc->msc; 839 838 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; 840 - 841 - if (--boguscnt < 0) 842 - break; 843 839 844 840 /* We use 0-byte descriptors to mark the DMA mapping errors */ 845 841 if (!pkt_len) ··· 864 868 if (ndev->features & NETIF_F_RXCSUM) 865 869 ravb_rx_csum_gbeth(skb); 866 870 napi_gro_receive(&priv->napi[q], skb); 867 - stats->rx_packets++; 871 + rx_packets++; 868 872 stats->rx_bytes += pkt_len; 869 873 break; 870 874 case DT_FSTART: ··· 894 898 ravb_rx_csum_gbeth(skb); 895 899 napi_gro_receive(&priv->napi[q], 896 900 priv->rx_1st_skb); 897 - stats->rx_packets++; 901 + rx_packets++; 898 902 stats->rx_bytes += pkt_len; 899 903 break; 900 904 } ··· 933 937 desc->die_dt = DT_FEMPTY; 934 938 } 935 939 936 - *quota -= limit - (++boguscnt); 937 - 938 - return boguscnt <= 0; 940 + stats->rx_packets += rx_packets; 941 + *quota -= rx_packets; 942 + return *quota == 0; 939 943 } 940 944 941 945 /* Packet receive function for Ethernet AVB */
+35 -34
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
··· 830 830 { false, "UNKNOWN", "Unknown Error" }, /* 31 */ 831 831 }; 832 832 833 - static const char * const dpp_rx_err = "Read Rx Descriptor Parity checker Error"; 834 - static const char * const dpp_tx_err = "Read Tx Descriptor Parity checker Error"; 833 + #define DPP_RX_ERR "Read Rx Descriptor Parity checker Error" 834 + #define DPP_TX_ERR "Read Tx Descriptor Parity checker Error" 835 + 835 836 static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = { 836 - { true, "TDPES0", dpp_tx_err }, 837 - { true, "TDPES1", dpp_tx_err }, 838 - { true, "TDPES2", dpp_tx_err }, 839 - { true, "TDPES3", dpp_tx_err }, 840 - { true, "TDPES4", dpp_tx_err }, 841 - { true, "TDPES5", dpp_tx_err }, 842 - { true, "TDPES6", dpp_tx_err }, 843 - { true, "TDPES7", dpp_tx_err }, 844 - { true, "TDPES8", dpp_tx_err }, 845 - { true, "TDPES9", dpp_tx_err }, 846 - { true, "TDPES10", dpp_tx_err }, 847 - { true, "TDPES11", dpp_tx_err }, 848 - { true, "TDPES12", dpp_tx_err }, 849 - { true, "TDPES13", dpp_tx_err }, 850 - { true, "TDPES14", dpp_tx_err }, 851 - { true, "TDPES15", dpp_tx_err }, 852 - { true, "RDPES0", dpp_rx_err }, 853 - { true, "RDPES1", dpp_rx_err }, 854 - { true, "RDPES2", dpp_rx_err }, 855 - { true, "RDPES3", dpp_rx_err }, 856 - { true, "RDPES4", dpp_rx_err }, 857 - { true, "RDPES5", dpp_rx_err }, 858 - { true, "RDPES6", dpp_rx_err }, 859 - { true, "RDPES7", dpp_rx_err }, 860 - { true, "RDPES8", dpp_rx_err }, 861 - { true, "RDPES9", dpp_rx_err }, 862 - { true, "RDPES10", dpp_rx_err }, 863 - { true, "RDPES11", dpp_rx_err }, 864 - { true, "RDPES12", dpp_rx_err }, 865 - { true, "RDPES13", dpp_rx_err }, 866 - { true, "RDPES14", dpp_rx_err }, 867 - { true, "RDPES15", dpp_rx_err }, 837 + { true, "TDPES0", DPP_TX_ERR }, 838 + { true, "TDPES1", DPP_TX_ERR }, 839 + { true, "TDPES2", DPP_TX_ERR }, 840 + { true, "TDPES3", DPP_TX_ERR }, 841 + { true, "TDPES4", DPP_TX_ERR }, 842 + { true, "TDPES5", DPP_TX_ERR }, 843 + { true, "TDPES6", DPP_TX_ERR }, 844 + { true, "TDPES7", 
DPP_TX_ERR }, 845 + { true, "TDPES8", DPP_TX_ERR }, 846 + { true, "TDPES9", DPP_TX_ERR }, 847 + { true, "TDPES10", DPP_TX_ERR }, 848 + { true, "TDPES11", DPP_TX_ERR }, 849 + { true, "TDPES12", DPP_TX_ERR }, 850 + { true, "TDPES13", DPP_TX_ERR }, 851 + { true, "TDPES14", DPP_TX_ERR }, 852 + { true, "TDPES15", DPP_TX_ERR }, 853 + { true, "RDPES0", DPP_RX_ERR }, 854 + { true, "RDPES1", DPP_RX_ERR }, 855 + { true, "RDPES2", DPP_RX_ERR }, 856 + { true, "RDPES3", DPP_RX_ERR }, 857 + { true, "RDPES4", DPP_RX_ERR }, 858 + { true, "RDPES5", DPP_RX_ERR }, 859 + { true, "RDPES6", DPP_RX_ERR }, 860 + { true, "RDPES7", DPP_RX_ERR }, 861 + { true, "RDPES8", DPP_RX_ERR }, 862 + { true, "RDPES9", DPP_RX_ERR }, 863 + { true, "RDPES10", DPP_RX_ERR }, 864 + { true, "RDPES11", DPP_RX_ERR }, 865 + { true, "RDPES12", DPP_RX_ERR }, 866 + { true, "RDPES13", DPP_RX_ERR }, 867 + { true, "RDPES14", DPP_RX_ERR }, 868 + { true, "RDPES15", DPP_RX_ERR }, 868 869 }; 869 870 870 871 static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+1
drivers/net/ethernet/ti/Kconfig
··· 189 189 select TI_K3_CPPI_DESC_POOL 190 190 depends on PRU_REMOTEPROC 191 191 depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER 192 + depends on PTP_1588_CLOCK_OPTIONAL 192 193 help 193 194 Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem. 194 195 This subsystem is available starting with the AM65 platform.
+12 -5
drivers/net/ethernet/ti/cpts.c
··· 638 638 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC)); 639 639 } 640 640 641 + static void cpts_clk_unregister(void *clk) 642 + { 643 + clk_hw_unregister_mux(clk); 644 + } 645 + 646 + static void cpts_clk_del_provider(void *np) 647 + { 648 + of_clk_del_provider(np); 649 + } 650 + 641 651 static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) 642 652 { 643 653 struct device_node *refclk_np; ··· 697 687 goto mux_fail; 698 688 } 699 689 700 - ret = devm_add_action_or_reset(cpts->dev, 701 - (void(*)(void *))clk_hw_unregister_mux, 702 - clk_hw); 690 + ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw); 703 691 if (ret) { 704 692 dev_err(cpts->dev, "add clkmux unreg action %d", ret); 705 693 goto mux_fail; ··· 707 699 if (ret) 708 700 goto mux_fail; 709 701 710 - ret = devm_add_action_or_reset(cpts->dev, 711 - (void(*)(void *))of_clk_del_provider, 702 + ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider, 712 703 refclk_np); 713 704 if (ret) { 714 705 dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
+1
drivers/net/fddi/skfp/skfddi.c
··· 153 153 { } /* Terminating entry */ 154 154 }; 155 155 MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl); 156 + MODULE_DESCRIPTION("SysKonnect FDDI PCI driver"); 156 157 MODULE_LICENSE("GPL"); 157 158 MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>"); 158 159
+1
drivers/net/ieee802154/fakelb.c
··· 259 259 260 260 module_init(fakelb_init_module); 261 261 module_exit(fake_remove_module); 262 + MODULE_DESCRIPTION("IEEE 802.15.4 loopback driver"); 262 263 MODULE_LICENSE("GPL");
+1
drivers/net/ipvlan/ipvtap.c
··· 237 237 module_exit(ipvtap_exit); 238 238 MODULE_ALIAS_RTNL_LINK("ipvtap"); 239 239 MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>"); 240 + MODULE_DESCRIPTION("IP-VLAN based tap driver"); 240 241 MODULE_LICENSE("GPL");
+1
drivers/net/phy/mdio_devres.c
··· 131 131 EXPORT_SYMBOL(__devm_of_mdiobus_register); 132 132 #endif /* CONFIG_OF_MDIO */ 133 133 134 + MODULE_DESCRIPTION("Network MDIO bus devres helpers"); 134 135 MODULE_LICENSE("GPL");
+1
drivers/net/plip/plip.c
··· 1437 1437 1438 1438 module_init(plip_init); 1439 1439 module_exit(plip_cleanup_module); 1440 + MODULE_DESCRIPTION("PLIP (parallel port) network module"); 1440 1441 MODULE_LICENSE("GPL");
+1
drivers/net/ppp/bsd_comp.c
··· 1166 1166 1167 1167 module_init(bsdcomp_init); 1168 1168 module_exit(bsdcomp_cleanup); 1169 + MODULE_DESCRIPTION("PPP BSD-Compress compression module"); 1169 1170 MODULE_LICENSE("Dual BSD/GPL"); 1170 1171 MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
+1
drivers/net/ppp/ppp_async.c
··· 87 87 static int flag_time = HZ; 88 88 module_param(flag_time, int, 0); 89 89 MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)"); 90 + MODULE_DESCRIPTION("PPP async serial channel module"); 90 91 MODULE_LICENSE("GPL"); 91 92 MODULE_ALIAS_LDISC(N_PPP); 92 93
+1
drivers/net/ppp/ppp_deflate.c
··· 630 630 631 631 module_init(deflate_init); 632 632 module_exit(deflate_cleanup); 633 + MODULE_DESCRIPTION("PPP Deflate compression module"); 633 634 MODULE_LICENSE("Dual BSD/GPL"); 634 635 MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE)); 635 636 MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
+1
drivers/net/ppp/ppp_generic.c
··· 3604 3604 EXPORT_SYMBOL(ppp_output_wakeup); 3605 3605 EXPORT_SYMBOL(ppp_register_compressor); 3606 3606 EXPORT_SYMBOL(ppp_unregister_compressor); 3607 + MODULE_DESCRIPTION("Generic PPP layer driver"); 3607 3608 MODULE_LICENSE("GPL"); 3608 3609 MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); 3609 3610 MODULE_ALIAS_RTNL_LINK("ppp");
+1
drivers/net/ppp/ppp_synctty.c
··· 724 724 725 725 module_init(ppp_sync_init); 726 726 module_exit(ppp_sync_cleanup); 727 + MODULE_DESCRIPTION("PPP synchronous TTY channel module"); 727 728 MODULE_LICENSE("GPL"); 728 729 MODULE_ALIAS_LDISC(N_SYNC_PPP);
+9 -14
drivers/net/ppp/pppoe.c
··· 1007 1007 struct sk_buff *skb; 1008 1008 int error = 0; 1009 1009 1010 - if (sk->sk_state & PPPOX_BOUND) { 1011 - error = -EIO; 1012 - goto end; 1013 - } 1010 + if (sk->sk_state & PPPOX_BOUND) 1011 + return -EIO; 1014 1012 1015 1013 skb = skb_recv_datagram(sk, flags, &error); 1016 - if (error < 0) 1017 - goto end; 1014 + if (!skb) 1015 + return error; 1018 1016 1019 - if (skb) { 1020 - total_len = min_t(size_t, total_len, skb->len); 1021 - error = skb_copy_datagram_msg(skb, 0, m, total_len); 1022 - if (error == 0) { 1023 - consume_skb(skb); 1024 - return total_len; 1025 - } 1017 + total_len = min_t(size_t, total_len, skb->len); 1018 + error = skb_copy_datagram_msg(skb, 0, m, total_len); 1019 + if (error == 0) { 1020 + consume_skb(skb); 1021 + return total_len; 1026 1022 } 1027 1023 1028 1024 kfree_skb(skb); 1029 - end: 1030 1025 return error; 1031 1026 } 1032 1027
+9 -6
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
··· 618 618 &tbl_rev); 619 619 if (!IS_ERR(wifi_pkg)) { 620 620 if (tbl_rev != 2) { 621 - ret = PTR_ERR(wifi_pkg); 621 + ret = -EINVAL; 622 622 goto out_free; 623 623 } 624 624 ··· 634 634 &tbl_rev); 635 635 if (!IS_ERR(wifi_pkg)) { 636 636 if (tbl_rev != 1) { 637 - ret = PTR_ERR(wifi_pkg); 637 + ret = -EINVAL; 638 638 goto out_free; 639 639 } 640 640 ··· 650 650 &tbl_rev); 651 651 if (!IS_ERR(wifi_pkg)) { 652 652 if (tbl_rev != 0) { 653 - ret = PTR_ERR(wifi_pkg); 653 + ret = -EINVAL; 654 654 goto out_free; 655 655 } 656 656 ··· 707 707 &tbl_rev); 708 708 if (!IS_ERR(wifi_pkg)) { 709 709 if (tbl_rev != 2) { 710 - ret = PTR_ERR(wifi_pkg); 710 + ret = -EINVAL; 711 711 goto out_free; 712 712 } 713 713 ··· 723 723 &tbl_rev); 724 724 if (!IS_ERR(wifi_pkg)) { 725 725 if (tbl_rev != 1) { 726 - ret = PTR_ERR(wifi_pkg); 726 + ret = -EINVAL; 727 727 goto out_free; 728 728 } 729 729 ··· 739 739 &tbl_rev); 740 740 if (!IS_ERR(wifi_pkg)) { 741 741 if (tbl_rev != 0) { 742 - ret = PTR_ERR(wifi_pkg); 742 + ret = -EINVAL; 743 743 goto out_free; 744 744 } 745 745 ··· 1115 1115 IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n"); 1116 1116 goto read_table; 1117 1117 } 1118 + 1119 + ret = PTR_ERR(wifi_pkg); 1120 + goto out_free; 1118 1121 1119 1122 read_table: 1120 1123 fwrt->ppag_ver = tbl_rev;
+3
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 3687 3687 NL80211_TDLS_SETUP); 3688 3688 } 3689 3689 3690 + if (ret) 3691 + return ret; 3692 + 3690 3693 for_each_sta_active_link(vif, sta, link_sta, i) 3691 3694 link_sta->agg.max_rc_amsdu_len = 1; 3692 3695
+4
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
··· 505 505 return false; 506 506 507 507 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 508 + 509 + if (WARN_ON_ONCE(!mvm_sta->dup_data)) 510 + return false; 511 + 508 512 dup_data = &mvm_sta->dup_data[queue]; 509 513 510 514 /*
+2 -1
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation 3 + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation 4 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2017 Intel Deutschland GmbH 6 6 */ ··· 972 972 if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) { 973 973 /* End TE, notify mac80211 */ 974 974 mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; 975 + mvmvif->time_event_data.link_id = -1; 975 976 iwl_mvm_p2p_roc_finished(mvm); 976 977 ieee80211_remain_on_channel_expired(mvm->hw); 977 978 } else if (le32_to_cpu(notif->start)) {
+61 -12
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
··· 520 520 } 521 521 } 522 522 523 + static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen, 524 + const u8 *addr3_override) 525 + { 526 + struct ieee80211_hdr *out_hdr = cmd; 527 + 528 + memcpy(cmd, hdr, hdrlen); 529 + if (addr3_override) 530 + memcpy(out_hdr->addr3, addr3_override, ETH_ALEN); 531 + } 532 + 523 533 /* 524 534 * Allocates and sets the Tx cmd the driver data pointers in the skb 525 535 */ 526 536 static struct iwl_device_tx_cmd * 527 537 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, 528 538 struct ieee80211_tx_info *info, int hdrlen, 529 - struct ieee80211_sta *sta, u8 sta_id) 539 + struct ieee80211_sta *sta, u8 sta_id, 540 + const u8 *addr3_override) 530 541 { 531 542 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 532 543 struct iwl_device_tx_cmd *dev_cmd; ··· 595 584 cmd->len = cpu_to_le16((u16)skb->len); 596 585 597 586 /* Copy MAC header from skb into command buffer */ 598 - memcpy(cmd->hdr, hdr, hdrlen); 587 + iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); 599 588 600 589 cmd->flags = cpu_to_le16(flags); 601 590 cmd->rate_n_flags = cpu_to_le32(rate_n_flags); ··· 610 599 cmd->len = cpu_to_le16((u16)skb->len); 611 600 612 601 /* Copy MAC header from skb into command buffer */ 613 - memcpy(cmd->hdr, hdr, hdrlen); 602 + iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); 614 603 615 604 cmd->flags = cpu_to_le32(flags); 616 605 cmd->rate_n_flags = cpu_to_le32(rate_n_flags); ··· 628 617 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); 629 618 630 619 /* Copy MAC header from skb into command buffer */ 631 - memcpy(tx_cmd->hdr, hdr, hdrlen); 620 + iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override); 632 621 633 622 out: 634 623 return dev_cmd; ··· 831 820 832 821 IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); 833 822 834 - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); 823 + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, 
hdrlen, NULL, sta_id, 824 + NULL); 835 825 if (!dev_cmd) 836 826 return -1; 837 827 ··· 1152 1140 */ 1153 1141 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, 1154 1142 struct ieee80211_tx_info *info, 1155 - struct ieee80211_sta *sta) 1143 + struct ieee80211_sta *sta, 1144 + const u8 *addr3_override) 1156 1145 { 1157 1146 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1158 1147 struct iwl_mvm_sta *mvmsta; ··· 1185 1172 iwl_mvm_probe_resp_set_noa(mvm, skb); 1186 1173 1187 1174 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, 1188 - sta, mvmsta->deflink.sta_id); 1175 + sta, mvmsta->deflink.sta_id, 1176 + addr3_override); 1189 1177 if (!dev_cmd) 1190 1178 goto drop; 1191 1179 ··· 1308 1294 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1309 1295 struct ieee80211_tx_info info; 1310 1296 struct sk_buff_head mpdus_skbs; 1297 + struct ieee80211_vif *vif; 1311 1298 unsigned int payload_len; 1312 1299 int ret; 1313 1300 struct sk_buff *orig_skb = skb; 1301 + const u8 *addr3; 1314 1302 1315 1303 if (WARN_ON_ONCE(!mvmsta)) 1316 1304 return -1; ··· 1323 1307 memcpy(&info, skb->cb, sizeof(info)); 1324 1308 1325 1309 if (!skb_is_gso(skb)) 1326 - return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 1310 + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL); 1327 1311 1328 1312 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - 1329 1313 tcp_hdrlen(skb) + skb->data_len; 1330 1314 1331 1315 if (payload_len <= skb_shinfo(skb)->gso_size) 1332 - return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 1316 + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL); 1333 1317 1334 1318 __skb_queue_head_init(&mpdus_skbs); 1319 + 1320 + vif = info.control.vif; 1321 + if (!vif) 1322 + return -1; 1335 1323 1336 1324 ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); 1337 1325 if (ret) ··· 1343 1323 1344 1324 WARN_ON(skb_queue_empty(&mpdus_skbs)); 1345 1325 1346 - while (!skb_queue_empty(&mpdus_skbs)) { 1347 - skb = 
__skb_dequeue(&mpdus_skbs); 1326 + /* 1327 + * As described in IEEE sta 802.11-2020, table 9-30 (Address 1328 + * field contents), A-MSDU address 3 should contain the BSSID 1329 + * address. 1330 + * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it 1331 + * in the command header. We need to preserve the original 1332 + * address 3 in the skb header to correctly create all the 1333 + * A-MSDU subframe headers from it. 1334 + */ 1335 + switch (vif->type) { 1336 + case NL80211_IFTYPE_STATION: 1337 + addr3 = vif->cfg.ap_addr; 1338 + break; 1339 + case NL80211_IFTYPE_AP: 1340 + addr3 = vif->addr; 1341 + break; 1342 + default: 1343 + addr3 = NULL; 1344 + break; 1345 + } 1348 1346 1349 - ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); 1347 + while (!skb_queue_empty(&mpdus_skbs)) { 1348 + struct ieee80211_hdr *hdr; 1349 + bool amsdu; 1350 + 1351 + skb = __skb_dequeue(&mpdus_skbs); 1352 + hdr = (void *)skb->data; 1353 + amsdu = ieee80211_is_data_qos(hdr->frame_control) && 1354 + (*ieee80211_get_qos_ctl(hdr) & 1355 + IEEE80211_QOS_CTL_A_MSDU_PRESENT); 1356 + 1357 + ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta, 1358 + amsdu ? addr3 : NULL); 1350 1359 if (ret) { 1351 1360 /* Free skbs created as part of TSO logic that have not yet been dequeued */ 1352 1361 __skb_queue_purge(&mpdus_skbs);
+1
drivers/net/xen-netback/netback.c
··· 1778 1778 } 1779 1779 module_exit(netback_fini); 1780 1780 1781 + MODULE_DESCRIPTION("Xen backend network device module"); 1781 1782 MODULE_LICENSE("Dual BSD/GPL"); 1782 1783 MODULE_ALIAS("xen-backend:vif");
+8 -6
drivers/nvme/host/core.c
··· 713 713 if (req->q->queuedata) { 714 714 struct nvme_ns *ns = req->q->disk->private_data; 715 715 716 - logging_enabled = ns->passthru_err_log_enabled; 716 + logging_enabled = ns->head->passthru_err_log_enabled; 717 717 req->timeout = NVME_IO_TIMEOUT; 718 718 } else { /* no queuedata implies admin queue */ 719 719 logging_enabled = nr->ctrl->passthru_err_log_enabled; ··· 3696 3696 3697 3697 ns->disk = disk; 3698 3698 ns->queue = disk->queue; 3699 - ns->passthru_err_log_enabled = false; 3700 3699 3701 3700 if (ctrl->opts && ctrl->opts->data_digest) 3702 3701 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); ··· 3761 3762 3762 3763 /* 3763 3764 * Set ns->disk->device->driver_data to ns so we can access 3764 - * ns->logging_enabled in nvme_passthru_err_log_enabled_store() and 3765 - * nvme_passthru_err_log_enabled_show(). 3765 + * ns->head->passthru_err_log_enabled in 3766 + * nvme_io_passthru_err_log_enabled_[store | show](). 3766 3767 */ 3767 3768 dev_set_drvdata(disk_to_dev(ns->disk), ns); 3768 3769 ··· 4190 4191 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4191 4192 { 4192 4193 struct nvme_fw_slot_info_log *log; 4194 + u8 next_fw_slot, cur_fw_slot; 4193 4195 4194 4196 log = kmalloc(sizeof(*log), GFP_KERNEL); 4195 4197 if (!log) ··· 4202 4202 goto out_free_log; 4203 4203 } 4204 4204 4205 - if (log->afi & 0x70 || !(log->afi & 0x7)) { 4205 + cur_fw_slot = log->afi & 0x7; 4206 + next_fw_slot = (log->afi & 0x70) >> 4; 4207 + if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) { 4206 4208 dev_info(ctrl->device, 4207 4209 "Firmware is activated after next Controller Level Reset\n"); 4208 4210 goto out_free_log; 4209 4211 } 4210 4212 4211 - memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1], 4213 + memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1], 4212 4214 sizeof(ctrl->subsys->firmware_rev)); 4213 4215 4214 4216 out_free_log:
+1 -1
drivers/nvme/host/ioctl.c
··· 228 228 length = (io.nblocks + 1) << ns->head->lba_shift; 229 229 230 230 if ((io.control & NVME_RW_PRINFO_PRACT) && 231 - ns->head->ms == sizeof(struct t10_pi_tuple)) { 231 + (ns->head->ms == ns->head->pi_size)) { 232 232 /* 233 233 * Protection information is stripped/inserted by the 234 234 * controller.
+1 -1
drivers/nvme/host/nvme.h
··· 455 455 struct list_head entry; 456 456 struct kref ref; 457 457 bool shared; 458 + bool passthru_err_log_enabled; 458 459 int instance; 459 460 struct nvme_effects_log *effects; 460 461 u64 nuse; ··· 524 523 struct device cdev_device; 525 524 526 525 struct nvme_fault_inject fault_inject; 527 - bool passthru_err_log_enabled; 528 526 }; 529 527 530 528 /* NVMe ns supports metadata actions by the controller (generate/strip) */
+15 -15
drivers/nvme/host/sysfs.c
··· 48 48 struct device_attribute *attr, const char *buf, size_t count) 49 49 { 50 50 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 51 - int err; 52 51 bool passthru_err_log_enabled; 52 + int err; 53 53 54 54 err = kstrtobool(buf, &passthru_err_log_enabled); 55 55 if (err) ··· 60 60 return count; 61 61 } 62 62 63 + static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 64 + { 65 + struct gendisk *disk = dev_to_disk(dev); 66 + 67 + if (nvme_disk_is_ns_head(disk)) 68 + return disk->private_data; 69 + return nvme_get_ns_from_dev(dev)->head; 70 + } 71 + 63 72 static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev, 64 73 struct device_attribute *attr, char *buf) 65 74 { 66 - struct nvme_ns *n = dev_get_drvdata(dev); 75 + struct nvme_ns_head *head = dev_to_ns_head(dev); 67 76 68 - return sysfs_emit(buf, n->passthru_err_log_enabled ? "on\n" : "off\n"); 77 + return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n"); 69 78 } 70 79 71 80 static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev, 72 81 struct device_attribute *attr, const char *buf, size_t count) 73 82 { 74 - struct nvme_ns *ns = dev_get_drvdata(dev); 75 - int err; 83 + struct nvme_ns_head *head = dev_to_ns_head(dev); 76 84 bool passthru_err_log_enabled; 85 + int err; 77 86 78 87 err = kstrtobool(buf, &passthru_err_log_enabled); 79 88 if (err) 80 89 return -EINVAL; 81 - ns->passthru_err_log_enabled = passthru_err_log_enabled; 90 + head->passthru_err_log_enabled = passthru_err_log_enabled; 82 91 83 92 return count; 84 93 } ··· 99 90 static struct device_attribute dev_attr_io_passthru_err_log_enabled = \ 100 91 __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \ 101 92 nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store); 102 - 103 - static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 104 - { 105 - struct gendisk *disk = dev_to_disk(dev); 106 - 107 - if (nvme_disk_is_ns_head(disk)) 108 - return 
disk->private_data; 109 - return nvme_get_ns_from_dev(dev)->head; 110 - } 111 93 112 94 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 113 95 char *buf)
+25 -40
drivers/of/property.c
··· 763 763 /* Walk 3 levels up only if there is 'ports' node. */ 764 764 for (depth = 3; depth && node; depth--) { 765 765 node = of_get_next_parent(node); 766 - if (depth == 2 && !of_node_name_eq(node, "ports")) 766 + if (depth == 2 && !of_node_name_eq(node, "ports") && 767 + !of_node_name_eq(node, "in-ports") && 768 + !of_node_name_eq(node, "out-ports")) 767 769 break; 768 770 } 769 771 return node; ··· 1065 1063 return of_device_get_match_data(dev); 1066 1064 } 1067 1065 1068 - static struct device_node *of_get_compat_node(struct device_node *np) 1069 - { 1070 - of_node_get(np); 1071 - 1072 - while (np) { 1073 - if (!of_device_is_available(np)) { 1074 - of_node_put(np); 1075 - np = NULL; 1076 - } 1077 - 1078 - if (of_property_present(np, "compatible")) 1079 - break; 1080 - 1081 - np = of_get_next_parent(np); 1082 - } 1083 - 1084 - return np; 1085 - } 1086 - 1087 - static struct device_node *of_get_compat_node_parent(struct device_node *np) 1088 - { 1089 - struct device_node *parent, *node; 1090 - 1091 - parent = of_get_parent(np); 1092 - node = of_get_compat_node(parent); 1093 - of_node_put(parent); 1094 - 1095 - return node; 1096 - } 1097 - 1098 1066 static void of_link_to_phandle(struct device_node *con_np, 1099 1067 struct device_node *sup_np) 1100 1068 { ··· 1194 1222 * parse_prop.prop_name: Name of property holding a phandle value 1195 1223 * parse_prop.index: For properties holding a list of phandles, this is the 1196 1224 * index into the list 1225 + * @get_con_dev: If the consumer node containing the property is never converted 1226 + * to a struct device, implement this ops so fw_devlink can use it 1227 + * to find the true consumer. 1197 1228 * @optional: Describes whether a supplier is mandatory or not 1198 - * @node_not_dev: The consumer node containing the property is never converted 1199 - * to a struct device. Instead, parse ancestor nodes for the 1200 - * compatible property to find a node corresponding to a device. 
1201 1229 * 1202 1230 * Returns: 1203 1231 * parse_prop() return values are ··· 1208 1236 struct supplier_bindings { 1209 1237 struct device_node *(*parse_prop)(struct device_node *np, 1210 1238 const char *prop_name, int index); 1239 + struct device_node *(*get_con_dev)(struct device_node *np); 1211 1240 bool optional; 1212 - bool node_not_dev; 1213 1241 }; 1214 1242 1215 1243 DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") 1216 1244 DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells") 1217 1245 DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells") 1218 1246 DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells") 1219 - DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells") 1247 + DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells") 1220 1248 DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL) 1221 1249 DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells") 1222 1250 DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells") ··· 1234 1262 DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL) 1235 1263 DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL) 1236 1264 DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL) 1237 - DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL) 1238 1265 DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells") 1239 1266 DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") 1240 1267 DEFINE_SIMPLE_PROP(leds, "leds", NULL) ··· 1299 1328 return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np; 1300 1329 } 1301 1330 1331 + static struct device_node *parse_remote_endpoint(struct device_node *np, 1332 + const char *prop_name, 1333 + int index) 1334 + { 1335 + /* Return NULL for index > 0 to signify end of remote-endpoints. 
*/ 1336 + if (!index || strcmp(prop_name, "remote-endpoint")) 1337 + return NULL; 1338 + 1339 + return of_graph_get_remote_port_parent(np); 1340 + } 1341 + 1302 1342 static const struct supplier_bindings of_supplier_bindings[] = { 1303 1343 { .parse_prop = parse_clocks, }, 1304 1344 { .parse_prop = parse_interconnects, }, ··· 1334 1352 { .parse_prop = parse_pinctrl6, }, 1335 1353 { .parse_prop = parse_pinctrl7, }, 1336 1354 { .parse_prop = parse_pinctrl8, }, 1337 - { .parse_prop = parse_remote_endpoint, .node_not_dev = true, }, 1355 + { 1356 + .parse_prop = parse_remote_endpoint, 1357 + .get_con_dev = of_graph_get_port_parent, 1358 + }, 1338 1359 { .parse_prop = parse_pwms, }, 1339 1360 { .parse_prop = parse_resets, }, 1340 1361 { .parse_prop = parse_leds, }, ··· 1388 1403 while ((phandle = s->parse_prop(con_np, prop_name, i))) { 1389 1404 struct device_node *con_dev_np; 1390 1405 1391 - con_dev_np = s->node_not_dev 1392 - ? of_get_compat_node_parent(con_np) 1406 + con_dev_np = s->get_con_dev 1407 + ? s->get_con_dev(con_np) 1393 1408 : of_node_get(con_np); 1394 1409 matched = true; 1395 1410 i++;
+9 -3
drivers/of/unittest.c
··· 50 50 failed; \ 51 51 }) 52 52 53 + #ifdef CONFIG_OF_KOBJ 54 + #define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref) 55 + #else 56 + #define OF_KREF_READ(NODE) 1 57 + #endif 58 + 53 59 /* 54 60 * Expected message may have a message level other than KERN_INFO. 55 61 * Print the expected message only if the current loglevel will allow ··· 576 570 pr_err("missing testcase data\n"); 577 571 return; 578 572 } 579 - prefs[i] = kref_read(&p[i]->kobj.kref); 573 + prefs[i] = OF_KREF_READ(p[i]); 580 574 } 581 575 582 576 rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells"); ··· 699 693 unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); 700 694 701 695 for (i = 0; i < ARRAY_SIZE(p); ++i) { 702 - unittest(prefs[i] == kref_read(&p[i]->kobj.kref), 696 + unittest(prefs[i] == OF_KREF_READ(p[i]), 703 697 "provider%d: expected:%d got:%d\n", 704 - i, prefs[i], kref_read(&p[i]->kobj.kref)); 698 + i, prefs[i], OF_KREF_READ(p[i])); 705 699 of_node_put(p[i]); 706 700 } 707 701 }
+6 -4
drivers/pci/controller/dwc/pcie-designware-ep.c
··· 6 6 * Author: Kishon Vijay Abraham I <kishon@ti.com> 7 7 */ 8 8 9 + #include <linux/align.h> 9 10 #include <linux/bitfield.h> 10 11 #include <linux/of.h> 11 12 #include <linux/platform_device.h> ··· 483 482 reg = ep_func->msi_cap + PCI_MSI_DATA_32; 484 483 msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); 485 484 } 486 - aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); 487 - msg_addr = ((u64)msg_addr_upper) << 32 | 488 - (msg_addr_lower & ~aligned_offset); 485 + msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; 486 + 487 + aligned_offset = msg_addr & (epc->mem->window.page_size - 1); 488 + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); 489 489 ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, 490 490 epc->mem->window.page_size); 491 491 if (ret) ··· 553 551 } 554 552 555 553 aligned_offset = msg_addr & (epc->mem->window.page_size - 1); 556 - msg_addr &= ~aligned_offset; 554 + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); 557 555 ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, 558 556 epc->mem->window.page_size); 559 557 if (ret)
+1 -1
drivers/pmdomain/core.c
··· 1109 1109 1110 1110 return 0; 1111 1111 } 1112 - late_initcall(genpd_power_off_unused); 1112 + late_initcall_sync(genpd_power_off_unused); 1113 1113 1114 1114 #ifdef CONFIG_PM_SLEEP 1115 1115
+7 -8
drivers/pmdomain/mediatek/mtk-pm-domains.c
··· 561 561 goto err_put_node; 562 562 } 563 563 564 + /* recursive call to add all subdomains */ 565 + ret = scpsys_add_subdomain(scpsys, child); 566 + if (ret) 567 + goto err_put_node; 568 + 564 569 ret = pm_genpd_add_subdomain(parent_pd, child_pd); 565 570 if (ret) { 566 571 dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n", ··· 575 570 dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name, 576 571 child_pd->name); 577 572 } 578 - 579 - /* recursive call to add all subdomains */ 580 - ret = scpsys_add_subdomain(scpsys, child); 581 - if (ret) 582 - goto err_put_node; 583 573 } 584 574 585 575 return 0; ··· 588 588 { 589 589 int ret; 590 590 591 - if (scpsys_domain_is_on(pd)) 592 - scpsys_power_off(&pd->genpd); 593 - 594 591 /* 595 592 * We're in the error cleanup already, so we only complain, 596 593 * but won't emit another error on top of the original one. ··· 597 600 dev_err(pd->scpsys->dev, 598 601 "failed to remove domain '%s' : %d - state may be inconsistent\n", 599 602 pd->genpd.name, ret); 603 + if (scpsys_domain_is_on(pd)) 604 + scpsys_power_off(&pd->genpd); 600 605 601 606 clk_bulk_put(pd->num_clks, pd->clks); 602 607 clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+2 -1
drivers/pmdomain/renesas/r8a77980-sysc.c
··· 25 25 PD_CPU_NOCR }, 26 26 { "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU, 27 27 PD_CPU_NOCR }, 28 - { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON }, 28 + { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON, 29 + PD_CPU_NOCR }, 29 30 { "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON }, 30 31 { "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR }, 31 32 { "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
+6 -6
drivers/scsi/lpfc/lpfc_scsi.c
··· 1918 1918 * 1919 1919 * Returns the number of SGEs added to the SGL. 1920 1920 **/ 1921 - static int 1921 + static uint32_t 1922 1922 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1923 1923 struct sli4_sge *sgl, int datasegcnt, 1924 1924 struct lpfc_io_buf *lpfc_cmd) ··· 1926 1926 struct scatterlist *sgde = NULL; /* s/g data entry */ 1927 1927 struct sli4_sge_diseed *diseed = NULL; 1928 1928 dma_addr_t physaddr; 1929 - int i = 0, num_sge = 0, status; 1930 - uint32_t reftag; 1929 + int i = 0, status; 1930 + uint32_t reftag, num_sge = 0; 1931 1931 uint8_t txop, rxop; 1932 1932 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1933 1933 uint32_t rc; ··· 2099 2099 * 2100 2100 * Returns the number of SGEs added to the SGL. 2101 2101 **/ 2102 - static int 2102 + static uint32_t 2103 2103 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2104 2104 struct sli4_sge *sgl, int datacnt, int protcnt, 2105 2105 struct lpfc_io_buf *lpfc_cmd) ··· 2123 2123 uint32_t rc; 2124 2124 #endif 2125 2125 uint32_t checking = 1; 2126 - uint32_t dma_offset = 0; 2127 - int num_sge = 0, j = 2; 2126 + uint32_t dma_offset = 0, num_sge = 0; 2127 + int j = 2; 2128 2128 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2129 2129 2130 2130 sgpe = scsi_prot_sglist(sc);
+2 -1
drivers/scsi/scsi_error.c
··· 282 282 { 283 283 struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); 284 284 struct Scsi_Host *shost = scmd->device->host; 285 + unsigned int busy = scsi_host_busy(shost); 285 286 unsigned long flags; 286 287 287 288 spin_lock_irqsave(shost->host_lock, flags); 288 289 shost->host_failed++; 289 - scsi_eh_wakeup(shost, scsi_host_busy(shost)); 290 + scsi_eh_wakeup(shost, busy); 290 291 spin_unlock_irqrestore(shost->host_lock, flags); 291 292 } 292 293
+3 -1
drivers/scsi/scsi_lib.c
··· 278 278 rcu_read_lock(); 279 279 __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state); 280 280 if (unlikely(scsi_host_in_recovery(shost))) { 281 + unsigned int busy = scsi_host_busy(shost); 282 + 281 283 spin_lock_irqsave(shost->host_lock, flags); 282 284 if (shost->host_failed || shost->host_eh_scheduled) 283 - scsi_eh_wakeup(shost, scsi_host_busy(shost)); 285 + scsi_eh_wakeup(shost, busy); 284 286 spin_unlock_irqrestore(shost->host_lock, flags); 285 287 } 286 288 rcu_read_unlock();
+5 -4
drivers/spi/spi-imx.c
··· 2 2 // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. 3 3 // Copyright (C) 2008 Juergen Beisert 4 4 5 + #include <linux/bits.h> 5 6 #include <linux/clk.h> 6 7 #include <linux/completion.h> 7 8 #include <linux/delay.h> ··· 661 660 << MX51_ECSPI_CTRL_BL_OFFSET; 662 661 else { 663 662 if (spi_imx->usedma) { 664 - ctrl |= (spi_imx->bits_per_word * 665 - spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1) 663 + ctrl |= (spi_imx->bits_per_word - 1) 666 664 << MX51_ECSPI_CTRL_BL_OFFSET; 667 665 } else { 668 666 if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST) 669 - ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1) 667 + ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1) 670 668 << MX51_ECSPI_CTRL_BL_OFFSET; 671 669 else 672 - ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1) 670 + ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word, 671 + BITS_PER_BYTE) * spi_imx->bits_per_word 673 672 << MX51_ECSPI_CTRL_BL_OFFSET; 674 673 } 675 674 }
+1
drivers/spi/spi-intel-pci.c
··· 85 85 { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info }, 86 86 { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, 87 87 { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info }, 88 + { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info }, 88 89 { }, 89 90 }; 90 91 MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
+2 -1
drivers/spi/spi-mxs.c
··· 39 39 #include <linux/spi/spi.h> 40 40 #include <linux/spi/mxs-spi.h> 41 41 #include <trace/events/spi.h> 42 + #include <linux/dma/mxs-dma.h> 42 43 43 44 #define DRIVER_NAME "mxs-spi" 44 45 ··· 253 252 desc = dmaengine_prep_slave_sg(ssp->dmach, 254 253 &dma_xfer[sg_count].sg, 1, 255 254 (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, 256 - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 255 + DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); 257 256 258 257 if (!desc) { 259 258 dev_err(ssp->dev,
+8 -129
drivers/spi/spi-omap2-mcspi.c
··· 53 53 54 54 /* per-register bitmasks: */ 55 55 #define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17) 56 - #define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY BIT(0) 57 - #define OMAP2_MCSPI_IRQSTATUS_RX0_FULL BIT(2) 58 56 59 57 #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) 60 58 #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) ··· 291 293 } 292 294 293 295 static void omap2_mcspi_set_fifo(const struct spi_device *spi, 294 - struct spi_transfer *t, int enable, int dma_enabled) 296 + struct spi_transfer *t, int enable) 295 297 { 296 298 struct spi_controller *ctlr = spi->controller; 297 299 struct omap2_mcspi_cs *cs = spi->controller_state; ··· 312 314 max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2; 313 315 else 314 316 max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH; 315 - if (dma_enabled) 316 - wcnt = t->len / bytes_per_word; 317 - else 318 - wcnt = 0; 317 + 318 + wcnt = t->len / bytes_per_word; 319 319 if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) 320 320 goto disable_fifo; 321 321 322 322 xferlevel = wcnt << 16; 323 323 if (t->rx_buf != NULL) { 324 324 chconf |= OMAP2_MCSPI_CHCONF_FFER; 325 - if (dma_enabled) 326 - xferlevel |= (bytes_per_word - 1) << 8; 327 - else 328 - xferlevel |= (max_fifo_depth - 1) << 8; 325 + xferlevel |= (bytes_per_word - 1) << 8; 329 326 } 330 327 331 328 if (t->tx_buf != NULL) { 332 329 chconf |= OMAP2_MCSPI_CHCONF_FFET; 333 - if (dma_enabled) 334 - xferlevel |= bytes_per_word - 1; 335 - else 336 - xferlevel |= (max_fifo_depth - 1); 330 + xferlevel |= bytes_per_word - 1; 337 331 } 338 332 339 333 mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel); ··· 882 892 return count - c; 883 893 } 884 894 885 - static unsigned 886 - omap2_mcspi_txrx_piofifo(struct spi_device *spi, struct spi_transfer *xfer) 887 - { 888 - struct omap2_mcspi_cs *cs = spi->controller_state; 889 - struct omap2_mcspi *mcspi; 890 - unsigned int count, c; 891 - unsigned int iter, cwc; 892 - int last_request; 893 - void __iomem *base = cs->base; 894 - void __iomem *tx_reg; 895 - void __iomem *rx_reg; 896 - void 
__iomem *chstat_reg; 897 - void __iomem *irqstat_reg; 898 - int word_len, bytes_per_word; 899 - u8 *rx; 900 - const u8 *tx; 901 - 902 - mcspi = spi_controller_get_devdata(spi->controller); 903 - count = xfer->len; 904 - c = count; 905 - word_len = cs->word_len; 906 - bytes_per_word = mcspi_bytes_per_word(word_len); 907 - 908 - /* 909 - * We store the pre-calculated register addresses on stack to speed 910 - * up the transfer loop. 911 - */ 912 - tx_reg = base + OMAP2_MCSPI_TX0; 913 - rx_reg = base + OMAP2_MCSPI_RX0; 914 - chstat_reg = base + OMAP2_MCSPI_CHSTAT0; 915 - irqstat_reg = base + OMAP2_MCSPI_IRQSTATUS; 916 - 917 - if (c < (word_len >> 3)) 918 - return 0; 919 - 920 - rx = xfer->rx_buf; 921 - tx = xfer->tx_buf; 922 - 923 - do { 924 - /* calculate number of words in current iteration */ 925 - cwc = min((unsigned int)mcspi->fifo_depth / bytes_per_word, 926 - c / bytes_per_word); 927 - last_request = cwc != (mcspi->fifo_depth / bytes_per_word); 928 - if (tx) { 929 - if (mcspi_wait_for_reg_bit(irqstat_reg, 930 - OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) < 0) { 931 - dev_err(&spi->dev, "TX Empty timed out\n"); 932 - goto out; 933 - } 934 - writel_relaxed(OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY, irqstat_reg); 935 - 936 - for (iter = 0; iter < cwc; iter++, tx += bytes_per_word) { 937 - if (bytes_per_word == 1) 938 - writel_relaxed(*tx, tx_reg); 939 - else if (bytes_per_word == 2) 940 - writel_relaxed(*((u16 *)tx), tx_reg); 941 - else if (bytes_per_word == 4) 942 - writel_relaxed(*((u32 *)tx), tx_reg); 943 - } 944 - } 945 - 946 - if (rx) { 947 - if (!last_request && 948 - mcspi_wait_for_reg_bit(irqstat_reg, 949 - OMAP2_MCSPI_IRQSTATUS_RX0_FULL) < 0) { 950 - dev_err(&spi->dev, "RX_FULL timed out\n"); 951 - goto out; 952 - } 953 - writel_relaxed(OMAP2_MCSPI_IRQSTATUS_RX0_FULL, irqstat_reg); 954 - 955 - for (iter = 0; iter < cwc; iter++, rx += bytes_per_word) { 956 - if (last_request && 957 - mcspi_wait_for_reg_bit(chstat_reg, 958 - OMAP2_MCSPI_CHSTAT_RXS) < 0) { 959 - 
dev_err(&spi->dev, "RXS timed out\n"); 960 - goto out; 961 - } 962 - if (bytes_per_word == 1) 963 - *rx = readl_relaxed(rx_reg); 964 - else if (bytes_per_word == 2) 965 - *((u16 *)rx) = readl_relaxed(rx_reg); 966 - else if (bytes_per_word == 4) 967 - *((u32 *)rx) = readl_relaxed(rx_reg); 968 - } 969 - } 970 - 971 - if (last_request) { 972 - if (mcspi_wait_for_reg_bit(chstat_reg, 973 - OMAP2_MCSPI_CHSTAT_EOT) < 0) { 974 - dev_err(&spi->dev, "EOT timed out\n"); 975 - goto out; 976 - } 977 - if (mcspi_wait_for_reg_bit(chstat_reg, 978 - OMAP2_MCSPI_CHSTAT_TXFFE) < 0) { 979 - dev_err(&spi->dev, "TXFFE timed out\n"); 980 - goto out; 981 - } 982 - omap2_mcspi_set_enable(spi, 0); 983 - } 984 - c -= cwc * bytes_per_word; 985 - } while (c >= bytes_per_word); 986 - 987 - out: 988 - omap2_mcspi_set_enable(spi, 1); 989 - return count - c; 990 - } 991 - 992 895 static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz) 993 896 { 994 897 u32 div; ··· 1206 1323 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1207 1324 ctlr->cur_msg_mapped && 1208 1325 ctlr->can_dma(ctlr, spi, t)) 1209 - omap2_mcspi_set_fifo(spi, t, 1, 1); 1210 - else if (t->len > OMAP2_MCSPI_MAX_FIFODEPTH) 1211 - omap2_mcspi_set_fifo(spi, t, 1, 0); 1326 + omap2_mcspi_set_fifo(spi, t, 1); 1212 1327 1213 1328 omap2_mcspi_set_enable(spi, 1); 1214 1329 ··· 1219 1338 ctlr->cur_msg_mapped && 1220 1339 ctlr->can_dma(ctlr, spi, t)) 1221 1340 count = omap2_mcspi_txrx_dma(spi, t); 1222 - else if (mcspi->fifo_depth > 0) 1223 - count = omap2_mcspi_txrx_piofifo(spi, t); 1224 1341 else 1225 1342 count = omap2_mcspi_txrx_pio(spi, t); 1226 1343 ··· 1231 1352 omap2_mcspi_set_enable(spi, 0); 1232 1353 1233 1354 if (mcspi->fifo_depth > 0) 1234 - omap2_mcspi_set_fifo(spi, t, 0, 0); 1355 + omap2_mcspi_set_fifo(spi, t, 0); 1235 1356 1236 1357 out: 1237 1358 /* Restore defaults if they were overriden */ ··· 1254 1375 omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH)); 1255 1376 1256 1377 if (mcspi->fifo_depth > 0 && t) 1257 - 
omap2_mcspi_set_fifo(spi, t, 0, 0); 1378 + omap2_mcspi_set_fifo(spi, t, 0); 1258 1379 1259 1380 return status; 1260 1381 }
+9 -12
drivers/spi/spi-ppc4xx.c
··· 25 25 #include <linux/slab.h> 26 26 #include <linux/errno.h> 27 27 #include <linux/wait.h> 28 + #include <linux/platform_device.h> 28 29 #include <linux/of_address.h> 29 30 #include <linux/of_irq.h> 30 31 #include <linux/of_platform.h> 31 32 #include <linux/interrupt.h> 32 33 #include <linux/delay.h> 34 + #include <linux/platform_device.h> 33 35 34 36 #include <linux/spi/spi.h> 35 37 #include <linux/spi/spi_bitbang.h> ··· 168 166 int scr; 169 167 u8 cdm = 0; 170 168 u32 speed; 171 - u8 bits_per_word; 172 169 173 170 /* Start with the generic configuration for this device. */ 174 - bits_per_word = spi->bits_per_word; 175 171 speed = spi->max_speed_hz; 176 172 177 173 /* ··· 177 177 * the transfer to overwrite the generic configuration with zeros. 178 178 */ 179 179 if (t) { 180 - if (t->bits_per_word) 181 - bits_per_word = t->bits_per_word; 182 - 183 180 if (t->speed_hz) 184 181 speed = min(t->speed_hz, spi->max_speed_hz); 185 182 } ··· 359 362 360 363 /* Setup the state for the bitbang driver */ 361 364 bbp = &hw->bitbang; 362 - bbp->master = hw->host; 365 + bbp->ctlr = hw->host; 363 366 bbp->setup_transfer = spi_ppc4xx_setupxfer; 364 367 bbp->txrx_bufs = spi_ppc4xx_txrx; 365 368 bbp->use_dma = 0; 366 - bbp->master->setup = spi_ppc4xx_setup; 367 - bbp->master->cleanup = spi_ppc4xx_cleanup; 368 - bbp->master->bits_per_word_mask = SPI_BPW_MASK(8); 369 - bbp->master->use_gpio_descriptors = true; 369 + bbp->ctlr->setup = spi_ppc4xx_setup; 370 + bbp->ctlr->cleanup = spi_ppc4xx_cleanup; 371 + bbp->ctlr->bits_per_word_mask = SPI_BPW_MASK(8); 372 + bbp->ctlr->use_gpio_descriptors = true; 370 373 /* 371 374 * The SPI core will count the number of GPIO descriptors to figure 372 375 * out the number of chip selects available on the platform. 
373 376 */ 374 - bbp->master->num_chipselect = 0; 377 + bbp->ctlr->num_chipselect = 0; 375 378 376 379 /* the spi->mode bits understood by this driver: */ 377 - bbp->master->mode_bits = 380 + bbp->ctlr->mode_bits = 378 381 SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST; 379 382 380 383 /* Get the clock for the OPB */
+3 -2
drivers/ufs/core/ufshcd.c
··· 3057 3057 */ 3058 3058 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) 3059 3059 { 3060 - u32 mask = 1U << task_tag; 3060 + u32 mask; 3061 3061 unsigned long flags; 3062 3062 int err; 3063 3063 ··· 3074 3074 } 3075 3075 return 0; 3076 3076 } 3077 + 3078 + mask = 1U << task_tag; 3077 3079 3078 3080 /* clear outstanding transaction before retry */ 3079 3081 spin_lock_irqsave(hba->host->host_lock, flags); ··· 6354 6352 ufshcd_hold(hba); 6355 6353 if (!ufshcd_is_clkgating_allowed(hba)) 6356 6354 ufshcd_setup_clocks(hba, true); 6357 - ufshcd_release(hba); 6358 6355 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; 6359 6356 ufshcd_vops_resume(hba, pm_op); 6360 6357 } else {
+6 -2
drivers/xen/events/events_base.c
··· 923 923 return; 924 924 925 925 do_mask(info, EVT_MASK_REASON_EXPLICIT); 926 - xen_evtchn_close(evtchn); 927 926 xen_irq_info_cleanup(info); 927 + xen_evtchn_close(evtchn); 928 928 } 929 929 930 930 static void enable_pirq(struct irq_data *data) ··· 956 956 static void __unbind_from_irq(struct irq_info *info, unsigned int irq) 957 957 { 958 958 evtchn_port_t evtchn; 959 + bool close_evtchn = false; 959 960 960 961 if (!info) { 961 962 xen_irq_free_desc(irq); ··· 976 975 struct xenbus_device *dev; 977 976 978 977 if (!info->is_static) 979 - xen_evtchn_close(evtchn); 978 + close_evtchn = true; 980 979 981 980 switch (info->type) { 982 981 case IRQT_VIRQ: ··· 996 995 } 997 996 998 997 xen_irq_info_cleanup(info); 998 + 999 + if (close_evtchn) 1000 + xen_evtchn_close(evtchn); 999 1001 } 1000 1002 1001 1003 xen_free_irq(info);
+1 -1
drivers/xen/gntalloc.c
··· 317 317 rc = -EFAULT; 318 318 goto out_free; 319 319 } 320 - if (copy_to_user(arg->gref_ids, gref_ids, 320 + if (copy_to_user(arg->gref_ids_flex, gref_ids, 321 321 sizeof(gref_ids[0]) * op.count)) { 322 322 rc = -EFAULT; 323 323 goto out_free;
+1 -1
drivers/xen/pcpu.c
··· 65 65 uint32_t flags; 66 66 }; 67 67 68 - static struct bus_type xen_pcpu_subsys = { 68 + static const struct bus_type xen_pcpu_subsys = { 69 69 .name = "xen_cpu", 70 70 .dev_name = "xen_cpu", 71 71 };
+5 -10
drivers/xen/privcmd.c
··· 1223 1223 kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0])); 1224 1224 mmap_write_unlock(mm); 1225 1225 1226 - size = sizeof(*ports) * kioreq->vcpus; 1227 - ports = kzalloc(size, GFP_KERNEL); 1228 - if (!ports) { 1229 - ret = -ENOMEM; 1226 + ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports), 1227 + kioreq->vcpus, sizeof(*ports)); 1228 + if (IS_ERR(ports)) { 1229 + ret = PTR_ERR(ports); 1230 1230 goto error_kfree; 1231 - } 1232 - 1233 - if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) { 1234 - ret = -EFAULT; 1235 - goto error_kfree_ports; 1236 1231 } 1237 1232 1238 1233 for (i = 0; i < kioreq->vcpus; i++) { ··· 1251 1256 error_unbind: 1252 1257 while (--i >= 0) 1253 1258 unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]); 1254 - error_kfree_ports: 1259 + 1255 1260 kfree(ports); 1256 1261 error_kfree: 1257 1262 kfree(kioreq);
+1 -1
drivers/xen/xen-balloon.c
··· 237 237 NULL 238 238 }; 239 239 240 - static struct bus_type balloon_subsys = { 240 + static const struct bus_type balloon_subsys = { 241 241 .name = BALLOON_CLASS_NAME, 242 242 .dev_name = BALLOON_CLASS_NAME, 243 243 };
+9 -6
drivers/xen/xenbus/xenbus_client.c
··· 116 116 * @dev: xenbus device 117 117 * @path: path to watch 118 118 * @watch: watch to register 119 + * @will_handle: events queuing determine callback 119 120 * @callback: callback to register 120 121 * 121 122 * Register a @watch on the given path, using the given xenbus_watch structure 122 - * for storage, and the given @callback function as the callback. On success, 123 - * the given @path will be saved as @watch->node, and remains the 124 - * caller's to free. On error, @watch->node will 125 - * be NULL, the device will switch to %XenbusStateClosing, and the error will 126 - * be saved in the store. 123 + * for storage, @will_handle function as the callback to determine if each 124 + * event need to be queued, and the given @callback function as the callback. 125 + * On success, the given @path will be saved as @watch->node, and remains the 126 + * caller's to free. On error, @watch->node will be NULL, the device will 127 + * switch to %XenbusStateClosing, and the error will be saved in the store. 127 128 * 128 129 * Returns: %0 on success or -errno on error 129 130 */ ··· 159 158 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path 160 159 * @dev: xenbus device 161 160 * @watch: watch to register 161 + * @will_handle: events queuing determine callback 162 162 * @callback: callback to register 163 163 * @pathfmt: format of path to watch 164 164 * 165 165 * Register a watch on the given @path, using the given xenbus_watch 166 - * structure for storage, and the given @callback function as the 166 + * structure for storage, @will_handle function as the callback to determine if 167 + * each event need to be queued, and the given @callback function as the 167 168 * callback. On success, the watched path (@path/@path2) will be saved 168 169 * as @watch->node, and becomes the caller's to kfree(). 169 170 * On error, watch->node will be NULL, so the caller has nothing to
+78 -2
fs/btrfs/block-group.c
··· 1455 1455 */ 1456 1456 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1457 1457 { 1458 + LIST_HEAD(retry_list); 1458 1459 struct btrfs_block_group *block_group; 1459 1460 struct btrfs_space_info *space_info; 1460 1461 struct btrfs_trans_handle *trans; ··· 1477 1476 1478 1477 spin_lock(&fs_info->unused_bgs_lock); 1479 1478 while (!list_empty(&fs_info->unused_bgs)) { 1479 + u64 used; 1480 1480 int trimming; 1481 1481 1482 1482 block_group = list_first_entry(&fs_info->unused_bgs, ··· 1513 1511 goto next; 1514 1512 } 1515 1513 1514 + spin_lock(&space_info->lock); 1516 1515 spin_lock(&block_group->lock); 1517 - if (block_group->reserved || block_group->pinned || 1518 - block_group->used || block_group->ro || 1516 + if (btrfs_is_block_group_used(block_group) || block_group->ro || 1519 1517 list_is_singular(&block_group->list)) { 1520 1518 /* 1521 1519 * We want to bail if we made new allocations or have ··· 1525 1523 */ 1526 1524 trace_btrfs_skip_unused_block_group(block_group); 1527 1525 spin_unlock(&block_group->lock); 1526 + spin_unlock(&space_info->lock); 1528 1527 up_write(&space_info->groups_sem); 1529 1528 goto next; 1530 1529 } 1530 + 1531 + /* 1532 + * The block group may be unused but there may be space reserved 1533 + * accounting with the existence of that block group, that is, 1534 + * space_info->bytes_may_use was incremented by a task but no 1535 + * space was yet allocated from the block group by the task. 1536 + * That space may or may not be allocated, as we are generally 1537 + * pessimistic about space reservation for metadata as well as 1538 + * for data when using compression (as we reserve space based on 1539 + * the worst case, when data can't be compressed, and before 1540 + * actually attempting compression, before starting writeback). 
1541 + * 1542 + * So check if the total space of the space_info minus the size 1543 + * of this block group is less than the used space of the 1544 + * space_info - if that's the case, then it means we have tasks 1545 + * that might be relying on the block group in order to allocate 1546 + * extents, and add back the block group to the unused list when 1547 + * we finish, so that we retry later in case no tasks ended up 1548 + * needing to allocate extents from the block group. 1549 + */ 1550 + used = btrfs_space_info_used(space_info, true); 1551 + if (space_info->total_bytes - block_group->length < used) { 1552 + /* 1553 + * Add a reference for the list, compensate for the ref 1554 + * drop under the "next" label for the 1555 + * fs_info->unused_bgs list. 1556 + */ 1557 + btrfs_get_block_group(block_group); 1558 + list_add_tail(&block_group->bg_list, &retry_list); 1559 + 1560 + trace_btrfs_skip_unused_block_group(block_group); 1561 + spin_unlock(&block_group->lock); 1562 + spin_unlock(&space_info->lock); 1563 + up_write(&space_info->groups_sem); 1564 + goto next; 1565 + } 1566 + 1531 1567 spin_unlock(&block_group->lock); 1568 + spin_unlock(&space_info->lock); 1532 1569 1533 1570 /* We don't want to force the issue, only flip if it's ok. 
*/ 1534 1571 ret = inc_block_group_ro(block_group, 0); ··· 1691 1650 btrfs_put_block_group(block_group); 1692 1651 spin_lock(&fs_info->unused_bgs_lock); 1693 1652 } 1653 + list_splice_tail(&retry_list, &fs_info->unused_bgs); 1694 1654 spin_unlock(&fs_info->unused_bgs_lock); 1695 1655 mutex_unlock(&fs_info->reclaim_bgs_lock); 1696 1656 return; 1697 1657 1698 1658 flip_async: 1699 1659 btrfs_end_transaction(trans); 1660 + spin_lock(&fs_info->unused_bgs_lock); 1661 + list_splice_tail(&retry_list, &fs_info->unused_bgs); 1662 + spin_unlock(&fs_info->unused_bgs_lock); 1700 1663 mutex_unlock(&fs_info->reclaim_bgs_lock); 1701 1664 btrfs_put_block_group(block_group); 1702 1665 btrfs_discard_punt_unused_bgs_list(fs_info); ··· 2729 2684 btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); 2730 2685 list_del_init(&block_group->bg_list); 2731 2686 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); 2687 + 2688 + /* 2689 + * If the block group is still unused, add it to the list of 2690 + * unused block groups. The block group may have been created in 2691 + * order to satisfy a space reservation, in which case the 2692 + * extent allocation only happens later. But often we don't 2693 + * actually need to allocate space that we previously reserved, 2694 + * so the block group may become unused for a long time. For 2695 + * example for metadata we generally reserve space for a worst 2696 + * possible scenario, but then don't end up allocating all that 2697 + * space or none at all (due to no need to COW, extent buffers 2698 + * were already COWed in the current transaction and still 2699 + * unwritten, tree heights lower than the maximum possible 2700 + * height, etc). 
For data we generally reserve the axact amount 2701 + * of space we are going to allocate later, the exception is 2702 + * when using compression, as we must reserve space based on the 2703 + * uncompressed data size, because the compression is only done 2704 + * when writeback triggered and we don't know how much space we 2705 + * are actually going to need, so we reserve the uncompressed 2706 + * size because the data may be uncompressible in the worst case. 2707 + */ 2708 + if (ret == 0) { 2709 + bool used; 2710 + 2711 + spin_lock(&block_group->lock); 2712 + used = btrfs_is_block_group_used(block_group); 2713 + spin_unlock(&block_group->lock); 2714 + 2715 + if (!used) 2716 + btrfs_mark_bg_unused(block_group); 2717 + } 2732 2718 } 2733 2719 btrfs_trans_release_chunk_metadata(trans); 2734 2720 }
+7
fs/btrfs/block-group.h
··· 257 257 return (block_group->start + block_group->length); 258 258 } 259 259 260 + static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg) 261 + { 262 + lockdep_assert_held(&bg->lock); 263 + 264 + return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0); 265 + } 266 + 260 267 static inline bool btrfs_is_block_group_data_only( 261 268 struct btrfs_block_group *block_group) 262 269 {
+19 -10
fs/btrfs/delalloc-space.c
··· 245 245 struct btrfs_block_rsv *block_rsv = &inode->block_rsv; 246 246 u64 reserve_size = 0; 247 247 u64 qgroup_rsv_size = 0; 248 - u64 csum_leaves; 249 248 unsigned outstanding_extents; 250 249 251 250 lockdep_assert_held(&inode->lock); ··· 259 260 outstanding_extents); 260 261 reserve_size += btrfs_calc_metadata_size(fs_info, 1); 261 262 } 262 - csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, 263 - inode->csum_bytes); 264 - reserve_size += btrfs_calc_insert_metadata_size(fs_info, 265 - csum_leaves); 263 + if (!(inode->flags & BTRFS_INODE_NODATASUM)) { 264 + u64 csum_leaves; 265 + 266 + csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); 267 + reserve_size += btrfs_calc_insert_metadata_size(fs_info, csum_leaves); 268 + } 266 269 /* 267 270 * For qgroup rsv, the calculation is very simple: 268 271 * account one nodesize for each outstanding extent ··· 279 278 spin_unlock(&block_rsv->lock); 280 279 } 281 280 282 - static void calc_inode_reservations(struct btrfs_fs_info *fs_info, 281 + static void calc_inode_reservations(struct btrfs_inode *inode, 283 282 u64 num_bytes, u64 disk_num_bytes, 284 283 u64 *meta_reserve, u64 *qgroup_reserve) 285 284 { 285 + struct btrfs_fs_info *fs_info = inode->root->fs_info; 286 286 u64 nr_extents = count_max_extents(fs_info, num_bytes); 287 - u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes); 287 + u64 csum_leaves; 288 288 u64 inode_update = btrfs_calc_metadata_size(fs_info, 1); 289 + 290 + if (inode->flags & BTRFS_INODE_NODATASUM) 291 + csum_leaves = 0; 292 + else 293 + csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes); 289 294 290 295 *meta_reserve = btrfs_calc_insert_metadata_size(fs_info, 291 296 nr_extents + csum_leaves); ··· 344 337 * everything out and try again, which is bad. This way we just 345 338 * over-reserve slightly, and clean up the mess when we are done. 
346 339 */ 347 - calc_inode_reservations(fs_info, num_bytes, disk_num_bytes, 340 + calc_inode_reservations(inode, num_bytes, disk_num_bytes, 348 341 &meta_reserve, &qgroup_reserve); 349 342 ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true, 350 343 noflush); ··· 366 359 nr_extents = count_max_extents(fs_info, num_bytes); 367 360 spin_lock(&inode->lock); 368 361 btrfs_mod_outstanding_extents(inode, nr_extents); 369 - inode->csum_bytes += disk_num_bytes; 362 + if (!(inode->flags & BTRFS_INODE_NODATASUM)) 363 + inode->csum_bytes += disk_num_bytes; 370 364 btrfs_calculate_inode_block_rsv_size(fs_info, inode); 371 365 spin_unlock(&inode->lock); 372 366 ··· 401 393 402 394 num_bytes = ALIGN(num_bytes, fs_info->sectorsize); 403 395 spin_lock(&inode->lock); 404 - inode->csum_bytes -= num_bytes; 396 + if (!(inode->flags & BTRFS_INODE_NODATASUM)) 397 + inode->csum_bytes -= num_bytes; 405 398 btrfs_calculate_inode_block_rsv_size(fs_info, inode); 406 399 spin_unlock(&inode->lock); 407 400
+24 -2
fs/btrfs/inode.c
··· 3184 3184 unwritten_start += logical_len; 3185 3185 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3186 3186 3187 - /* Drop extent maps for the part of the extent we didn't write. */ 3188 - btrfs_drop_extent_map_range(inode, unwritten_start, end, false); 3187 + /* 3188 + * Drop extent maps for the part of the extent we didn't write. 3189 + * 3190 + * We have an exception here for the free_space_inode, this is 3191 + * because when we do btrfs_get_extent() on the free space inode 3192 + * we will search the commit root. If this is a new block group 3193 + * we won't find anything, and we will trip over the assert in 3194 + * writepage where we do ASSERT(em->block_start != 3195 + * EXTENT_MAP_HOLE). 3196 + * 3197 + * Theoretically we could also skip this for any NOCOW extent as 3198 + * we don't mess with the extent map tree in the NOCOW case, but 3199 + * for now simply skip this if we are the free space inode. 3200 + */ 3201 + if (!btrfs_is_free_space_inode(inode)) 3202 + btrfs_drop_extent_map_range(inode, unwritten_start, 3203 + end, false); 3189 3204 3190 3205 /* 3191 3206 * If the ordered extent had an IOERR or something else went ··· 10286 10271 return -EINVAL; 10287 10272 } 10288 10273 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 10274 + return -EINVAL; 10275 + 10276 + /* 10277 + * Compressed extents should always have checksums, so error out if we 10278 + * have a NOCOW file or inode was created while mounted with NODATASUM. 10279 + */ 10280 + if (inode->flags & BTRFS_INODE_NODATASUM) 10289 10281 return -EINVAL; 10290 10282 10291 10283 orig_count = iov_iter_count(from);
+2 -36
fs/btrfs/transaction.c
··· 564 564 u64 num_bytes, 565 565 u64 *delayed_refs_bytes) 566 566 { 567 - struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; 568 567 struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info; 569 - u64 extra_delayed_refs_bytes = 0; 570 - u64 bytes; 568 + u64 bytes = num_bytes + *delayed_refs_bytes; 571 569 int ret; 572 - 573 - /* 574 - * If there's a gap between the size of the delayed refs reserve and 575 - * its reserved space, than some tasks have added delayed refs or bumped 576 - * its size otherwise (due to block group creation or removal, or block 577 - * group item update). Also try to allocate that gap in order to prevent 578 - * using (and possibly abusing) the global reserve when committing the 579 - * transaction. 580 - */ 581 - if (flush == BTRFS_RESERVE_FLUSH_ALL && 582 - !btrfs_block_rsv_full(delayed_refs_rsv)) { 583 - spin_lock(&delayed_refs_rsv->lock); 584 - if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) 585 - extra_delayed_refs_bytes = delayed_refs_rsv->size - 586 - delayed_refs_rsv->reserved; 587 - spin_unlock(&delayed_refs_rsv->lock); 588 - } 589 - 590 - bytes = num_bytes + *delayed_refs_bytes + extra_delayed_refs_bytes; 591 570 592 571 /* 593 572 * We want to reserve all the bytes we may need all at once, so we only 594 573 * do 1 enospc flushing cycle per transaction start. 
595 574 */ 596 575 ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush); 597 - if (ret == 0) { 598 - if (extra_delayed_refs_bytes > 0) 599 - btrfs_migrate_to_delayed_refs_rsv(fs_info, 600 - extra_delayed_refs_bytes); 601 - return 0; 602 - } 603 - 604 - if (extra_delayed_refs_bytes > 0) { 605 - bytes -= extra_delayed_refs_bytes; 606 - ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush); 607 - if (ret == 0) 608 - return 0; 609 - } 610 576 611 577 /* 612 578 * If we are an emergency flush, which can steal from the global block 613 579 * reserve, then attempt to not reserve space for the delayed refs, as 614 580 * we will consume space for them from the global block reserve. 615 581 */ 616 - if (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { 582 + if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { 617 583 bytes -= *delayed_refs_bytes; 618 584 *delayed_refs_bytes = 0; 619 585 ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
+1
fs/btrfs/zoned.c
··· 1670 1670 } 1671 1671 bitmap_free(active); 1672 1672 kfree(zone_info); 1673 + btrfs_free_chunk_map(map); 1673 1674 1674 1675 return ret; 1675 1676 }
+2 -7
fs/ceph/caps.c
··· 1452 1452 if (flushing & CEPH_CAP_XATTR_EXCL) { 1453 1453 arg->old_xattr_buf = __ceph_build_xattrs_blob(ci); 1454 1454 arg->xattr_version = ci->i_xattrs.version; 1455 - arg->xattr_buf = ci->i_xattrs.blob; 1455 + arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob); 1456 1456 } else { 1457 1457 arg->xattr_buf = NULL; 1458 1458 arg->old_xattr_buf = NULL; ··· 1553 1553 encode_cap_msg(msg, arg); 1554 1554 ceph_con_send(&arg->session->s_con, msg); 1555 1555 ceph_buffer_put(arg->old_xattr_buf); 1556 + ceph_buffer_put(arg->xattr_buf); 1556 1557 if (arg->wake) 1557 1558 wake_up_all(&ci->i_cap_wq); 1558 1559 } ··· 3216 3215 3217 3216 enum put_cap_refs_mode { 3218 3217 PUT_CAP_REFS_SYNC = 0, 3219 - PUT_CAP_REFS_NO_CHECK, 3220 3218 PUT_CAP_REFS_ASYNC, 3221 3219 }; 3222 3220 ··· 3329 3329 void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had) 3330 3330 { 3331 3331 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC); 3332 - } 3333 - 3334 - void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had) 3335 - { 3336 - __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_NO_CHECK); 3337 3332 } 3338 3333 3339 3334 /*
+2
fs/ceph/inode.c
··· 78 78 if (!inode) 79 79 return ERR_PTR(-ENOMEM); 80 80 81 + inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT; 82 + 81 83 if (!S_ISLNK(*mode)) { 82 84 err = ceph_pre_init_acls(dir, mode, as_ctx); 83 85 if (err < 0)
+4 -5
fs/ceph/mds_client.c
··· 1089 1089 struct ceph_mds_request *req = container_of(kref, 1090 1090 struct ceph_mds_request, 1091 1091 r_kref); 1092 - ceph_mdsc_release_dir_caps_no_check(req); 1092 + ceph_mdsc_release_dir_caps_async(req); 1093 1093 destroy_reply_info(&req->r_reply_info); 1094 1094 if (req->r_request) 1095 1095 ceph_msg_put(req->r_request); ··· 4261 4261 } 4262 4262 } 4263 4263 4264 - void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req) 4264 + void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req) 4265 4265 { 4266 4266 struct ceph_client *cl = req->r_mdsc->fsc->client; 4267 4267 int dcaps; ··· 4269 4269 dcaps = xchg(&req->r_dir_caps, 0); 4270 4270 if (dcaps) { 4271 4271 doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps)); 4272 - ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent), 4273 - dcaps); 4272 + ceph_put_cap_refs_async(ceph_inode(req->r_parent), dcaps); 4274 4273 } 4275 4274 } 4276 4275 ··· 4305 4306 if (req->r_session->s_mds != session->s_mds) 4306 4307 continue; 4307 4308 4308 - ceph_mdsc_release_dir_caps_no_check(req); 4309 + ceph_mdsc_release_dir_caps_async(req); 4309 4310 4310 4311 __send_request(session, req, true); 4311 4312 }
+1 -1
fs/ceph/mds_client.h
··· 552 552 struct inode *dir, 553 553 struct ceph_mds_request *req); 554 554 extern void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req); 555 - extern void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req); 555 + extern void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req); 556 556 static inline void ceph_mdsc_get_request(struct ceph_mds_request *req) 557 557 { 558 558 kref_get(&req->r_kref);
-2
fs/ceph/super.h
··· 1255 1255 extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps); 1256 1256 extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had); 1257 1257 extern void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had); 1258 - extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, 1259 - int had); 1260 1258 extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, 1261 1259 struct ceph_snap_context *snapc); 1262 1260 extern void __ceph_remove_capsnap(struct inode *inode,
+16 -3
fs/hugetlbfs/inode.c
··· 100 100 loff_t len, vma_len; 101 101 int ret; 102 102 struct hstate *h = hstate_file(file); 103 + vm_flags_t vm_flags; 103 104 104 105 /* 105 106 * vma address alignment (but not the pgoff alignment) has ··· 142 141 file_accessed(file); 143 142 144 143 ret = -ENOMEM; 144 + 145 + vm_flags = vma->vm_flags; 146 + /* 147 + * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip 148 + * reserving here. Note: only for SHM hugetlbfs file, the inode 149 + * flag S_PRIVATE is set. 150 + */ 151 + if (inode->i_flags & S_PRIVATE) 152 + vm_flags |= VM_NORESERVE; 153 + 145 154 if (!hugetlb_reserve_pages(inode, 146 155 vma->vm_pgoff >> huge_page_order(h), 147 156 len >> huge_page_shift(h), vma, 148 - vma->vm_flags)) 157 + vm_flags)) 149 158 goto out; 150 159 151 160 ret = 0; ··· 1365 1354 { 1366 1355 struct hugetlbfs_fs_context *ctx = fc->fs_private; 1367 1356 struct fs_parse_result result; 1357 + struct hstate *h; 1368 1358 char *rest; 1369 1359 unsigned long ps; 1370 1360 int opt; ··· 1410 1398 1411 1399 case Opt_pagesize: 1412 1400 ps = memparse(param->string, &rest); 1413 - ctx->hstate = size_to_hstate(ps); 1414 - if (!ctx->hstate) { 1401 + h = size_to_hstate(ps); 1402 + if (!h) { 1415 1403 pr_err("Unsupported page size %lu MB\n", ps / SZ_1M); 1416 1404 return -EINVAL; 1417 1405 } 1406 + ctx->hstate = h; 1418 1407 return 0; 1419 1408 1420 1409 case Opt_min_size:
+8 -3
fs/namespace.c
··· 4472 4472 /* 4473 4473 * If this is an attached mount make sure it's located in the callers 4474 4474 * mount namespace. If it's not don't let the caller interact with it. 4475 - * If this is a detached mount make sure it has an anonymous mount 4476 - * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE. 4475 + * 4476 + * If this mount doesn't have a parent it's most often simply a 4477 + * detached mount with an anonymous mount namespace. IOW, something 4478 + * that's simply not attached yet. But there are apparently also users 4479 + * that do change mount properties on the rootfs itself. That obviously 4480 + * neither has a parent nor is it a detached mount so we cannot 4481 + * unconditionally check for detached mounts. 4477 4482 */ 4478 - if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns))) 4483 + if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt)) 4479 4484 goto out; 4480 4485 4481 4486 /*
+7 -1
fs/nilfs2/file.c
··· 107 107 nilfs_transaction_commit(inode->i_sb); 108 108 109 109 mapped: 110 - folio_wait_stable(folio); 110 + /* 111 + * Since checksumming including data blocks is performed to determine 112 + * the validity of the log to be written and used for recovery, it is 113 + * necessary to wait for writeback to finish here, regardless of the 114 + * stable write requirement of the backing device. 115 + */ 116 + folio_wait_writeback(folio); 111 117 out: 112 118 sb_end_pagefault(inode->i_sb); 113 119 return vmf_fs_error(ret);
+4 -3
fs/nilfs2/recovery.c
··· 472 472 473 473 static int nilfs_recovery_copy_block(struct the_nilfs *nilfs, 474 474 struct nilfs_recovery_block *rb, 475 - struct page *page) 475 + loff_t pos, struct page *page) 476 476 { 477 477 struct buffer_head *bh_org; 478 + size_t from = pos & ~PAGE_MASK; 478 479 void *kaddr; 479 480 480 481 bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize); ··· 483 482 return -EIO; 484 483 485 484 kaddr = kmap_atomic(page); 486 - memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size); 485 + memcpy(kaddr + from, bh_org->b_data, bh_org->b_size); 487 486 kunmap_atomic(kaddr); 488 487 brelse(bh_org); 489 488 return 0; ··· 522 521 goto failed_inode; 523 522 } 524 523 525 - err = nilfs_recovery_copy_block(nilfs, rb, page); 524 + err = nilfs_recovery_copy_block(nilfs, rb, pos, page); 526 525 if (unlikely(err)) 527 526 goto failed_page; 528 527
+5 -3
fs/nilfs2/segment.c
··· 1703 1703 1704 1704 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1705 1705 b_assoc_buffers) { 1706 - set_buffer_async_write(bh); 1707 1706 if (bh == segbuf->sb_super_root) { 1708 1707 if (bh->b_folio != bd_folio) { 1709 1708 folio_lock(bd_folio); ··· 1713 1714 } 1714 1715 break; 1715 1716 } 1717 + set_buffer_async_write(bh); 1716 1718 if (bh->b_folio != fs_folio) { 1717 1719 nilfs_begin_folio_io(fs_folio); 1718 1720 fs_folio = bh->b_folio; ··· 1800 1800 1801 1801 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1802 1802 b_assoc_buffers) { 1803 - clear_buffer_async_write(bh); 1804 1803 if (bh == segbuf->sb_super_root) { 1805 1804 clear_buffer_uptodate(bh); 1806 1805 if (bh->b_folio != bd_folio) { ··· 1808 1809 } 1809 1810 break; 1810 1811 } 1812 + clear_buffer_async_write(bh); 1811 1813 if (bh->b_folio != fs_folio) { 1812 1814 nilfs_end_folio_io(fs_folio, err); 1813 1815 fs_folio = bh->b_folio; ··· 1896 1896 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) | 1897 1897 BIT(BH_NILFS_Redirected)); 1898 1898 1899 - set_mask_bits(&bh->b_state, clear_bits, set_bits); 1900 1899 if (bh == segbuf->sb_super_root) { 1900 + set_buffer_uptodate(bh); 1901 + clear_buffer_dirty(bh); 1901 1902 if (bh->b_folio != bd_folio) { 1902 1903 folio_end_writeback(bd_folio); 1903 1904 bd_folio = bh->b_folio; ··· 1906 1905 update_sr = true; 1907 1906 break; 1908 1907 } 1908 + set_mask_bits(&bh->b_state, clear_bits, set_bits); 1909 1909 if (bh->b_folio != fs_folio) { 1910 1910 nilfs_end_folio_io(fs_folio, 0); 1911 1911 fs_folio = bh->b_folio;
+28 -17
fs/ntfs3/attrib.c
··· 886 886 struct runs_tree *run = &ni->file.run; 887 887 struct ntfs_sb_info *sbi; 888 888 u8 cluster_bits; 889 - struct ATTRIB *attr = NULL, *attr_b; 889 + struct ATTRIB *attr, *attr_b; 890 890 struct ATTR_LIST_ENTRY *le, *le_b; 891 891 struct mft_inode *mi, *mi_b; 892 892 CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen; ··· 904 904 *len = 0; 905 905 up_read(&ni->file.run_lock); 906 906 907 - if (*len) { 908 - if (*lcn != SPARSE_LCN || !new) 909 - return 0; /* Fast normal way without allocation. */ 910 - else if (clen > *len) 911 - clen = *len; 912 - } 907 + if (*len && (*lcn != SPARSE_LCN || !new)) 908 + return 0; /* Fast normal way without allocation. */ 913 909 914 910 /* No cluster in cache or we need to allocate cluster in hole. */ 915 911 sbi = ni->mi.sbi; ··· 913 917 914 918 ni_lock(ni); 915 919 down_write(&ni->file.run_lock); 920 + 921 + /* Repeat the code above (under write lock). */ 922 + if (!run_lookup_entry(run, vcn, lcn, len, NULL)) 923 + *len = 0; 924 + 925 + if (*len) { 926 + if (*lcn != SPARSE_LCN || !new) 927 + goto out; /* normal way without allocation. 
*/ 928 + if (clen > *len) 929 + clen = *len; 930 + } 916 931 917 932 le_b = NULL; 918 933 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); ··· 1743 1736 le_b = NULL; 1744 1737 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 1745 1738 0, NULL, &mi_b); 1746 - if (!attr_b) 1747 - return -ENOENT; 1739 + if (!attr_b) { 1740 + err = -ENOENT; 1741 + goto out; 1742 + } 1748 1743 1749 1744 attr = attr_b; 1750 1745 le = le_b; ··· 1827 1818 ok: 1828 1819 run_truncate_around(run, vcn); 1829 1820 out: 1830 - if (new_valid > data_size) 1831 - new_valid = data_size; 1821 + if (attr_b) { 1822 + if (new_valid > data_size) 1823 + new_valid = data_size; 1832 1824 1833 - valid_size = le64_to_cpu(attr_b->nres.valid_size); 1834 - if (new_valid != valid_size) { 1835 - attr_b->nres.valid_size = cpu_to_le64(valid_size); 1836 - mi_b->dirty = true; 1825 + valid_size = le64_to_cpu(attr_b->nres.valid_size); 1826 + if (new_valid != valid_size) { 1827 + attr_b->nres.valid_size = cpu_to_le64(valid_size); 1828 + mi_b->dirty = true; 1829 + } 1837 1830 } 1838 1831 1839 1832 return err; ··· 2084 2073 2085 2074 /* Update inode size. */ 2086 2075 ni->i_valid = valid_size; 2087 - ni->vfs_inode.i_size = data_size; 2076 + i_size_write(&ni->vfs_inode, data_size); 2088 2077 inode_set_bytes(&ni->vfs_inode, total_size); 2089 2078 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 2090 2079 mark_inode_dirty(&ni->vfs_inode); ··· 2499 2488 mi_b->dirty = true; 2500 2489 2501 2490 done: 2502 - ni->vfs_inode.i_size += bytes; 2491 + i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes); 2503 2492 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 2504 2493 mark_inode_dirty(&ni->vfs_inode); 2505 2494
+6 -6
fs/ntfs3/attrlist.c
··· 29 29 void al_destroy(struct ntfs_inode *ni) 30 30 { 31 31 run_close(&ni->attr_list.run); 32 - kfree(ni->attr_list.le); 32 + kvfree(ni->attr_list.le); 33 33 ni->attr_list.le = NULL; 34 34 ni->attr_list.size = 0; 35 35 ni->attr_list.dirty = false; ··· 127 127 { 128 128 size_t off; 129 129 u16 sz; 130 + const unsigned le_min_size = le_size(0); 130 131 131 132 if (!le) { 132 133 le = ni->attr_list.le; 133 134 } else { 134 135 sz = le16_to_cpu(le->size); 135 - if (sz < sizeof(struct ATTR_LIST_ENTRY)) { 136 + if (sz < le_min_size) { 136 137 /* Impossible 'cause we should not return such le. */ 137 138 return NULL; 138 139 } ··· 142 141 143 142 /* Check boundary. */ 144 143 off = PtrOffset(ni->attr_list.le, le); 145 - if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) { 144 + if (off + le_min_size > ni->attr_list.size) { 146 145 /* The regular end of list. */ 147 146 return NULL; 148 147 } ··· 150 149 sz = le16_to_cpu(le->size); 151 150 152 151 /* Check le for errors. */ 153 - if (sz < sizeof(struct ATTR_LIST_ENTRY) || 154 - off + sz > ni->attr_list.size || 152 + if (sz < le_min_size || off + sz > ni->attr_list.size || 155 153 sz < le->name_off + le->name_len * sizeof(short)) { 156 154 return NULL; 157 155 } ··· 318 318 memcpy(ptr, al->le, off); 319 319 memcpy(Add2Ptr(ptr, off + sz), le, old_size - off); 320 320 le = Add2Ptr(ptr, off); 321 - kfree(al->le); 321 + kvfree(al->le); 322 322 al->le = ptr; 323 323 } else { 324 324 memmove(Add2Ptr(le, sz), le, old_size - off);
+2 -2
fs/ntfs3/bitmap.c
··· 124 124 { 125 125 struct rb_node *node, *next; 126 126 127 - kfree(wnd->free_bits); 127 + kvfree(wnd->free_bits); 128 128 wnd->free_bits = NULL; 129 129 run_close(&wnd->run); 130 130 ··· 1360 1360 memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short)); 1361 1361 memset(new_free + wnd->nwnd, 0, 1362 1362 (new_wnd - wnd->nwnd) * sizeof(short)); 1363 - kfree(wnd->free_bits); 1363 + kvfree(wnd->free_bits); 1364 1364 wnd->free_bits = new_free; 1365 1365 } 1366 1366
+35 -13
fs/ntfs3/dir.c
··· 309 309 return 0; 310 310 } 311 311 312 - /* NTFS: symlinks are "dir + reparse" or "file + reparse" */ 313 - if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) 314 - dt_type = DT_LNK; 315 - else 316 - dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG; 312 + /* 313 + * NTFS: symlinks are "dir + reparse" or "file + reparse" 314 + * Unfortunately reparse attribute is used for many purposes (several dozens). 315 + * It is not possible here to know is this name symlink or not. 316 + * To get exactly the type of name we should to open inode (read mft). 317 + * getattr for opened file (fstat) correctly returns symlink. 318 + */ 319 + dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG; 320 + 321 + /* 322 + * It is not reliable to detect the type of name using duplicated information 323 + * stored in parent directory. 324 + * The only correct way to get the type of name - read MFT record and find ATTR_STD. 325 + * The code below is not good idea. 326 + * It does additional locks/reads just to get the type of name. 327 + * Should we use additional mount option to enable branch below? 
328 + */ 329 + if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) && 330 + ino != ni->mi.rno) { 331 + struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL); 332 + if (!IS_ERR_OR_NULL(inode)) { 333 + dt_type = fs_umode_to_dtype(inode->i_mode); 334 + iput(inode); 335 + } 336 + } 317 337 318 338 return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type); 319 339 } ··· 515 495 struct INDEX_HDR *hdr; 516 496 const struct ATTR_FILE_NAME *fname; 517 497 u32 e_size, off, end; 518 - u64 vbo = 0; 519 498 size_t drs = 0, fles = 0, bit = 0; 520 - loff_t i_size = ni->vfs_inode.i_size; 521 499 struct indx_node *node = NULL; 522 - u8 index_bits = ni->dir.index_bits; 500 + size_t max_indx = i_size_read(&ni->vfs_inode) >> ni->dir.index_bits; 523 501 524 502 if (is_empty) 525 503 *is_empty = true; ··· 536 518 e = Add2Ptr(hdr, off); 537 519 e_size = le16_to_cpu(e->size); 538 520 if (e_size < sizeof(struct NTFS_DE) || 539 - off + e_size > end) 521 + off + e_size > end) { 522 + /* Looks like corruption. */ 540 523 break; 524 + } 541 525 542 526 if (de_is_last(e)) 543 527 break; ··· 563 543 fles += 1; 564 544 } 565 545 566 - if (vbo >= i_size) 546 + if (bit >= max_indx) 567 547 goto out; 568 548 569 549 err = indx_used_bit(&ni->dir, ni, &bit); ··· 573 553 if (bit == MINUS_ONE_T) 574 554 goto out; 575 555 576 - vbo = (u64)bit << index_bits; 577 - if (vbo >= i_size) 556 + if (bit >= max_indx) 578 557 goto out; 579 558 580 559 err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits, ··· 583 564 584 565 hdr = &node->index->ihdr; 585 566 bit += 1; 586 - vbo = (u64)bit << ni->dir.idx2vbn_bits; 587 567 } 588 568 589 569 out: ··· 611 593 .iterate_shared = ntfs_readdir, 612 594 .fsync = generic_file_fsync, 613 595 .open = ntfs_file_open, 596 + .unlocked_ioctl = ntfs_ioctl, 597 + #ifdef CONFIG_COMPAT 598 + .compat_ioctl = ntfs_compat_ioctl, 599 + #endif 614 600 }; 615 601 // clang-format on
+59 -17
fs/ntfs3/file.c
··· 48 48 return 0; 49 49 } 50 50 51 - static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg) 51 + long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg) 52 52 { 53 53 struct inode *inode = file_inode(filp); 54 54 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; ··· 61 61 } 62 62 63 63 #ifdef CONFIG_COMPAT 64 - static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg) 64 + long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg) 65 65 66 66 { 67 67 return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); ··· 188 188 u32 bh_next, bh_off, to; 189 189 sector_t iblock; 190 190 struct folio *folio; 191 + bool dirty = false; 191 192 192 193 for (; idx < idx_end; idx += 1, from = 0) { 193 194 page_off = (loff_t)idx << PAGE_SHIFT; ··· 224 223 /* Ok, it's mapped. Make sure it's up-to-date. */ 225 224 if (folio_test_uptodate(folio)) 226 225 set_buffer_uptodate(bh); 227 - 228 - if (!buffer_uptodate(bh)) { 229 - err = bh_read(bh, 0); 230 - if (err < 0) { 231 - folio_unlock(folio); 232 - folio_put(folio); 233 - goto out; 234 - } 226 + else if (bh_read(bh, 0) < 0) { 227 + err = -EIO; 228 + folio_unlock(folio); 229 + folio_put(folio); 230 + goto out; 235 231 } 236 232 237 233 mark_buffer_dirty(bh); 238 - 239 234 } while (bh_off = bh_next, iblock += 1, 240 235 head != (bh = bh->b_this_page)); 241 236 242 237 folio_zero_segment(folio, from, to); 238 + dirty = true; 243 239 244 240 folio_unlock(folio); 245 241 folio_put(folio); 246 242 cond_resched(); 247 243 } 248 244 out: 249 - mark_inode_dirty(inode); 245 + if (dirty) 246 + mark_inode_dirty(inode); 250 247 return err; 251 248 } 252 249 ··· 259 260 u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT); 260 261 bool rw = vma->vm_flags & VM_WRITE; 261 262 int err; 263 + 264 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 265 + return -EIO; 262 266 263 267 if (is_encrypted(ni)) { 264 268 ntfs_inode_warn(inode, "mmap encrypted not supported"); ··· 501 499 ni_lock(ni); 502 500 
err = attr_punch_hole(ni, vbo, len, &frame_size); 503 501 ni_unlock(ni); 502 + if (!err) 503 + goto ok; 504 + 504 505 if (err != E_NTFS_NOTALIGNED) 505 506 goto out; 506 507 507 508 /* Process not aligned punch. */ 509 + err = 0; 508 510 mask = frame_size - 1; 509 511 vbo_a = (vbo + mask) & ~mask; 510 512 end_a = end & ~mask; ··· 531 525 ni_lock(ni); 532 526 err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL); 533 527 ni_unlock(ni); 528 + if (err) 529 + goto out; 534 530 } 535 531 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { 536 532 /* ··· 572 564 ni_lock(ni); 573 565 err = attr_insert_range(ni, vbo, len); 574 566 ni_unlock(ni); 567 + if (err) 568 + goto out; 575 569 } else { 576 570 /* Check new size. */ 577 571 u8 cluster_bits = sbi->cluster_bits; ··· 643 633 &ni->file.run, i_size, &ni->i_valid, 644 634 true, NULL); 645 635 ni_unlock(ni); 636 + if (err) 637 + goto out; 646 638 } else if (new_size > i_size) { 647 - inode->i_size = new_size; 639 + i_size_write(inode, new_size); 648 640 } 649 641 } 642 + 643 + ok: 644 + err = file_modified(file); 645 + if (err) 646 + goto out; 650 647 651 648 out: 652 649 if (map_locked) ··· 680 663 umode_t mode = inode->i_mode; 681 664 int err; 682 665 666 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 667 + return -EIO; 668 + 683 669 err = setattr_prepare(idmap, dentry, attr); 684 670 if (err) 685 671 goto out; ··· 696 676 goto out; 697 677 } 698 678 inode_dio_wait(inode); 699 - oldsize = inode->i_size; 679 + oldsize = i_size_read(inode); 700 680 newsize = attr->ia_size; 701 681 702 682 if (newsize <= oldsize) ··· 708 688 goto out; 709 689 710 690 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 711 - inode->i_size = newsize; 691 + i_size_write(inode, newsize); 712 692 } 713 693 714 694 setattr_copy(idmap, inode, attr); ··· 737 717 struct file *file = iocb->ki_filp; 738 718 struct inode *inode = file->f_mapping->host; 739 719 struct ntfs_inode *ni = ntfs_i(inode); 720 + 721 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 722 + 
return -EIO; 740 723 741 724 if (is_encrypted(ni)) { 742 725 ntfs_inode_warn(inode, "encrypted i/o not supported"); ··· 774 751 { 775 752 struct inode *inode = in->f_mapping->host; 776 753 struct ntfs_inode *ni = ntfs_i(inode); 754 + 755 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 756 + return -EIO; 777 757 778 758 if (is_encrypted(ni)) { 779 759 ntfs_inode_warn(inode, "encrypted i/o not supported"); ··· 847 821 size_t count = iov_iter_count(from); 848 822 loff_t pos = iocb->ki_pos; 849 823 struct inode *inode = file_inode(file); 850 - loff_t i_size = inode->i_size; 824 + loff_t i_size = i_size_read(inode); 851 825 struct address_space *mapping = inode->i_mapping; 852 826 struct ntfs_inode *ni = ntfs_i(inode); 853 827 u64 valid = ni->i_valid; ··· 1054 1028 iocb->ki_pos += written; 1055 1029 if (iocb->ki_pos > ni->i_valid) 1056 1030 ni->i_valid = iocb->ki_pos; 1031 + if (iocb->ki_pos > i_size) 1032 + i_size_write(inode, iocb->ki_pos); 1057 1033 1058 1034 return written; 1059 1035 } ··· 1069 1041 struct address_space *mapping = file->f_mapping; 1070 1042 struct inode *inode = mapping->host; 1071 1043 ssize_t ret; 1044 + int err; 1072 1045 struct ntfs_inode *ni = ntfs_i(inode); 1046 + 1047 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 1048 + return -EIO; 1073 1049 1074 1050 if (is_encrypted(ni)) { 1075 1051 ntfs_inode_warn(inode, "encrypted i/o not supported"); ··· 1099 1067 ret = generic_write_checks(iocb, from); 1100 1068 if (ret <= 0) 1101 1069 goto out; 1070 + 1071 + err = file_modified(iocb->ki_filp); 1072 + if (err) { 1073 + ret = err; 1074 + goto out; 1075 + } 1102 1076 1103 1077 if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) { 1104 1078 /* Should never be here, see ntfs_file_open(). 
*/ ··· 1134 1096 int ntfs_file_open(struct inode *inode, struct file *file) 1135 1097 { 1136 1098 struct ntfs_inode *ni = ntfs_i(inode); 1099 + 1100 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 1101 + return -EIO; 1137 1102 1138 1103 if (unlikely((is_compressed(ni) || is_encrypted(ni)) && 1139 1104 (file->f_flags & O_DIRECT))) { ··· 1179 1138 down_write(&ni->file.run_lock); 1180 1139 1181 1140 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, 1182 - inode->i_size, &ni->i_valid, false, NULL); 1141 + i_size_read(inode), &ni->i_valid, false, 1142 + NULL); 1183 1143 1184 1144 up_write(&ni->file.run_lock); 1185 1145 ni_unlock(ni);
+11 -8
fs/ntfs3/frecord.c
··· 778 778 run_deallocate(sbi, &ni->attr_list.run, true); 779 779 run_close(&ni->attr_list.run); 780 780 ni->attr_list.size = 0; 781 - kfree(ni->attr_list.le); 781 + kvfree(ni->attr_list.le); 782 782 ni->attr_list.le = NULL; 783 783 ni->attr_list.dirty = false; 784 784 ··· 927 927 return 0; 928 928 929 929 out: 930 - kfree(ni->attr_list.le); 930 + kvfree(ni->attr_list.le); 931 931 ni->attr_list.le = NULL; 932 932 ni->attr_list.size = 0; 933 933 return err; ··· 2099 2099 gfp_t gfp_mask; 2100 2100 struct page *pg; 2101 2101 2102 - if (vbo >= ni->vfs_inode.i_size) { 2102 + if (vbo >= i_size_read(&ni->vfs_inode)) { 2103 2103 SetPageUptodate(page); 2104 2104 err = 0; 2105 2105 goto out; ··· 2173 2173 { 2174 2174 struct ntfs_sb_info *sbi = ni->mi.sbi; 2175 2175 struct inode *inode = &ni->vfs_inode; 2176 - loff_t i_size = inode->i_size; 2176 + loff_t i_size = i_size_read(inode); 2177 2177 struct address_space *mapping = inode->i_mapping; 2178 2178 gfp_t gfp_mask = mapping_gfp_mask(mapping); 2179 2179 struct page **pages = NULL; ··· 2457 2457 struct ATTR_LIST_ENTRY *le = NULL; 2458 2458 struct runs_tree *run = &ni->file.run; 2459 2459 u64 valid_size = ni->i_valid; 2460 + loff_t i_size = i_size_read(&ni->vfs_inode); 2460 2461 u64 vbo_disk; 2461 2462 size_t unc_size; 2462 2463 u32 frame_size, i, npages_disk, ondisk_size; ··· 2549 2548 } 2550 2549 } 2551 2550 2552 - frames = (ni->vfs_inode.i_size - 1) >> frame_bits; 2551 + frames = (i_size - 1) >> frame_bits; 2553 2552 2554 2553 err = attr_wof_frame_info(ni, attr, run, frame64, frames, 2555 2554 frame_bits, &ondisk_size, &vbo_data); ··· 2557 2556 goto out2; 2558 2557 2559 2558 if (frame64 == frames) { 2560 - unc_size = 1 + ((ni->vfs_inode.i_size - 1) & 2561 - (frame_size - 1)); 2559 + unc_size = 1 + ((i_size - 1) & (frame_size - 1)); 2562 2560 ondisk_size = attr_size(attr) - vbo_data; 2563 2561 } else { 2564 2562 unc_size = frame_size; ··· 3259 3259 if (is_bad_inode(inode) || sb_rdonly(sb)) 3260 3260 return 0; 3261 3261 3262 
+ if (unlikely(ntfs3_forced_shutdown(sb))) 3263 + return -EIO; 3264 + 3262 3265 if (!ni_trylock(ni)) { 3263 3266 /* 'ni' is under modification, skip for now. */ 3264 3267 mark_inode_dirty_sync(inode); ··· 3291 3288 modified = true; 3292 3289 } 3293 3290 3294 - ts = inode_get_mtime(inode); 3291 + ts = inode_get_ctime(inode); 3295 3292 dup.c_time = kernel2nt(&ts); 3296 3293 if (std->c_time != dup.c_time) { 3297 3294 std->c_time = dup.c_time;
+104 -124
fs/ntfs3/fslog.c
··· 465 465 { 466 466 const struct RESTART_AREA *ra; 467 467 u16 cl, fl, ul; 468 - u32 off, l_size, file_dat_bits, file_size_round; 468 + u32 off, l_size, seq_bits; 469 469 u16 ro = le16_to_cpu(rhdr->ra_off); 470 470 u32 sys_page = le32_to_cpu(rhdr->sys_page_size); 471 471 ··· 511 511 /* Make sure the sequence number bits match the log file size. */ 512 512 l_size = le64_to_cpu(ra->l_size); 513 513 514 - file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits); 515 - file_size_round = 1u << (file_dat_bits + 3); 516 - if (file_size_round != l_size && 517 - (file_size_round < l_size || (file_size_round / 2) > l_size)) { 518 - return false; 514 + seq_bits = sizeof(u64) * 8 + 3; 515 + while (l_size) { 516 + l_size >>= 1; 517 + seq_bits -= 1; 519 518 } 519 + 520 + if (seq_bits != ra->seq_num_bits) 521 + return false; 520 522 521 523 /* The log page data offset and record header length must be quad-aligned. */ 522 524 if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) || ··· 976 974 return e; 977 975 } 978 976 977 + struct restart_info { 978 + u64 last_lsn; 979 + struct RESTART_HDR *r_page; 980 + u32 vbo; 981 + bool chkdsk_was_run; 982 + bool valid_page; 983 + bool initialized; 984 + bool restart; 985 + }; 986 + 979 987 #define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001) 980 988 981 989 #define NTFSLOG_WRAPPED 0x00000001 ··· 999 987 struct ntfs_inode *ni; 1000 988 1001 989 u32 l_size; 990 + u32 orig_file_size; 1002 991 u32 sys_page_size; 1003 992 u32 sys_page_mask; 1004 993 u32 page_size; ··· 1053 1040 1054 1041 struct CLIENT_ID client_id; 1055 1042 u32 client_undo_commit; 1043 + 1044 + struct restart_info rst_info, rst_info2; 1056 1045 }; 1057 1046 1058 1047 static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn) ··· 1120 1105 lsn <= le64_to_cpu(log->ra->current_lsn) && lsn; 1121 1106 } 1122 1107 1123 - struct restart_info { 1124 - u64 last_lsn; 1125 - struct RESTART_HDR *r_page; 1126 - u32 vbo; 1127 - bool chkdsk_was_run; 1128 - bool valid_page; 1129 - bool 
initialized; 1130 - bool restart; 1131 - }; 1132 - 1133 1108 static int read_log_page(struct ntfs_log *log, u32 vbo, 1134 1109 struct RECORD_PAGE_HDR **buffer, bool *usa_error) 1135 1110 { ··· 1181 1176 * restart page header. It will stop the first time we find a 1182 1177 * valid page header. 1183 1178 */ 1184 - static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first, 1179 + static int log_read_rst(struct ntfs_log *log, bool first, 1185 1180 struct restart_info *info) 1186 1181 { 1187 1182 u32 skip, vbo; ··· 1197 1192 } 1198 1193 1199 1194 /* Loop continuously until we succeed. */ 1200 - for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) { 1195 + for (; vbo < log->l_size; vbo = 2 * vbo + skip, skip = 0) { 1201 1196 bool usa_error; 1202 1197 bool brst, bchk; 1203 1198 struct RESTART_AREA *ra; ··· 1290 1285 /* 1291 1286 * Ilog_init_pg_hdr - Init @log from restart page header. 1292 1287 */ 1293 - static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size, 1294 - u32 page_size, u16 major_ver, u16 minor_ver) 1288 + static void log_init_pg_hdr(struct ntfs_log *log, u16 major_ver, u16 minor_ver) 1295 1289 { 1296 - log->sys_page_size = sys_page_size; 1297 - log->sys_page_mask = sys_page_size - 1; 1298 - log->page_size = page_size; 1299 - log->page_mask = page_size - 1; 1300 - log->page_bits = blksize_bits(page_size); 1290 + log->sys_page_size = log->page_size; 1291 + log->sys_page_mask = log->page_mask; 1301 1292 1302 1293 log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits; 1303 1294 if (!log->clst_per_page) 1304 1295 log->clst_per_page = 1; 1305 1296 1306 - log->first_page = major_ver >= 2 ? 1307 - 0x22 * page_size : 1308 - ((sys_page_size << 1) + (page_size << 1)); 1297 + log->first_page = major_ver >= 2 ? 
0x22 * log->page_size : 1298 + 4 * log->page_size; 1309 1299 log->major_ver = major_ver; 1310 1300 log->minor_ver = minor_ver; 1311 1301 } ··· 1308 1308 /* 1309 1309 * log_create - Init @log in cases when we don't have a restart area to use. 1310 1310 */ 1311 - static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn, 1311 + static void log_create(struct ntfs_log *log, const u64 last_lsn, 1312 1312 u32 open_log_count, bool wrapped, bool use_multi_page) 1313 1313 { 1314 - log->l_size = l_size; 1315 1314 /* All file offsets must be quadword aligned. */ 1316 - log->file_data_bits = blksize_bits(l_size) - 3; 1315 + log->file_data_bits = blksize_bits(log->l_size) - 3; 1317 1316 log->seq_num_mask = (8 << log->file_data_bits) - 1; 1318 1317 log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits; 1319 1318 log->seq_num = (last_lsn >> log->file_data_bits) + 2; ··· 3719 3720 struct ntfs_sb_info *sbi = ni->mi.sbi; 3720 3721 struct ntfs_log *log; 3721 3722 3722 - struct restart_info rst_info, rst_info2; 3723 - u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0; 3723 + u64 rec_lsn, checkpt_lsn = 0, rlsn = 0; 3724 3724 struct ATTR_NAME_ENTRY *attr_names = NULL; 3725 - struct ATTR_NAME_ENTRY *ane; 3726 3725 struct RESTART_TABLE *dptbl = NULL; 3727 3726 struct RESTART_TABLE *trtbl = NULL; 3728 3727 const struct RESTART_TABLE *rt; ··· 3738 3741 struct TRANSACTION_ENTRY *tr; 3739 3742 struct DIR_PAGE_ENTRY *dp; 3740 3743 u32 i, bytes_per_attr_entry; 3741 - u32 l_size = ni->vfs_inode.i_size; 3742 - u32 orig_file_size = l_size; 3743 - u32 page_size, vbo, tail, off, dlen; 3744 + u32 vbo, tail, off, dlen; 3744 3745 u32 saved_len, rec_len, transact_id; 3745 3746 bool use_second_page; 3746 3747 struct RESTART_AREA *ra2, *ra = NULL; ··· 3753 3758 u16 t16; 3754 3759 u32 t32; 3755 3760 3756 - /* Get the size of page. NOTE: To replay we can use default page. 
*/ 3757 - #if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2 3758 - page_size = norm_file_page(PAGE_SIZE, &l_size, true); 3759 - #else 3760 - page_size = norm_file_page(PAGE_SIZE, &l_size, false); 3761 - #endif 3762 - if (!page_size) 3763 - return -EINVAL; 3764 - 3765 3761 log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS); 3766 3762 if (!log) 3767 3763 return -ENOMEM; 3768 3764 3769 3765 log->ni = ni; 3770 - log->l_size = l_size; 3771 - log->one_page_buf = kmalloc(page_size, GFP_NOFS); 3766 + log->l_size = log->orig_file_size = ni->vfs_inode.i_size; 3772 3767 3768 + /* Get the size of page. NOTE: To replay we can use default page. */ 3769 + #if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2 3770 + log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, true); 3771 + #else 3772 + log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, false); 3773 + #endif 3774 + if (!log->page_size) { 3775 + err = -EINVAL; 3776 + goto out; 3777 + } 3778 + 3779 + log->one_page_buf = kmalloc(log->page_size, GFP_NOFS); 3773 3780 if (!log->one_page_buf) { 3774 3781 err = -ENOMEM; 3775 3782 goto out; 3776 3783 } 3777 3784 3778 - log->page_size = page_size; 3779 - log->page_mask = page_size - 1; 3780 - log->page_bits = blksize_bits(page_size); 3785 + log->page_mask = log->page_size - 1; 3786 + log->page_bits = blksize_bits(log->page_size); 3781 3787 3782 3788 /* Look for a restart area on the disk. 
*/ 3783 - memset(&rst_info, 0, sizeof(struct restart_info)); 3784 - err = log_read_rst(log, l_size, true, &rst_info); 3789 + err = log_read_rst(log, true, &log->rst_info); 3785 3790 if (err) 3786 3791 goto out; 3787 3792 3788 3793 /* remember 'initialized' */ 3789 - *initialized = rst_info.initialized; 3794 + *initialized = log->rst_info.initialized; 3790 3795 3791 - if (!rst_info.restart) { 3792 - if (rst_info.initialized) { 3796 + if (!log->rst_info.restart) { 3797 + if (log->rst_info.initialized) { 3793 3798 /* No restart area but the file is not initialized. */ 3794 3799 err = -EINVAL; 3795 3800 goto out; 3796 3801 } 3797 3802 3798 - log_init_pg_hdr(log, page_size, page_size, 1, 1); 3799 - log_create(log, l_size, 0, get_random_u32(), false, false); 3800 - 3801 - log->ra = ra; 3803 + log_init_pg_hdr(log, 1, 1); 3804 + log_create(log, 0, get_random_u32(), false, false); 3802 3805 3803 3806 ra = log_create_ra(log); 3804 3807 if (!ra) { ··· 3813 3820 * If the restart offset above wasn't zero then we won't 3814 3821 * look for a second restart. 3815 3822 */ 3816 - if (rst_info.vbo) 3823 + if (log->rst_info.vbo) 3817 3824 goto check_restart_area; 3818 3825 3819 - memset(&rst_info2, 0, sizeof(struct restart_info)); 3820 - err = log_read_rst(log, l_size, false, &rst_info2); 3826 + err = log_read_rst(log, false, &log->rst_info2); 3821 3827 if (err) 3822 3828 goto out; 3823 3829 3824 3830 /* Determine which restart area to use. 
*/ 3825 - if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn) 3831 + if (!log->rst_info2.restart || 3832 + log->rst_info2.last_lsn <= log->rst_info.last_lsn) 3826 3833 goto use_first_page; 3827 3834 3828 3835 use_second_page = true; 3829 3836 3830 - if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) { 3837 + if (log->rst_info.chkdsk_was_run && 3838 + log->page_size != log->rst_info.vbo) { 3831 3839 struct RECORD_PAGE_HDR *sp = NULL; 3832 3840 bool usa_error; 3833 3841 3834 - if (!read_log_page(log, page_size, &sp, &usa_error) && 3842 + if (!read_log_page(log, log->page_size, &sp, &usa_error) && 3835 3843 sp->rhdr.sign == NTFS_CHKD_SIGNATURE) { 3836 3844 use_second_page = false; 3837 3845 } ··· 3840 3846 } 3841 3847 3842 3848 if (use_second_page) { 3843 - kfree(rst_info.r_page); 3844 - memcpy(&rst_info, &rst_info2, sizeof(struct restart_info)); 3845 - rst_info2.r_page = NULL; 3849 + kfree(log->rst_info.r_page); 3850 + memcpy(&log->rst_info, &log->rst_info2, 3851 + sizeof(struct restart_info)); 3852 + log->rst_info2.r_page = NULL; 3846 3853 } 3847 3854 3848 3855 use_first_page: 3849 - kfree(rst_info2.r_page); 3856 + kfree(log->rst_info2.r_page); 3850 3857 3851 3858 check_restart_area: 3852 3859 /* 3853 3860 * If the restart area is at offset 0, we want 3854 3861 * to write the second restart area first. 3855 3862 */ 3856 - log->init_ra = !!rst_info.vbo; 3863 + log->init_ra = !!log->rst_info.vbo; 3857 3864 3858 3865 /* If we have a valid page then grab a pointer to the restart area. */ 3859 - ra2 = rst_info.valid_page ? 3860 - Add2Ptr(rst_info.r_page, 3861 - le16_to_cpu(rst_info.r_page->ra_off)) : 3866 + ra2 = log->rst_info.valid_page ? 
3867 + Add2Ptr(log->rst_info.r_page, 3868 + le16_to_cpu(log->rst_info.r_page->ra_off)) : 3862 3869 NULL; 3863 3870 3864 - if (rst_info.chkdsk_was_run || 3871 + if (log->rst_info.chkdsk_was_run || 3865 3872 (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) { 3866 3873 bool wrapped = false; 3867 3874 bool use_multi_page = false; 3868 3875 u32 open_log_count; 3869 3876 3870 3877 /* Do some checks based on whether we have a valid log page. */ 3871 - if (!rst_info.valid_page) { 3872 - open_log_count = get_random_u32(); 3873 - goto init_log_instance; 3874 - } 3875 - open_log_count = le32_to_cpu(ra2->open_log_count); 3878 + open_log_count = log->rst_info.valid_page ? 3879 + le32_to_cpu(ra2->open_log_count) : 3880 + get_random_u32(); 3876 3881 3877 - /* 3878 - * If the restart page size isn't changing then we want to 3879 - * check how much work we need to do. 3880 - */ 3881 - if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size)) 3882 - goto init_log_instance; 3882 + log_init_pg_hdr(log, 1, 1); 3883 3883 3884 - init_log_instance: 3885 - log_init_pg_hdr(log, page_size, page_size, 1, 1); 3886 - 3887 - log_create(log, l_size, rst_info.last_lsn, open_log_count, 3888 - wrapped, use_multi_page); 3884 + log_create(log, log->rst_info.last_lsn, open_log_count, wrapped, 3885 + use_multi_page); 3889 3886 3890 3887 ra = log_create_ra(log); 3891 3888 if (!ra) { ··· 3901 3916 * use the log file. We must use the system page size instead of the 3902 3917 * default size if there is not a clean shutdown. 
3903 3918 */ 3904 - t32 = le32_to_cpu(rst_info.r_page->sys_page_size); 3905 - if (page_size != t32) { 3906 - l_size = orig_file_size; 3907 - page_size = 3908 - norm_file_page(t32, &l_size, t32 == DefaultLogPageSize); 3919 + t32 = le32_to_cpu(log->rst_info.r_page->sys_page_size); 3920 + if (log->page_size != t32) { 3921 + log->l_size = log->orig_file_size; 3922 + log->page_size = norm_file_page(t32, &log->l_size, 3923 + t32 == DefaultLogPageSize); 3909 3924 } 3910 3925 3911 - if (page_size != t32 || 3912 - page_size != le32_to_cpu(rst_info.r_page->page_size)) { 3926 + if (log->page_size != t32 || 3927 + log->page_size != le32_to_cpu(log->rst_info.r_page->page_size)) { 3913 3928 err = -EINVAL; 3914 3929 goto out; 3915 3930 } 3916 3931 3917 3932 /* If the file size has shrunk then we won't mount it. */ 3918 - if (l_size < le64_to_cpu(ra2->l_size)) { 3933 + if (log->l_size < le64_to_cpu(ra2->l_size)) { 3919 3934 err = -EINVAL; 3920 3935 goto out; 3921 3936 } 3922 3937 3923 - log_init_pg_hdr(log, page_size, page_size, 3924 - le16_to_cpu(rst_info.r_page->major_ver), 3925 - le16_to_cpu(rst_info.r_page->minor_ver)); 3938 + log_init_pg_hdr(log, le16_to_cpu(log->rst_info.r_page->major_ver), 3939 + le16_to_cpu(log->rst_info.r_page->minor_ver)); 3926 3940 3927 3941 log->l_size = le64_to_cpu(ra2->l_size); 3928 3942 log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits); ··· 3929 3945 log->seq_num_mask = (8 << log->file_data_bits) - 1; 3930 3946 log->last_lsn = le64_to_cpu(ra2->current_lsn); 3931 3947 log->seq_num = log->last_lsn >> log->file_data_bits; 3932 - log->ra_off = le16_to_cpu(rst_info.r_page->ra_off); 3948 + log->ra_off = le16_to_cpu(log->rst_info.r_page->ra_off); 3933 3949 log->restart_size = log->sys_page_size - log->ra_off; 3934 3950 log->record_header_len = le16_to_cpu(ra2->rec_hdr_len); 3935 3951 log->ra_size = le16_to_cpu(ra2->ra_len); ··· 4029 4045 log->current_avail = current_log_avail(log); 4030 4046 4031 4047 /* Remember which restart area to write first. 
*/ 4032 - log->init_ra = rst_info.vbo; 4048 + log->init_ra = log->rst_info.vbo; 4033 4049 4034 4050 process_log: 4035 4051 /* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values. */ ··· 4089 4105 log->client_id.seq_num = cr->seq_num; 4090 4106 log->client_id.client_idx = client; 4091 4107 4092 - err = read_rst_area(log, &rst, &ra_lsn); 4108 + err = read_rst_area(log, &rst, &checkpt_lsn); 4093 4109 if (err) 4094 4110 goto out; 4095 4111 ··· 4098 4114 4099 4115 bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28; 4100 4116 4101 - checkpt_lsn = le64_to_cpu(rst->check_point_start); 4102 - if (!checkpt_lsn) 4103 - checkpt_lsn = ra_lsn; 4117 + if (rst->check_point_start) 4118 + checkpt_lsn = le64_to_cpu(rst->check_point_start); 4104 4119 4105 4120 /* Allocate and Read the Transaction Table. */ 4106 4121 if (!rst->transact_table_len) ··· 4313 4330 lcb = NULL; 4314 4331 4315 4332 check_attribute_names2: 4316 - if (!rst->attr_names_len) 4317 - goto trace_attribute_table; 4318 - 4319 - ane = attr_names; 4320 - if (!oatbl) 4321 - goto trace_attribute_table; 4322 - while (ane->off) { 4323 - /* TODO: Clear table on exit! */ 4324 - oe = Add2Ptr(oatbl, le16_to_cpu(ane->off)); 4325 - t16 = le16_to_cpu(ane->name_bytes); 4326 - oe->name_len = t16 / sizeof(short); 4327 - oe->ptr = ane->name; 4328 - oe->is_attr_name = 2; 4329 - ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16); 4333 + if (rst->attr_names_len && oatbl) { 4334 + struct ATTR_NAME_ENTRY *ane = attr_names; 4335 + while (ane->off) { 4336 + /* TODO: Clear table on exit! 
*/ 4337 + oe = Add2Ptr(oatbl, le16_to_cpu(ane->off)); 4338 + t16 = le16_to_cpu(ane->name_bytes); 4339 + oe->name_len = t16 / sizeof(short); 4340 + oe->ptr = ane->name; 4341 + oe->is_attr_name = 2; 4342 + ane = Add2Ptr(ane, 4343 + sizeof(struct ATTR_NAME_ENTRY) + t16); 4344 + } 4330 4345 } 4331 4346 4332 - trace_attribute_table: 4333 4347 /* 4334 4348 * If the checkpt_lsn is zero, then this is a freshly 4335 4349 * formatted disk and we have no work to do. ··· 5169 5189 kfree(oatbl); 5170 5190 kfree(dptbl); 5171 5191 kfree(attr_names); 5172 - kfree(rst_info.r_page); 5192 + kfree(log->rst_info.r_page); 5173 5193 5174 5194 kfree(ra); 5175 5195 kfree(log->one_page_buf);
+27 -2
fs/ntfs3/fsntfs.c
··· 853 853 /* 854 854 * sb can be NULL here. In this case sbi->flags should be 0 too. 855 855 */ 856 - if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR)) 856 + if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) || 857 + unlikely(ntfs3_forced_shutdown(sb))) 857 858 return; 858 859 859 860 blocksize = sb->s_blocksize; ··· 1005 1004 while (bytes--) 1006 1005 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++); 1007 1006 return cpu_to_le32(hash); 1007 + } 1008 + 1009 + /* 1010 + * simple wrapper for sb_bread_unmovable. 1011 + */ 1012 + struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block) 1013 + { 1014 + struct ntfs_sb_info *sbi = sb->s_fs_info; 1015 + struct buffer_head *bh; 1016 + 1017 + if (unlikely(block >= sbi->volume.blocks)) { 1018 + /* prevent generic message "attempt to access beyond end of device" */ 1019 + ntfs_err(sb, "try to read out of volume at offset 0x%llx", 1020 + (u64)block << sb->s_blocksize_bits); 1021 + return NULL; 1022 + } 1023 + 1024 + bh = sb_bread_unmovable(sb, block); 1025 + if (bh) 1026 + return bh; 1027 + 1028 + ntfs_err(sb, "failed to read volume at offset 0x%llx", 1029 + (u64)block << sb->s_blocksize_bits); 1030 + return NULL; 1008 1031 } 1009 1032 1010 1033 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer) ··· 2153 2128 if (le32_to_cpu(d_security->size) == new_sec_size && 2154 2129 d_security->key.hash == hash_key.hash && 2155 2130 !memcmp(d_security + 1, sd, size_sd)) { 2156 - *security_id = d_security->key.sec_id; 2157 2131 /* Such security already exists. */ 2132 + *security_id = d_security->key.sec_id; 2158 2133 err = 0; 2159 2134 goto out; 2160 2135 }
+4 -4
fs/ntfs3/index.c
··· 1462 1462 goto out2; 1463 1463 1464 1464 if (in->name == I30_NAME) { 1465 - ni->vfs_inode.i_size = data_size; 1465 + i_size_write(&ni->vfs_inode, data_size); 1466 1466 inode_set_bytes(&ni->vfs_inode, alloc_size); 1467 1467 } 1468 1468 ··· 1544 1544 } 1545 1545 1546 1546 if (in->name == I30_NAME) 1547 - ni->vfs_inode.i_size = data_size; 1547 + i_size_write(&ni->vfs_inode, data_size); 1548 1548 1549 1549 *vbn = bit << indx->idx2vbn_bits; 1550 1550 ··· 2090 2090 return err; 2091 2091 2092 2092 if (in->name == I30_NAME) 2093 - ni->vfs_inode.i_size = new_data; 2093 + i_size_write(&ni->vfs_inode, new_data); 2094 2094 2095 2095 bpb = bitmap_size(bit); 2096 2096 if (bpb * 8 == nbits) ··· 2576 2576 err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len, 2577 2577 &indx->alloc_run, 0, NULL, false, NULL); 2578 2578 if (in->name == I30_NAME) 2579 - ni->vfs_inode.i_size = 0; 2579 + i_size_write(&ni->vfs_inode, 0); 2580 2580 2581 2581 err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len, 2582 2582 false, NULL);
+24 -8
fs/ntfs3/inode.c
··· 345 345 inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer 346 346 .PrintNameLength) / 347 347 sizeof(u16); 348 - 349 348 ni->i_valid = inode->i_size; 350 - 351 349 /* Clear directory bit. */ 352 350 if (ni->ni_flags & NI_FLAG_DIR) { 353 351 indx_clear(&ni->dir); ··· 410 412 goto out; 411 413 412 414 if (!is_match && name) { 413 - /* Reuse rec as buffer for ascii name. */ 414 415 err = -ENOENT; 415 416 goto out; 416 417 } ··· 424 427 425 428 if (names != le16_to_cpu(rec->hard_links)) { 426 429 /* Correct minor error on the fly. Do not mark inode as dirty. */ 430 + ntfs_inode_warn(inode, "Correct links count -> %u.", names); 427 431 rec->hard_links = cpu_to_le16(names); 428 432 ni->mi.dirty = true; 429 433 } ··· 651 653 off = vbo & (PAGE_SIZE - 1); 652 654 folio_set_bh(bh, folio, off); 653 655 654 - err = bh_read(bh, 0); 655 - if (err < 0) 656 + if (bh_read(bh, 0) < 0) { 657 + err = -EIO; 656 658 goto out; 659 + } 657 660 folio_zero_segment(folio, off + voff, off + block_size); 658 661 } 659 662 } ··· 852 853 struct writeback_control *wbc, void *data) 853 854 { 854 855 struct address_space *mapping = data; 855 - struct ntfs_inode *ni = ntfs_i(mapping->host); 856 + struct inode *inode = mapping->host; 857 + struct ntfs_inode *ni = ntfs_i(inode); 856 858 int ret; 859 + 860 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 861 + return -EIO; 857 862 858 863 ni_lock(ni); 859 864 ret = attr_data_write_resident(ni, &folio->page); ··· 872 869 static int ntfs_writepages(struct address_space *mapping, 873 870 struct writeback_control *wbc) 874 871 { 875 - if (is_resident(ntfs_i(mapping->host))) 872 + struct inode *inode = mapping->host; 873 + 874 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 875 + return -EIO; 876 + 877 + if (is_resident(ntfs_i(inode))) 876 878 return write_cache_pages(mapping, wbc, ntfs_resident_writepage, 877 879 mapping); 878 880 return mpage_writepages(mapping, wbc, ntfs_get_block); ··· 896 888 int err; 897 889 struct inode *inode = 
mapping->host; 898 890 struct ntfs_inode *ni = ntfs_i(inode); 891 + 892 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 893 + return -EIO; 899 894 900 895 *pagep = NULL; 901 896 if (is_resident(ni)) { ··· 985 974 } 986 975 987 976 if (pos + err > inode->i_size) { 988 - inode->i_size = pos + err; 977 + i_size_write(inode, pos + err); 989 978 dirty = true; 990 979 } 991 980 ··· 1315 1304 if (!new_de) { 1316 1305 err = -ENOMEM; 1317 1306 goto out1; 1307 + } 1308 + 1309 + if (unlikely(ntfs3_forced_shutdown(sb))) { 1310 + err = -EIO; 1311 + goto out2; 1318 1312 } 1319 1313 1320 1314 /* Mark rw ntfs as dirty. it will be cleared at umount. */
+12
fs/ntfs3/namei.c
··· 181 181 struct ntfs_inode *ni = ntfs_i(dir); 182 182 int err; 183 183 184 + if (unlikely(ntfs3_forced_shutdown(dir->i_sb))) 185 + return -EIO; 186 + 184 187 ni_lock_dir(ni); 185 188 186 189 err = ntfs_unlink_inode(dir, dentry); ··· 201 198 { 202 199 u32 size = strlen(symname); 203 200 struct inode *inode; 201 + 202 + if (unlikely(ntfs3_forced_shutdown(dir->i_sb))) 203 + return -EIO; 204 204 205 205 inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777, 0, 206 206 symname, size, NULL); ··· 232 226 { 233 227 struct ntfs_inode *ni = ntfs_i(dir); 234 228 int err; 229 + 230 + if (unlikely(ntfs3_forced_shutdown(dir->i_sb))) 231 + return -EIO; 235 232 236 233 ni_lock_dir(ni); 237 234 ··· 272 263 static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + sizeof(struct NTFS_DE) < 273 264 1024); 274 265 static_assert(PATH_MAX >= 4 * 1024); 266 + 267 + if (unlikely(ntfs3_forced_shutdown(sb))) 268 + return -EIO; 275 269 276 270 if (flags & ~RENAME_NOREPLACE) 277 271 return -EINVAL;
+1 -3
fs/ntfs3/ntfs.h
··· 523 523 __le64 vcn; // 0x08: Starting VCN of this attribute. 524 524 struct MFT_REF ref; // 0x10: MFT record number with attribute. 525 525 __le16 id; // 0x18: struct ATTRIB ID. 526 - __le16 name[3]; // 0x1A: Just to align. To get real name can use bNameOffset. 526 + __le16 name[]; // 0x1A: To get real name use name_off. 527 527 528 528 }; // sizeof(0x20) 529 - 530 - static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20); 531 529 532 530 static inline u32 le_size(u8 name_len) 533 531 {
+13 -16
fs/ntfs3/ntfs_fs.h
··· 61 61 62 62 /* sbi->flags */ 63 63 #define NTFS_FLAGS_NODISCARD 0x00000001 64 + /* ntfs in shutdown state. */ 65 + #define NTFS_FLAGS_SHUTDOWN_BIT 0x00000002 /* == 4*/ 64 66 /* Set when LogFile is replaying. */ 65 67 #define NTFS_FLAGS_LOG_REPLAYING 0x00000008 66 68 /* Set when we changed first MFT's which copy must be updated in $MftMirr. */ ··· 228 226 u64 maxbytes; // Maximum size for normal files. 229 227 u64 maxbytes_sparse; // Maximum size for sparse file. 230 228 231 - u32 flags; // See NTFS_FLAGS_XXX. 229 + unsigned long flags; // See NTFS_FLAGS_ 232 230 233 231 CLST zone_max; // Maximum MFT zone length in clusters 234 232 CLST bad_clusters; // The count of marked bad clusters. ··· 475 473 int al_update(struct ntfs_inode *ni, int sync); 476 474 static inline size_t al_aligned(size_t size) 477 475 { 478 - return (size + 1023) & ~(size_t)1023; 476 + return size_add(size, 1023) & ~(size_t)1023; 479 477 } 480 478 481 479 /* Globals from bitfunc.c */ ··· 502 500 int ntfs_file_open(struct inode *inode, struct file *file); 503 501 int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 504 502 __u64 start, __u64 len); 503 + long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg); 504 + long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg); 505 505 extern const struct inode_operations ntfs_special_inode_operations; 506 506 extern const struct inode_operations ntfs_file_inode_operations; 507 507 extern const struct file_operations ntfs_file_operations; ··· 588 584 int log_replay(struct ntfs_inode *ni, bool *initialized); 589 585 590 586 /* Globals from fsntfs.c */ 587 + struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block); 591 588 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes); 592 589 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes, 593 590 bool simple); ··· 877 872 878 873 int ntfs_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry); 879 874 ssize_t 
ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size); 880 - extern const struct xattr_handler * const ntfs_xattr_handlers[]; 875 + extern const struct xattr_handler *const ntfs_xattr_handlers[]; 881 876 882 877 int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size); 883 878 void ntfs_get_wsl_perm(struct inode *inode); ··· 1004 999 return sb->s_fs_info; 1005 1000 } 1006 1001 1002 + static inline int ntfs3_forced_shutdown(struct super_block *sb) 1003 + { 1004 + return test_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags); 1005 + } 1006 + 1007 1007 /* 1008 1008 * ntfs_up_cluster - Align up on cluster boundary. 1009 1009 */ ··· 1033 1023 static inline u64 bytes_to_block(const struct super_block *sb, u64 size) 1034 1024 { 1035 1025 return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; 1036 - } 1037 - 1038 - static inline struct buffer_head *ntfs_bread(struct super_block *sb, 1039 - sector_t block) 1040 - { 1041 - struct buffer_head *bh = sb_bread(sb, block); 1042 - 1043 - if (bh) 1044 - return bh; 1045 - 1046 - ntfs_err(sb, "failed to read volume at offset 0x%llx", 1047 - (u64)block << sb->s_blocksize_bits); 1048 - return NULL; 1049 1026 } 1050 1027 1051 1028 static inline struct ntfs_inode *ntfs_i(struct inode *inode)
+15 -3
fs/ntfs3/record.c
··· 279 279 if (t16 > asize) 280 280 return NULL; 281 281 282 - if (t16 + le32_to_cpu(attr->res.data_size) > asize) 282 + if (le32_to_cpu(attr->res.data_size) > asize - t16) 283 283 return NULL; 284 284 285 285 t32 = sizeof(short) * attr->name_len; ··· 535 535 return false; 536 536 537 537 if (ni && is_attr_indexed(attr)) { 538 - le16_add_cpu(&ni->mi.mrec->hard_links, -1); 539 - ni->mi.dirty = true; 538 + u16 links = le16_to_cpu(ni->mi.mrec->hard_links); 539 + struct ATTR_FILE_NAME *fname = 540 + attr->type != ATTR_NAME ? 541 + NULL : 542 + resident_data_ex(attr, 543 + SIZEOF_ATTRIBUTE_FILENAME); 544 + if (fname && fname->type == FILE_NAME_DOS) { 545 + /* Do not decrease links count deleting DOS name. */ 546 + } else if (!links) { 547 + /* minor error. Not critical. */ 548 + } else { 549 + ni->mi.mrec->hard_links = cpu_to_le16(links - 1); 550 + ni->mi.dirty = true; 551 + } 540 552 } 541 553 542 554 used -= asize;
+32 -22
fs/ntfs3/super.c
··· 122 122 123 123 if (name) { 124 124 struct dentry *de = d_find_alias(inode); 125 - const u32 name_len = ARRAY_SIZE(s_name_buf) - 1; 126 125 127 126 if (de) { 128 127 spin_lock(&de->d_lock); 129 - snprintf(name, name_len, " \"%s\"", de->d_name.name); 128 + snprintf(name, sizeof(s_name_buf), " \"%s\"", 129 + de->d_name.name); 130 130 spin_unlock(&de->d_lock); 131 - name[name_len] = 0; /* To be sure. */ 132 131 } else { 133 132 name[0] = 0; 134 133 } ··· 624 625 { 625 626 kfree(sbi->new_rec); 626 627 kvfree(ntfs_put_shared(sbi->upcase)); 627 - kfree(sbi->def_table); 628 + kvfree(sbi->def_table); 628 629 kfree(sbi->compress.lznt); 629 630 #ifdef CONFIG_NTFS3_LZX_XPRESS 630 631 xpress_free_decompressor(sbi->compress.xpress); ··· 714 715 } 715 716 716 717 /* 718 + * ntfs_shutdown - super_operations::shutdown 719 + */ 720 + static void ntfs_shutdown(struct super_block *sb) 721 + { 722 + set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags); 723 + } 724 + 725 + /* 717 726 * ntfs_sync_fs - super_operations::sync_fs 718 727 */ 719 728 static int ntfs_sync_fs(struct super_block *sb, int wait) ··· 730 723 struct ntfs_sb_info *sbi = sb->s_fs_info; 731 724 struct ntfs_inode *ni; 732 725 struct inode *inode; 726 + 727 + if (unlikely(ntfs3_forced_shutdown(sb))) 728 + return -EIO; 733 729 734 730 ni = sbi->security.ni; 735 731 if (ni) { ··· 773 763 .put_super = ntfs_put_super, 774 764 .statfs = ntfs_statfs, 775 765 .show_options = ntfs_show_options, 766 + .shutdown = ntfs_shutdown, 776 767 .sync_fs = ntfs_sync_fs, 777 768 .write_inode = ntfs3_write_inode, 778 769 }; ··· 877 866 u16 fn, ao; 878 867 u8 cluster_bits; 879 868 u32 boot_off = 0; 869 + sector_t boot_block = 0; 880 870 const char *hint = "Primary boot"; 881 871 882 872 /* Save original dev_size. Used with alternative boot. 
*/ ··· 885 873 886 874 sbi->volume.blocks = dev_size >> PAGE_SHIFT; 887 875 888 - bh = ntfs_bread(sb, 0); 876 + read_boot: 877 + bh = ntfs_bread(sb, boot_block); 889 878 if (!bh) 890 - return -EIO; 879 + return boot_block ? -EINVAL : -EIO; 891 880 892 - check_boot: 893 881 err = -EINVAL; 894 882 895 883 /* Corrupted image; do not read OOB */ ··· 1120 1108 } 1121 1109 1122 1110 out: 1123 - if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) { 1111 + brelse(bh); 1112 + 1113 + if (err == -EINVAL && !boot_block && dev_size0 > PAGE_SHIFT) { 1124 1114 u32 block_size = min_t(u32, sector_size, PAGE_SIZE); 1125 1115 u64 lbo = dev_size0 - sizeof(*boot); 1126 1116 1127 - /* 1128 - * Try alternative boot (last sector) 1129 - */ 1130 - brelse(bh); 1131 - 1132 - sb_set_blocksize(sb, block_size); 1133 - bh = ntfs_bread(sb, lbo >> blksize_bits(block_size)); 1134 - if (!bh) 1135 - return -EINVAL; 1136 - 1117 + boot_block = lbo >> blksize_bits(block_size); 1137 1118 boot_off = lbo & (block_size - 1); 1138 - hint = "Alternative boot"; 1139 - dev_size = dev_size0; /* restore original size. */ 1140 - goto check_boot; 1119 + if (boot_block && block_size >= boot_off + sizeof(*boot)) { 1120 + /* 1121 + * Try alternative boot (last sector) 1122 + */ 1123 + sb_set_blocksize(sb, block_size); 1124 + hint = "Alternative boot"; 1125 + dev_size = dev_size0; /* restore original size. */ 1126 + goto read_boot; 1127 + } 1141 1128 } 1142 - brelse(bh); 1143 1129 1144 1130 return err; 1145 1131 }
+6
fs/ntfs3/xattr.c
··· 219 219 if (!ea->name_len) 220 220 break; 221 221 222 + if (ea->name_len > ea_size) 223 + break; 224 + 222 225 if (buffer) { 223 226 /* Check if we can use field ea->name */ 224 227 if (off + ea_size > size) ··· 746 743 { 747 744 int err; 748 745 struct ntfs_inode *ni = ntfs_i(inode); 746 + 747 + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) 748 + return -EIO; 749 749 750 750 /* Dispatch request. */ 751 751 if (!strcmp(name, SYSTEM_DOS_ATTRIB)) {
+6 -8
fs/overlayfs/copy_up.c
··· 265 265 if (IS_ERR(old_file)) 266 266 return PTR_ERR(old_file); 267 267 268 + /* Try to use clone_file_range to clone up within the same fs */ 269 + cloned = vfs_clone_file_range(old_file, 0, new_file, 0, len, 0); 270 + if (cloned == len) 271 + goto out_fput; 272 + 273 + /* Couldn't clone, so now we try to copy the data */ 268 274 error = rw_verify_area(READ, old_file, &old_pos, len); 269 275 if (!error) 270 276 error = rw_verify_area(WRITE, new_file, &new_pos, len); 271 277 if (error) 272 278 goto out_fput; 273 - 274 - /* Try to use clone_file_range to clone up within the same fs */ 275 - ovl_start_write(dentry); 276 - cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0); 277 - ovl_end_write(dentry); 278 - if (cloned == len) 279 - goto out_fput; 280 - /* Couldn't clone, so now we try to copy the data */ 281 279 282 280 /* Check if lower fs supports seek operation */ 283 281 if (old_file->f_mode & FMODE_LSEEK)
+37 -29
fs/proc/array.c
··· 477 477 int permitted; 478 478 struct mm_struct *mm; 479 479 unsigned long long start_time; 480 - unsigned long cmin_flt = 0, cmaj_flt = 0; 481 - unsigned long min_flt = 0, maj_flt = 0; 482 - u64 cutime, cstime, utime, stime; 483 - u64 cgtime, gtime; 480 + unsigned long cmin_flt, cmaj_flt, min_flt, maj_flt; 481 + u64 cutime, cstime, cgtime, utime, stime, gtime; 484 482 unsigned long rsslim = 0; 485 483 unsigned long flags; 486 484 int exit_code = task->exit_code; 485 + struct signal_struct *sig = task->signal; 486 + unsigned int seq = 1; 487 487 488 488 state = *get_task_state(task); 489 489 vsize = eip = esp = 0; ··· 511 511 512 512 sigemptyset(&sigign); 513 513 sigemptyset(&sigcatch); 514 - cutime = cstime = utime = stime = 0; 515 - cgtime = gtime = 0; 516 514 517 515 if (lock_task_sighand(task, &flags)) { 518 - struct signal_struct *sig = task->signal; 519 - 520 516 if (sig->tty) { 521 517 struct pid *pgrp = tty_get_pgrp(sig->tty); 522 518 tty_pgrp = pid_nr_ns(pgrp, ns); ··· 523 527 num_threads = get_nr_threads(task); 524 528 collect_sigign_sigcatch(task, &sigign, &sigcatch); 525 529 526 - cmin_flt = sig->cmin_flt; 527 - cmaj_flt = sig->cmaj_flt; 528 - cutime = sig->cutime; 529 - cstime = sig->cstime; 530 - cgtime = sig->cgtime; 531 530 rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur); 532 531 533 - /* add up live thread stats at the group level */ 534 532 if (whole) { 535 - struct task_struct *t; 536 - 537 - __for_each_thread(sig, t) { 538 - min_flt += t->min_flt; 539 - maj_flt += t->maj_flt; 540 - gtime += task_gtime(t); 541 - } 542 - 543 - min_flt += sig->min_flt; 544 - maj_flt += sig->maj_flt; 545 - thread_group_cputime_adjusted(task, &utime, &stime); 546 - gtime += sig->gtime; 547 - 548 533 if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED)) 549 534 exit_code = sig->group_exit_code; 550 535 } ··· 539 562 540 563 if (permitted && (!whole || num_threads < 2)) 541 564 wchan = !task_is_running(task); 542 - if (!whole) { 565 + 566 + do { 567 + 
seq++; /* 2 on the 1st/lockless path, otherwise odd */ 568 + flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq); 569 + 570 + cmin_flt = sig->cmin_flt; 571 + cmaj_flt = sig->cmaj_flt; 572 + cutime = sig->cutime; 573 + cstime = sig->cstime; 574 + cgtime = sig->cgtime; 575 + 576 + if (whole) { 577 + struct task_struct *t; 578 + 579 + min_flt = sig->min_flt; 580 + maj_flt = sig->maj_flt; 581 + gtime = sig->gtime; 582 + 583 + rcu_read_lock(); 584 + __for_each_thread(sig, t) { 585 + min_flt += t->min_flt; 586 + maj_flt += t->maj_flt; 587 + gtime += task_gtime(t); 588 + } 589 + rcu_read_unlock(); 590 + } 591 + } while (need_seqretry(&sig->stats_lock, seq)); 592 + done_seqretry_irqrestore(&sig->stats_lock, seq, flags); 593 + 594 + if (whole) { 595 + thread_group_cputime_adjusted(task, &utime, &stime); 596 + } else { 597 + task_cputime_adjusted(task, &utime, &stime); 543 598 min_flt = task->min_flt; 544 599 maj_flt = task->maj_flt; 545 - task_cputime_adjusted(task, &utime, &stime); 546 600 gtime = task_gtime(task); 547 601 } 548 602
+9 -22
fs/remap_range.c
··· 373 373 } 374 374 EXPORT_SYMBOL(generic_remap_file_range_prep); 375 375 376 - loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, 377 - struct file *file_out, loff_t pos_out, 378 - loff_t len, unsigned int remap_flags) 376 + loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, 377 + struct file *file_out, loff_t pos_out, 378 + loff_t len, unsigned int remap_flags) 379 379 { 380 380 loff_t ret; 381 381 ··· 391 391 if (!file_in->f_op->remap_file_range) 392 392 return -EOPNOTSUPP; 393 393 394 - ret = file_in->f_op->remap_file_range(file_in, pos_in, 395 - file_out, pos_out, len, remap_flags); 396 - if (ret < 0) 397 - return ret; 398 - 399 - fsnotify_access(file_in); 400 - fsnotify_modify(file_out); 401 - return ret; 402 - } 403 - EXPORT_SYMBOL(do_clone_file_range); 404 - 405 - loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, 406 - struct file *file_out, loff_t pos_out, 407 - loff_t len, unsigned int remap_flags) 408 - { 409 - loff_t ret; 410 - 411 394 ret = remap_verify_area(file_in, pos_in, len, false); 412 395 if (ret) 413 396 return ret; ··· 400 417 return ret; 401 418 402 419 file_start_write(file_out); 403 - ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len, 404 - remap_flags); 420 + ret = file_in->f_op->remap_file_range(file_in, pos_in, 421 + file_out, pos_out, len, remap_flags); 405 422 file_end_write(file_out); 423 + if (ret < 0) 424 + return ret; 406 425 426 + fsnotify_access(file_in); 427 + fsnotify_modify(file_out); 407 428 return ret; 408 429 } 409 430 EXPORT_SYMBOL(vfs_clone_file_range);
+11
fs/smb/client/connect.c
··· 233 233 list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) { 234 234 /* check if iface is still active */ 235 235 spin_lock(&ses->chan_lock); 236 + if (cifs_ses_get_chan_index(ses, server) == 237 + CIFS_INVAL_CHAN_INDEX) { 238 + spin_unlock(&ses->chan_lock); 239 + continue; 240 + } 241 + 236 242 if (!cifs_chan_is_iface_active(ses, server)) { 237 243 spin_unlock(&ses->chan_lock); 238 244 cifs_chan_update_iface(ses, server); ··· 4234 4228 4235 4229 /* only send once per connect */ 4236 4230 spin_lock(&tcon->tc_lock); 4231 + 4232 + /* if tcon is marked for needing reconnect, update state */ 4233 + if (tcon->need_reconnect) 4234 + tcon->status = TID_NEED_TCON; 4235 + 4237 4236 if (tcon->status == TID_GOOD) { 4238 4237 spin_unlock(&tcon->tc_lock); 4239 4238 return 0;
+6 -1
fs/smb/client/dfs.c
··· 565 565 566 566 /* only send once per connect */ 567 567 spin_lock(&tcon->tc_lock); 568 + 569 + /* if tcon is marked for needing reconnect, update state */ 570 + if (tcon->need_reconnect) 571 + tcon->status = TID_NEED_TCON; 572 + 568 573 if (tcon->status == TID_GOOD) { 569 574 spin_unlock(&tcon->tc_lock); 570 575 return 0; ··· 630 625 spin_lock(&tcon->tc_lock); 631 626 if (tcon->status == TID_IN_TCON) 632 627 tcon->status = TID_GOOD; 633 - spin_unlock(&tcon->tc_lock); 634 628 tcon->need_reconnect = false; 629 + spin_unlock(&tcon->tc_lock); 635 630 } 636 631 637 632 return rc;
+3
fs/smb/client/file.c
··· 175 175 176 176 /* only send once per connect */ 177 177 spin_lock(&tcon->tc_lock); 178 + if (tcon->need_reconnect) 179 + tcon->status = TID_NEED_RECON; 180 + 178 181 if (tcon->status != TID_NEED_RECON) { 179 182 spin_unlock(&tcon->tc_lock); 180 183 return;
+1 -1
fs/smb/client/fs_context.c
··· 211 211 212 212 switch (match_token(value, cifs_secflavor_tokens, args)) { 213 213 case Opt_sec_krb5p: 214 - cifs_errorf(fc, "sec=krb5p is not supported!\n"); 214 + cifs_errorf(fc, "sec=krb5p is not supported. Use sec=krb5,seal instead\n"); 215 215 return 1; 216 216 case Opt_sec_krb5i: 217 217 ctx->sign = true;
+8 -7
fs/smb/client/readdir.c
··· 307 307 } 308 308 309 309 static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr, 310 - SEARCH_ID_FULL_DIR_INFO *info, 310 + const void *info, 311 311 struct cifs_sb_info *cifs_sb) 312 312 { 313 + const FILE_FULL_DIRECTORY_INFO *di = info; 314 + 313 315 __dir_info_to_fattr(fattr, info); 314 316 315 - /* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */ 317 + /* See MS-FSCC 2.4.14, 2.4.19 */ 316 318 if (fattr->cf_cifsattrs & ATTR_REPARSE) 317 - fattr->cf_cifstag = le32_to_cpu(info->EaSize); 319 + fattr->cf_cifstag = le32_to_cpu(di->EaSize); 318 320 cifs_fill_common_info(fattr, cifs_sb); 319 321 } 320 322 ··· 398 396 } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 399 397 cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; 400 398 } else /* not srvinos - BB fixme add check for backlevel? */ { 401 - cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO; 399 + cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO; 402 400 } 403 401 404 402 search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME; ··· 989 987 (FIND_FILE_STANDARD_INFO *)find_entry, 990 988 cifs_sb); 991 989 break; 990 + case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 992 991 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 993 - cifs_fulldir_info_to_fattr(&fattr, 994 - (SEARCH_ID_FULL_DIR_INFO *)find_entry, 995 - cifs_sb); 992 + cifs_fulldir_info_to_fattr(&fattr, find_entry, cifs_sb); 996 993 break; 997 994 default: 998 995 cifs_dir_info_to_fattr(&fattr,
+1 -2
fs/smb/client/sess.c
··· 76 76 unsigned int i; 77 77 78 78 /* if the channel is waiting for termination */ 79 - if (server->terminate) 79 + if (server && server->terminate) 80 80 return CIFS_INVAL_CHAN_INDEX; 81 81 82 82 for (i = 0; i < ses->chan_count; i++) { ··· 88 88 if (server) 89 89 cifs_dbg(VFS, "unable to get chan index for server: 0x%llx", 90 90 server->conn_id); 91 - WARN_ON(1); 92 91 return CIFS_INVAL_CHAN_INDEX; 93 92 } 94 93
+6
fs/smb/client/smb2pdu.c
··· 5206 5206 case SMB_FIND_FILE_POSIX_INFO: 5207 5207 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO; 5208 5208 break; 5209 + case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5210 + req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION; 5211 + break; 5209 5212 default: 5210 5213 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5211 5214 info_level); ··· 5277 5274 case SMB_FIND_FILE_POSIX_INFO: 5278 5275 /* note that posix payload are variable size */ 5279 5276 info_buf_size = sizeof(struct smb2_posix_info); 5277 + break; 5278 + case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5279 + info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO); 5280 5280 break; 5281 5281 default: 5282 5282 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+1
fs/smb/server/misc.c
··· 261 261 262 262 /** 263 263 * ksmbd_extract_sharename() - get share name from tree connect request 264 + * @um: pointer to a unicode_map structure for character encoding handling 264 265 * @treename: buffer containing tree name and share name 265 266 * 266 267 * Return: share name on success, otherwise error
+6 -2
fs/smb/server/smb2pdu.c
··· 6173 6173 err = ksmbd_iov_pin_rsp_read(work, (void *)rsp, 6174 6174 offsetof(struct smb2_read_rsp, Buffer), 6175 6175 aux_payload_buf, nbytes); 6176 - if (err) 6176 + if (err) { 6177 + kvfree(aux_payload_buf); 6177 6178 goto out; 6179 + } 6178 6180 kvfree(rpc_resp); 6179 6181 } else { 6180 6182 err = ksmbd_iov_pin_rsp(work, (void *)rsp, ··· 6386 6384 err = ksmbd_iov_pin_rsp_read(work, (void *)rsp, 6387 6385 offsetof(struct smb2_read_rsp, Buffer), 6388 6386 aux_payload_buf, nbytes); 6389 - if (err) 6387 + if (err) { 6388 + kvfree(aux_payload_buf); 6390 6389 goto out; 6390 + } 6391 6391 ksmbd_fd_put(work, fp); 6392 6392 return 0; 6393 6393
+5 -2
include/linux/backing-dev-defs.h
··· 141 141 struct delayed_work dwork; /* work item used for writeback */ 142 142 struct delayed_work bw_dwork; /* work item used for bandwidth estimate */ 143 143 144 - unsigned long dirty_sleep; /* last wait */ 145 - 146 144 struct list_head bdi_node; /* anchored at bdi->wb_list */ 147 145 148 146 #ifdef CONFIG_CGROUP_WRITEBACK ··· 177 179 * any dirty wbs, which is depended upon by bdi_has_dirty(). 178 180 */ 179 181 atomic_long_t tot_write_bandwidth; 182 + /* 183 + * Jiffies when last process was dirty throttled on this bdi. Used by 184 + * blk-wbt. 185 + */ 186 + unsigned long last_bdp_sleep; 180 187 181 188 struct bdi_writeback wb; /* the root writeback info for this bdi */ 182 189 struct list_head wb_list; /* list of all wbs */
+1 -1
include/linux/ceph/messenger.h
··· 283 283 struct kref kref; 284 284 bool more_to_follow; 285 285 bool needs_out_seq; 286 - bool sparse_read; 286 + u64 sparse_read_total; 287 287 int front_alloc_len; 288 288 289 289 struct ceph_msgpool *pool;
+2 -1
include/linux/ceph/osd_client.h
··· 45 45 CEPH_SPARSE_READ_HDR = 0, 46 46 CEPH_SPARSE_READ_EXTENTS, 47 47 CEPH_SPARSE_READ_DATA_LEN, 48 + CEPH_SPARSE_READ_DATA_PRE, 48 49 CEPH_SPARSE_READ_DATA, 49 50 }; 50 51 ··· 65 64 u64 sr_req_len; /* orig request length */ 66 65 u64 sr_pos; /* current pos in buffer */ 67 66 int sr_index; /* current extent index */ 68 - __le32 sr_datalen; /* length of actual data */ 67 + u32 sr_datalen; /* length of actual data */ 69 68 u32 sr_count; /* extent count in reply */ 70 69 int sr_ext_len; /* length of extent array */ 71 70 struct ceph_sparse_extent *sr_extent; /* extent array */
+20
include/linux/compiler-gcc.h
··· 64 64 __builtin_unreachable(); \ 65 65 } while (0) 66 66 67 + /* 68 + * GCC 'asm goto' with outputs miscompiles certain code sequences: 69 + * 70 + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921 71 + * 72 + * Work around it via the same compiler barrier quirk that we used 73 + * to use for the old 'asm goto' workaround. 74 + * 75 + * Also, always mark such 'asm goto' statements as volatile: all 76 + * asm goto statements are supposed to be volatile as per the 77 + * documentation, but some versions of gcc didn't actually do 78 + * that for asms with outputs: 79 + * 80 + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619 81 + */ 82 + #ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND 83 + #define asm_goto_output(x...) \ 84 + do { asm volatile goto(x); asm (""); } while (0) 85 + #endif 86 + 67 87 #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) 68 88 #define __HAVE_BUILTIN_BSWAP32__ 69 89 #define __HAVE_BUILTIN_BSWAP64__
+9 -2
include/linux/compiler_types.h
··· 362 362 #define __member_size(p) __builtin_object_size(p, 1) 363 363 #endif 364 364 365 - #ifndef asm_volatile_goto 366 - #define asm_volatile_goto(x...) asm goto(x) 365 + /* 366 + * Some versions of gcc do not mark 'asm goto' volatile: 367 + * 368 + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979 369 + * 370 + * We do it here by hand, because it doesn't hurt. 371 + */ 372 + #ifndef asm_goto_output 373 + #define asm_goto_output(x...) asm volatile goto(x) 367 374 #endif 368 375 369 376 #ifdef CONFIG_CC_HAS_ASM_INLINE
+23
include/linux/cper.h
··· 90 90 GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \ 91 91 0x72, 0x2D, 0xEB, 0x41) 92 92 93 + /* CXL Event record UUIDs are formatted as GUIDs and reported in section type */ 94 + /* 95 + * General Media Event Record 96 + * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 97 + */ 98 + #define CPER_SEC_CXL_GEN_MEDIA_GUID \ 99 + GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \ 100 + 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6) 101 + /* 102 + * DRAM Event Record 103 + * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 104 + */ 105 + #define CPER_SEC_CXL_DRAM_GUID \ 106 + GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \ 107 + 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24) 108 + /* 109 + * Memory Module Event Record 110 + * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45 111 + */ 112 + #define CPER_SEC_CXL_MEM_MODULE_GUID \ 113 + GUID_INIT(0xfe927475, 0xdd59, 0x4339, \ 114 + 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74) 115 + 93 116 /* 94 117 * Flags bits definitions for flags in struct cper_record_header 95 118 * If set, the error has been recovered
-3
include/linux/fs.h
··· 2101 2101 int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, 2102 2102 struct file *file_out, loff_t pos_out, 2103 2103 loff_t *count, unsigned int remap_flags); 2104 - extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, 2105 - struct file *file_out, loff_t pos_out, 2106 - loff_t len, unsigned int remap_flags); 2107 2104 extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, 2108 2105 struct file *file_out, loff_t pos_out, 2109 2106 loff_t len, unsigned int remap_flags);
+3 -1
include/linux/hrtimer.h
··· 157 157 * @max_hang_time: Maximum time spent in hrtimer_interrupt 158 158 * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are 159 159 * expired 160 + * @online: CPU is online from an hrtimers point of view 160 161 * @timer_waiters: A hrtimer_cancel() invocation waits for the timer 161 162 * callback to finish. 162 163 * @expires_next: absolute time of the next event, is required for remote ··· 180 179 unsigned int hres_active : 1, 181 180 in_hrtirq : 1, 182 181 hang_detected : 1, 183 - softirq_activated : 1; 182 + softirq_activated : 1, 183 + online : 1; 184 184 #ifdef CONFIG_HIGH_RES_TIMERS 185 185 unsigned int nr_events; 186 186 unsigned short nr_retries;
+5 -5
include/linux/netdevice.h
··· 2150 2150 2151 2151 /* TXRX read-mostly hotpath */ 2152 2152 __cacheline_group_begin(net_device_read_txrx); 2153 + union { 2154 + struct pcpu_lstats __percpu *lstats; 2155 + struct pcpu_sw_netstats __percpu *tstats; 2156 + struct pcpu_dstats __percpu *dstats; 2157 + }; 2153 2158 unsigned int flags; 2154 2159 unsigned short hard_header_len; 2155 2160 netdev_features_t features; ··· 2403 2398 enum netdev_ml_priv_type ml_priv_type; 2404 2399 2405 2400 enum netdev_stat_type pcpu_stat_type:8; 2406 - union { 2407 - struct pcpu_lstats __percpu *lstats; 2408 - struct pcpu_sw_netstats __percpu *tstats; 2409 - struct pcpu_dstats __percpu *dstats; 2410 - }; 2411 2401 2412 2402 #if IS_ENABLED(CONFIG_GARP) 2413 2403 struct garp_port __rcu *garp_port;
+4
include/linux/ptrace.h
··· 393 393 #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) 394 394 #endif 395 395 396 + #ifndef exception_ip 397 + #define exception_ip(x) instruction_pointer(x) 398 + #endif 399 + 396 400 extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); 397 401 398 402 extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
+3 -3
include/linux/tcp.h
··· 221 221 u32 lost_out; /* Lost packets */ 222 222 u32 sacked_out; /* SACK'd packets */ 223 223 u16 tcp_header_len; /* Bytes of tcp header to send */ 224 + u8 scaling_ratio; /* see tcp_win_from_space() */ 224 225 u8 chrono_type : 2, /* current chronograph type */ 225 226 repair : 1, 227 + tcp_usec_ts : 1, /* TSval values in usec */ 226 228 is_sack_reneg:1, /* in recovery from loss with SACK reneg? */ 227 229 is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ 228 230 __cacheline_group_end(tcp_sock_read_txrx); ··· 354 352 u32 compressed_ack_rcv_nxt; 355 353 struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ 356 354 357 - u8 scaling_ratio; /* see tcp_win_from_space() */ 358 355 /* Information of the most recently (s)acked skb */ 359 356 struct tcp_rack { 360 357 u64 mstamp; /* (Re)sent time of the skb */ ··· 369 368 u8 compressed_ack; 370 369 u8 dup_ack_counter:2, 371 370 tlp_retrans:1, /* TLP is a retransmission */ 372 - tcp_usec_ts:1, /* TSval values in usec */ 373 - unused:4; 371 + unused:5; 374 372 u8 thin_lto : 1,/* Use linear timeouts for thin streams */ 375 373 recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */ 376 374 fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
-5
include/net/tls.h
··· 97 97 struct tls_rec *open_rec; 98 98 struct list_head tx_list; 99 99 atomic_t encrypt_pending; 100 - /* protect crypto_wait with encrypt_pending */ 101 - spinlock_t encrypt_compl_lock; 102 - int async_notify; 103 100 u8 async_capable:1; 104 101 105 102 #define BIT_TX_SCHEDULED 0 ··· 133 136 struct tls_strparser strp; 134 137 135 138 atomic_t decrypt_pending; 136 - /* protect crypto_wait with decrypt_pending*/ 137 - spinlock_t decrypt_compl_lock; 138 139 struct sk_buff_head async_hold; 139 140 struct wait_queue_head wq; 140 141 };
+1
include/uapi/drm/ivpu_accel.h
··· 305 305 306 306 /* drm_ivpu_bo_wait job status codes */ 307 307 #define DRM_IVPU_JOB_STATUS_SUCCESS 0 308 + #define DRM_IVPU_JOB_STATUS_ABORTED 256 308 309 309 310 /** 310 311 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
+4 -1
include/uapi/xen/gntalloc.h
··· 31 31 __u64 index; 32 32 /* The grant references of the newly created grant, one per page */ 33 33 /* Variable size, depending on count */ 34 - __u32 gref_ids[1]; 34 + union { 35 + __u32 gref_ids[1]; 36 + __DECLARE_FLEX_ARRAY(__u32, gref_ids_flex); 37 + }; 35 38 }; 36 39 37 40 #define GNTALLOC_FLAG_WRITABLE 1
+9
init/Kconfig
··· 89 89 # Detect buggy gcc and clang, fixed in gcc-11 clang-14. 90 90 def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null) 91 91 92 + config GCC_ASM_GOTO_OUTPUT_WORKAROUND 93 + bool 94 + depends on CC_IS_GCC && CC_HAS_ASM_GOTO_OUTPUT 95 + # Fixed in GCC 14, 13.3, 12.4 and 11.5 96 + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921 97 + default y if GCC_VERSION < 110500 98 + default y if GCC_VERSION >= 120000 && GCC_VERSION < 120400 99 + default y if GCC_VERSION >= 130000 && GCC_VERSION < 130300 100 + 92 101 config TOOLS_SUPPORT_RELR 93 102 def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) 94 103
+3 -7
kernel/exit.c
··· 1127 1127 * and nobody can change them. 1128 1128 * 1129 1129 * psig->stats_lock also protects us from our sub-threads 1130 - * which can reap other children at the same time. Until 1131 - * we change k_getrusage()-like users to rely on this lock 1132 - * we have to take ->siglock as well. 1130 + * which can reap other children at the same time. 1133 1131 * 1134 1132 * We use thread_group_cputime_adjusted() to get times for 1135 1133 * the thread group, which consolidates times for all threads 1136 1134 * in the group including the group leader. 1137 1135 */ 1138 1136 thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1139 - spin_lock_irq(&current->sighand->siglock); 1140 - write_seqlock(&psig->stats_lock); 1137 + write_seqlock_irq(&psig->stats_lock); 1141 1138 psig->cutime += tgutime + sig->cutime; 1142 1139 psig->cstime += tgstime + sig->cstime; 1143 1140 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; ··· 1157 1160 psig->cmaxrss = maxrss; 1158 1161 task_io_accounting_add(&psig->ioac, &p->ioac); 1159 1162 task_io_accounting_add(&psig->ioac, &sig->ioac); 1160 - write_sequnlock(&psig->stats_lock); 1161 - spin_unlock_irq(&current->sighand->siglock); 1163 + write_sequnlock_irq(&psig->stats_lock); 1162 1164 } 1163 1165 1164 1166 if (wo->wo_rusage)
+2 -2
kernel/kprobes.c
··· 1993 1993 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp, 1994 1994 struct llist_node **cur) 1995 1995 { 1996 - struct kretprobe_instance *ri = NULL; 1996 + struct kretprobe_instance *ri; 1997 1997 kprobe_opcode_t *ret; 1998 1998 1999 1999 if (WARN_ON_ONCE(!cur)) ··· 2802 2802 { 2803 2803 struct hlist_head *head; 2804 2804 struct kprobe *p, *kp; 2805 - const char *sym = NULL; 2805 + const char *sym; 2806 2806 unsigned int i = *(loff_t *) v; 2807 2807 unsigned long offset = 0; 2808 2808 char *modname, namebuf[KSYM_NAME_LEN];
+34 -20
kernel/sys.c
··· 1785 1785 struct task_struct *t; 1786 1786 unsigned long flags; 1787 1787 u64 tgutime, tgstime, utime, stime; 1788 - unsigned long maxrss = 0; 1788 + unsigned long maxrss; 1789 + struct mm_struct *mm; 1789 1790 struct signal_struct *sig = p->signal; 1791 + unsigned int seq = 0; 1790 1792 1791 - memset((char *)r, 0, sizeof (*r)); 1793 + retry: 1794 + memset(r, 0, sizeof(*r)); 1792 1795 utime = stime = 0; 1796 + maxrss = 0; 1793 1797 1794 1798 if (who == RUSAGE_THREAD) { 1795 1799 task_cputime_adjusted(current, &utime, &stime); 1796 1800 accumulate_thread_rusage(p, r); 1797 1801 maxrss = sig->maxrss; 1798 - goto out; 1802 + goto out_thread; 1799 1803 } 1800 1804 1801 - if (!lock_task_sighand(p, &flags)) 1802 - return; 1805 + flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq); 1803 1806 1804 1807 switch (who) { 1805 1808 case RUSAGE_BOTH: ··· 1822 1819 fallthrough; 1823 1820 1824 1821 case RUSAGE_SELF: 1825 - thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1826 - utime += tgutime; 1827 - stime += tgstime; 1828 1822 r->ru_nvcsw += sig->nvcsw; 1829 1823 r->ru_nivcsw += sig->nivcsw; 1830 1824 r->ru_minflt += sig->min_flt; ··· 1830 1830 r->ru_oublock += sig->oublock; 1831 1831 if (maxrss < sig->maxrss) 1832 1832 maxrss = sig->maxrss; 1833 + 1834 + rcu_read_lock(); 1833 1835 __for_each_thread(sig, t) 1834 1836 accumulate_thread_rusage(t, r); 1837 + rcu_read_unlock(); 1838 + 1835 1839 break; 1836 1840 1837 1841 default: 1838 1842 BUG(); 1839 1843 } 1840 - unlock_task_sighand(p, &flags); 1841 1844 1842 - out: 1845 + if (need_seqretry(&sig->stats_lock, seq)) { 1846 + seq = 1; 1847 + goto retry; 1848 + } 1849 + done_seqretry_irqrestore(&sig->stats_lock, seq, flags); 1850 + 1851 + if (who == RUSAGE_CHILDREN) 1852 + goto out_children; 1853 + 1854 + thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1855 + utime += tgutime; 1856 + stime += tgstime; 1857 + 1858 + out_thread: 1859 + mm = get_task_mm(p); 1860 + if (mm) { 1861 + setmax_mm_hiwater_rss(&maxrss, 
mm); 1862 + mmput(mm); 1863 + } 1864 + 1865 + out_children: 1866 + r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ 1843 1867 r->ru_utime = ns_to_kernel_old_timeval(utime); 1844 1868 r->ru_stime = ns_to_kernel_old_timeval(stime); 1845 - 1846 - if (who != RUSAGE_CHILDREN) { 1847 - struct mm_struct *mm = get_task_mm(p); 1848 - 1849 - if (mm) { 1850 - setmax_mm_hiwater_rss(&maxrss, mm); 1851 - mmput(mm); 1852 - } 1853 - } 1854 - r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ 1855 1869 } 1856 1870 1857 1871 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
+3
kernel/time/hrtimer.c
··· 1085 1085 enum hrtimer_mode mode) 1086 1086 { 1087 1087 debug_activate(timer, mode); 1088 + WARN_ON_ONCE(!base->cpu_base->online); 1088 1089 1089 1090 base->cpu_base->active_bases |= 1 << base->index; 1090 1091 ··· 2184 2183 cpu_base->softirq_next_timer = NULL; 2185 2184 cpu_base->expires_next = KTIME_MAX; 2186 2185 cpu_base->softirq_expires_next = KTIME_MAX; 2186 + cpu_base->online = 1; 2187 2187 hrtimer_cpu_base_init_expiry_lock(cpu_base); 2188 2188 return 0; 2189 2189 } ··· 2252 2250 smp_call_function_single(ncpu, retrigger_next_event, NULL, 0); 2253 2251 2254 2252 raw_spin_unlock(&new_base->lock); 2253 + old_base->online = 0; 2255 2254 raw_spin_unlock(&old_base->lock); 2256 2255 2257 2256 return 0;
+10
kernel/trace/ftrace.c
··· 5325 5325 5326 5326 static int register_ftrace_function_nolock(struct ftrace_ops *ops); 5327 5327 5328 + /* 5329 + * If there are multiple ftrace_ops, use SAVE_REGS by default, so that direct 5330 + * call will be jumped from ftrace_regs_caller. Only if the architecture does 5331 + * not support ftrace_regs_caller but direct_call, use SAVE_ARGS so that it 5332 + * jumps from ftrace_caller for multiple ftrace_ops. 5333 + */ 5334 + #ifndef HAVE_DYNAMIC_FTRACE_WITH_REGS 5328 5335 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS) 5336 + #else 5337 + #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS) 5338 + #endif 5329 5339 5330 5340 static int check_direct_multi(struct ftrace_ops *ops) 5331 5341 {
+37 -38
kernel/trace/trace.c
··· 2320 2320 unsigned *map_cmdline_to_pid; 2321 2321 unsigned cmdline_num; 2322 2322 int cmdline_idx; 2323 - char *saved_cmdlines; 2323 + char saved_cmdlines[]; 2324 2324 }; 2325 2325 static struct saved_cmdlines_buffer *savedcmd; 2326 2326 ··· 2334 2334 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); 2335 2335 } 2336 2336 2337 - static int allocate_cmdlines_buffer(unsigned int val, 2338 - struct saved_cmdlines_buffer *s) 2337 + static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) 2339 2338 { 2339 + int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN); 2340 + 2341 + kfree(s->map_cmdline_to_pid); 2342 + free_pages((unsigned long)s, order); 2343 + } 2344 + 2345 + static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val) 2346 + { 2347 + struct saved_cmdlines_buffer *s; 2348 + struct page *page; 2349 + int orig_size, size; 2350 + int order; 2351 + 2352 + /* Figure out how much is needed to hold the given number of cmdlines */ 2353 + orig_size = sizeof(*s) + val * TASK_COMM_LEN; 2354 + order = get_order(orig_size); 2355 + size = 1 << (order + PAGE_SHIFT); 2356 + page = alloc_pages(GFP_KERNEL, order); 2357 + if (!page) 2358 + return NULL; 2359 + 2360 + s = page_address(page); 2361 + memset(s, 0, sizeof(*s)); 2362 + 2363 + /* Round up to actual allocation */ 2364 + val = (size - sizeof(*s)) / TASK_COMM_LEN; 2365 + s->cmdline_num = val; 2366 + 2340 2367 s->map_cmdline_to_pid = kmalloc_array(val, 2341 2368 sizeof(*s->map_cmdline_to_pid), 2342 2369 GFP_KERNEL); 2343 - if (!s->map_cmdline_to_pid) 2344 - return -ENOMEM; 2345 - 2346 - s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL); 2347 - if (!s->saved_cmdlines) { 2348 - kfree(s->map_cmdline_to_pid); 2349 - return -ENOMEM; 2370 + if (!s->map_cmdline_to_pid) { 2371 + free_saved_cmdlines_buffer(s); 2372 + return NULL; 2350 2373 } 2351 2374 2352 2375 s->cmdline_idx = 0; 2353 - s->cmdline_num = val; 2354 2376 memset(&s->map_pid_to_cmdline, 
NO_CMDLINE_MAP, 2355 2377 sizeof(s->map_pid_to_cmdline)); 2356 2378 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP, 2357 2379 val * sizeof(*s->map_cmdline_to_pid)); 2358 2380 2359 - return 0; 2381 + return s; 2360 2382 } 2361 2383 2362 2384 static int trace_create_savedcmd(void) 2363 2385 { 2364 - int ret; 2386 + savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT); 2365 2387 2366 - savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL); 2367 - if (!savedcmd) 2368 - return -ENOMEM; 2369 - 2370 - ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd); 2371 - if (ret < 0) { 2372 - kfree(savedcmd); 2373 - savedcmd = NULL; 2374 - return -ENOMEM; 2375 - } 2376 - 2377 - return 0; 2388 + return savedcmd ? 0 : -ENOMEM; 2378 2389 } 2379 2390 2380 2391 int is_tracing_stopped(void) ··· 6067 6056 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6068 6057 } 6069 6058 6070 - static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) 6071 - { 6072 - kfree(s->saved_cmdlines); 6073 - kfree(s->map_cmdline_to_pid); 6074 - kfree(s); 6075 - } 6076 - 6077 6059 static int tracing_resize_saved_cmdlines(unsigned int val) 6078 6060 { 6079 6061 struct saved_cmdlines_buffer *s, *savedcmd_temp; 6080 6062 6081 - s = kmalloc(sizeof(*s), GFP_KERNEL); 6063 + s = allocate_cmdlines_buffer(val); 6082 6064 if (!s) 6083 6065 return -ENOMEM; 6084 - 6085 - if (allocate_cmdlines_buffer(val, s) < 0) { 6086 - kfree(s); 6087 - return -ENOMEM; 6088 - } 6089 6066 6090 6067 preempt_disable(); 6091 6068 arch_spin_lock(&trace_cmdline_lock);
+18 -14
kernel/trace/trace_probe.c
··· 1159 1159 if (!(ctx->flags & TPARG_FL_TEVENT) && 1160 1160 (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 || 1161 1161 strncmp(arg, "\\\"", 2) == 0)) { 1162 - /* The type of $comm must be "string", and not an array. */ 1163 - if (parg->count || (t && strcmp(t, "string"))) 1162 + /* The type of $comm must be "string", and not an array type. */ 1163 + if (parg->count || (t && strcmp(t, "string"))) { 1164 + trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0), 1165 + NEED_STRING_TYPE); 1164 1166 goto out; 1167 + } 1165 1168 parg->type = find_fetch_type("string", ctx->flags); 1166 1169 } else 1167 1170 parg->type = find_fetch_type(t, ctx->flags); 1168 1171 if (!parg->type) { 1169 1172 trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0), BAD_TYPE); 1170 1173 goto out; 1171 - } 1172 - parg->offset = *size; 1173 - *size += parg->type->size * (parg->count ?: 1); 1174 - 1175 - ret = -ENOMEM; 1176 - if (parg->count) { 1177 - len = strlen(parg->type->fmttype) + 6; 1178 - parg->fmt = kmalloc(len, GFP_KERNEL); 1179 - if (!parg->fmt) 1180 - goto out; 1181 - snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype, 1182 - parg->count); 1183 1174 } 1184 1175 1185 1176 code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL); ··· 1194 1203 if (ret) 1195 1204 goto fail; 1196 1205 } 1206 + } 1207 + parg->offset = *size; 1208 + *size += parg->type->size * (parg->count ?: 1); 1209 + 1210 + if (parg->count) { 1211 + len = strlen(parg->type->fmttype) + 6; 1212 + parg->fmt = kmalloc(len, GFP_KERNEL); 1213 + if (!parg->fmt) { 1214 + ret = -ENOMEM; 1215 + goto out; 1216 + } 1217 + snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype, 1218 + parg->count); 1197 1219 } 1198 1220 1199 1221 ret = -EINVAL;
+2 -1
kernel/trace/trace_probe.h
··· 515 515 C(BAD_HYPHEN, "Failed to parse single hyphen. Forgot '>'?"), \ 516 516 C(NO_BTF_FIELD, "This field is not found."), \ 517 517 C(BAD_BTF_TID, "Failed to get BTF type info."),\ 518 - C(BAD_TYPE4STR, "This type does not fit for string."), 518 + C(BAD_TYPE4STR, "This type does not fit for string."),\ 519 + C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"), 519 520 520 521 #undef C 521 522 #define C(a, b) TP_ERR_##a
+2
lib/kunit/device-impl.h
··· 13 13 14 14 // For internal use only -- registers the kunit_bus. 15 15 int kunit_bus_init(void); 16 + // For internal use only -- unregisters the kunit_bus. 17 + void kunit_bus_shutdown(void); 16 18 17 19 #endif //_KUNIT_DEVICE_IMPL_H
+14
lib/kunit/device.c
··· 54 54 return error; 55 55 } 56 56 57 + /* Unregister the 'kunit_bus' in case the KUnit module is unloaded. */ 58 + void kunit_bus_shutdown(void) 59 + { 60 + /* Make sure the bus exists before we unregister it. */ 61 + if (IS_ERR_OR_NULL(kunit_bus_device)) 62 + return; 63 + 64 + bus_unregister(&kunit_bus_type); 65 + 66 + root_device_unregister(kunit_bus_device); 67 + 68 + kunit_bus_device = NULL; 69 + } 70 + 57 71 /* Release a 'fake' KUnit device. */ 58 72 static void kunit_device_release(struct device *d) 59 73 {
+3
lib/kunit/test.c
··· 928 928 #ifdef CONFIG_MODULES 929 929 unregister_module_notifier(&kunit_mod_nb); 930 930 #endif 931 + 932 + kunit_bus_shutdown(); 933 + 931 934 kunit_debugfs_cleanup(); 932 935 } 933 936 module_exit(kunit_exit);
+1 -1
mm/backing-dev.c
··· 436 436 INIT_LIST_HEAD(&wb->work_list); 437 437 INIT_DELAYED_WORK(&wb->dwork, wb_workfn); 438 438 INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn); 439 - wb->dirty_sleep = jiffies; 440 439 441 440 err = fprop_local_init_percpu(&wb->completions, gfp); 442 441 if (err) ··· 920 921 INIT_LIST_HEAD(&bdi->bdi_list); 921 922 INIT_LIST_HEAD(&bdi->wb_list); 922 923 init_waitqueue_head(&bdi->wb_waitq); 924 + bdi->last_bdp_sleep = jiffies; 923 925 924 926 return cgwb_bdi_init(bdi); 925 927 }
+1 -1
mm/damon/sysfs-schemes.c
··· 2194 2194 sysfs_regions->upd_timeout_jiffies = jiffies + 2195 2195 2 * usecs_to_jiffies(scheme->apply_interval_us ? 2196 2196 scheme->apply_interval_us : 2197 - ctx->attrs.sample_interval); 2197 + ctx->attrs.aggr_interval); 2198 2198 } 2199 2199 } 2200 2200
+1
mm/madvise.c
··· 429 429 if (++batch_count == SWAP_CLUSTER_MAX) { 430 430 batch_count = 0; 431 431 if (need_resched()) { 432 + arch_leave_lazy_mmu_mode(); 432 433 pte_unmap_unlock(start_pte, ptl); 433 434 cond_resched(); 434 435 goto restart;
+35 -21
mm/memcontrol.c
··· 621 621 } 622 622 623 623 struct memcg_vmstats_percpu { 624 + /* Stats updates since the last flush */ 625 + unsigned int stats_updates; 626 + 627 + /* Cached pointers for fast iteration in memcg_rstat_updated() */ 628 + struct memcg_vmstats_percpu *parent; 629 + struct memcg_vmstats *vmstats; 630 + 631 + /* The above should fit a single cacheline for memcg_rstat_updated() */ 632 + 624 633 /* Local (CPU and cgroup) page state & events */ 625 634 long state[MEMCG_NR_STAT]; 626 635 unsigned long events[NR_MEMCG_EVENTS]; ··· 641 632 /* Cgroup1: threshold notifications & softlimit tree updates */ 642 633 unsigned long nr_page_events; 643 634 unsigned long targets[MEM_CGROUP_NTARGETS]; 644 - 645 - /* Stats updates since the last flush */ 646 - unsigned int stats_updates; 647 - }; 635 + } ____cacheline_aligned; 648 636 649 637 struct memcg_vmstats { 650 638 /* Aggregated (CPU and subtree) page state & events */ ··· 704 698 } 705 699 706 700 707 - static bool memcg_should_flush_stats(struct mem_cgroup *memcg) 701 + static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats) 708 702 { 709 - return atomic64_read(&memcg->vmstats->stats_updates) > 703 + return atomic64_read(&vmstats->stats_updates) > 710 704 MEMCG_CHARGE_BATCH * num_online_cpus(); 711 705 } 712 706 713 707 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) 714 708 { 709 + struct memcg_vmstats_percpu *statc; 715 710 int cpu = smp_processor_id(); 716 - unsigned int x; 717 711 718 712 if (!val) 719 713 return; 720 714 721 715 cgroup_rstat_updated(memcg->css.cgroup, cpu); 722 - 723 - for (; memcg; memcg = parent_mem_cgroup(memcg)) { 724 - x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates, 725 - abs(val)); 726 - 727 - if (x < MEMCG_CHARGE_BATCH) 716 + statc = this_cpu_ptr(memcg->vmstats_percpu); 717 + for (; statc; statc = statc->parent) { 718 + statc->stats_updates += abs(val); 719 + if (statc->stats_updates < MEMCG_CHARGE_BATCH) 728 720 continue; 729 721 730 722 
/* 731 723 * If @memcg is already flush-able, increasing stats_updates is 732 724 * redundant. Avoid the overhead of the atomic update. 733 725 */ 734 - if (!memcg_should_flush_stats(memcg)) 735 - atomic64_add(x, &memcg->vmstats->stats_updates); 736 - __this_cpu_write(memcg->vmstats_percpu->stats_updates, 0); 726 + if (!memcg_vmstats_needs_flush(statc->vmstats)) 727 + atomic64_add(statc->stats_updates, 728 + &statc->vmstats->stats_updates); 729 + statc->stats_updates = 0; 737 730 } 738 731 } 739 732 ··· 761 756 if (!memcg) 762 757 memcg = root_mem_cgroup; 763 758 764 - if (memcg_should_flush_stats(memcg)) 759 + if (memcg_vmstats_needs_flush(memcg->vmstats)) 765 760 do_flush_stats(memcg); 766 761 } 767 762 ··· 775 770 static void flush_memcg_stats_dwork(struct work_struct *w) 776 771 { 777 772 /* 778 - * Deliberately ignore memcg_should_flush_stats() here so that flushing 773 + * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing 779 774 * in latency-sensitive paths is as cheap as possible. 780 775 */ 781 776 do_flush_stats(root_mem_cgroup); ··· 5482 5477 __mem_cgroup_free(memcg); 5483 5478 } 5484 5479 5485 - static struct mem_cgroup *mem_cgroup_alloc(void) 5480 + static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent) 5486 5481 { 5482 + struct memcg_vmstats_percpu *statc, *pstatc; 5487 5483 struct mem_cgroup *memcg; 5488 - int node; 5484 + int node, cpu; 5489 5485 int __maybe_unused i; 5490 5486 long error = -ENOMEM; 5491 5487 ··· 5509 5503 GFP_KERNEL_ACCOUNT); 5510 5504 if (!memcg->vmstats_percpu) 5511 5505 goto fail; 5506 + 5507 + for_each_possible_cpu(cpu) { 5508 + if (parent) 5509 + pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu); 5510 + statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 5511 + statc->parent = parent ? 
pstatc : NULL; 5512 + statc->vmstats = memcg->vmstats; 5513 + } 5512 5514 5513 5515 for_each_node(node) 5514 5516 if (alloc_mem_cgroup_per_node_info(memcg, node)) ··· 5563 5549 struct mem_cgroup *memcg, *old_memcg; 5564 5550 5565 5551 old_memcg = set_active_memcg(parent); 5566 - memcg = mem_cgroup_alloc(); 5552 + memcg = mem_cgroup_alloc(parent); 5567 5553 set_active_memcg(old_memcg); 5568 5554 if (IS_ERR(memcg)) 5569 5555 return ERR_CAST(memcg);
+3
mm/memory-failure.c
··· 1377 1377 */ 1378 1378 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) 1379 1379 { 1380 + if (PageSlab(page)) 1381 + return false; 1382 + 1380 1383 /* Soft offline could migrate non-LRU movable pages */ 1381 1384 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) 1382 1385 return true;
+2 -2
mm/memory.c
··· 5478 5478 return true; 5479 5479 5480 5480 if (regs && !user_mode(regs)) { 5481 - unsigned long ip = instruction_pointer(regs); 5481 + unsigned long ip = exception_ip(regs); 5482 5482 if (!search_exception_tables(ip)) 5483 5483 return false; 5484 5484 } ··· 5503 5503 { 5504 5504 mmap_read_unlock(mm); 5505 5505 if (regs && !user_mode(regs)) { 5506 - unsigned long ip = instruction_pointer(regs); 5506 + unsigned long ip = exception_ip(regs); 5507 5507 if (!search_exception_tables(ip)) 5508 5508 return false; 5509 5509 }
+1 -1
mm/page-writeback.c
··· 1921 1921 break; 1922 1922 } 1923 1923 __set_current_state(TASK_KILLABLE); 1924 - wb->dirty_sleep = now; 1924 + bdi->last_bdp_sleep = jiffies; 1925 1925 io_schedule_timeout(pause); 1926 1926 1927 1927 current->dirty_paused_when = now + pause;
+7 -7
mm/userfaultfd.c
··· 902 902 903 903 double_pt_lock(dst_ptl, src_ptl); 904 904 905 - if (!pte_same(*src_pte, orig_src_pte) || 906 - !pte_same(*dst_pte, orig_dst_pte)) { 905 + if (!pte_same(ptep_get(src_pte), orig_src_pte) || 906 + !pte_same(ptep_get(dst_pte), orig_dst_pte)) { 907 907 err = -EAGAIN; 908 908 goto out; 909 909 } ··· 946 946 947 947 double_pt_lock(dst_ptl, src_ptl); 948 948 949 - if (!pte_same(*src_pte, orig_src_pte) || 950 - !pte_same(*dst_pte, orig_dst_pte)) { 949 + if (!pte_same(ptep_get(src_pte), orig_src_pte) || 950 + !pte_same(ptep_get(dst_pte), orig_dst_pte)) { 951 951 double_pt_unlock(dst_ptl, src_ptl); 952 952 return -EAGAIN; 953 953 } ··· 1016 1016 } 1017 1017 1018 1018 spin_lock(dst_ptl); 1019 - orig_dst_pte = *dst_pte; 1019 + orig_dst_pte = ptep_get(dst_pte); 1020 1020 spin_unlock(dst_ptl); 1021 1021 if (!pte_none(orig_dst_pte)) { 1022 1022 err = -EEXIST; ··· 1024 1024 } 1025 1025 1026 1026 spin_lock(src_ptl); 1027 - orig_src_pte = *src_pte; 1027 + orig_src_pte = ptep_get(src_pte); 1028 1028 spin_unlock(src_ptl); 1029 1029 if (pte_none(orig_src_pte)) { 1030 1030 if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) ··· 1054 1054 * page isn't freed under us 1055 1055 */ 1056 1056 spin_lock(src_ptl); 1057 - if (!pte_same(orig_src_pte, *src_pte)) { 1057 + if (!pte_same(orig_src_pte, ptep_get(src_pte))) { 1058 1058 spin_unlock(src_ptl); 1059 1059 err = -EAGAIN; 1060 1060 goto out;
+5 -7
mm/zswap.c
··· 536 536 */ 537 537 static void zswap_free_entry(struct zswap_entry *entry) 538 538 { 539 - if (entry->objcg) { 540 - obj_cgroup_uncharge_zswap(entry->objcg, entry->length); 541 - obj_cgroup_put(entry->objcg); 542 - } 543 539 if (!entry->length) 544 540 atomic_dec(&zswap_same_filled_pages); 545 541 else { ··· 543 547 zpool_free(zswap_find_zpool(entry), entry->handle); 544 548 atomic_dec(&entry->pool->nr_stored); 545 549 zswap_pool_put(entry->pool); 550 + } 551 + if (entry->objcg) { 552 + obj_cgroup_uncharge_zswap(entry->objcg, entry->length); 553 + obj_cgroup_put(entry->objcg); 546 554 } 547 555 zswap_entry_cache_free(entry); 548 556 atomic_dec(&zswap_stored_pages); ··· 895 895 * into the warmer region. We should terminate shrinking (if we're in the dynamic 896 896 * shrinker context). 897 897 */ 898 - if (writeback_result == -EEXIST && encountered_page_in_swapcache) { 899 - ret = LRU_SKIP; 898 + if (writeback_result == -EEXIST && encountered_page_in_swapcache) 900 899 *encountered_page_in_swapcache = true; 901 - } 902 900 903 901 goto put_unlock; 904 902 }
+1
net/6lowpan/core.c
··· 179 179 module_init(lowpan_module_init); 180 180 module_exit(lowpan_module_exit); 181 181 182 + MODULE_DESCRIPTION("IPv6 over Low-Power Wireless Personal Area Network core module"); 182 183 MODULE_LICENSE("GPL");
+1
net/atm/mpc.c
··· 1532 1532 module_init(atm_mpoa_init); 1533 1533 module_exit(atm_mpoa_cleanup); 1534 1534 1535 + MODULE_DESCRIPTION("Multi-Protocol Over ATM (MPOA) driver"); 1535 1536 MODULE_LICENSE("GPL");
+2 -1
net/can/j1939/j1939-priv.h
··· 86 86 unsigned int tp_max_packet_size; 87 87 88 88 /* lock for j1939_socks list */ 89 - spinlock_t j1939_socks_lock; 89 + rwlock_t j1939_socks_lock; 90 90 struct list_head j1939_socks; 91 91 92 92 struct kref rx_kref; ··· 301 301 302 302 int ifindex; 303 303 struct j1939_addr addr; 304 + spinlock_t filters_lock; 304 305 struct j1939_filter *filters; 305 306 int nfilters; 306 307 pgn_t pgn_rx_filter;
+1 -1
net/can/j1939/main.c
··· 274 274 return ERR_PTR(-ENOMEM); 275 275 276 276 j1939_tp_init(priv); 277 - spin_lock_init(&priv->j1939_socks_lock); 277 + rwlock_init(&priv->j1939_socks_lock); 278 278 INIT_LIST_HEAD(&priv->j1939_socks); 279 279 280 280 mutex_lock(&j1939_netdev_lock);
+30 -16
net/can/j1939/socket.c
··· 80 80 jsk->state |= J1939_SOCK_BOUND; 81 81 j1939_priv_get(priv); 82 82 83 - spin_lock_bh(&priv->j1939_socks_lock); 83 + write_lock_bh(&priv->j1939_socks_lock); 84 84 list_add_tail(&jsk->list, &priv->j1939_socks); 85 - spin_unlock_bh(&priv->j1939_socks_lock); 85 + write_unlock_bh(&priv->j1939_socks_lock); 86 86 } 87 87 88 88 static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk) 89 89 { 90 - spin_lock_bh(&priv->j1939_socks_lock); 90 + write_lock_bh(&priv->j1939_socks_lock); 91 91 list_del_init(&jsk->list); 92 - spin_unlock_bh(&priv->j1939_socks_lock); 92 + write_unlock_bh(&priv->j1939_socks_lock); 93 93 94 94 j1939_priv_put(priv); 95 95 jsk->state &= ~J1939_SOCK_BOUND; ··· 262 262 static bool j1939_sk_match_filter(struct j1939_sock *jsk, 263 263 const struct j1939_sk_buff_cb *skcb) 264 264 { 265 - const struct j1939_filter *f = jsk->filters; 266 - int nfilter = jsk->nfilters; 265 + const struct j1939_filter *f; 266 + int nfilter; 267 + 268 + spin_lock_bh(&jsk->filters_lock); 269 + 270 + f = jsk->filters; 271 + nfilter = jsk->nfilters; 267 272 268 273 if (!nfilter) 269 274 /* receive all when no filters are assigned */ 270 - return true; 275 + goto filter_match_found; 271 276 272 277 for (; nfilter; ++f, --nfilter) { 273 278 if ((skcb->addr.pgn & f->pgn_mask) != f->pgn) ··· 281 276 continue; 282 277 if ((skcb->addr.src_name & f->name_mask) != f->name) 283 278 continue; 284 - return true; 279 + goto filter_match_found; 285 280 } 281 + 282 + spin_unlock_bh(&jsk->filters_lock); 286 283 return false; 284 + 285 + filter_match_found: 286 + spin_unlock_bh(&jsk->filters_lock); 287 + return true; 287 288 } 288 289 289 290 static bool j1939_sk_recv_match_one(struct j1939_sock *jsk, ··· 340 329 struct j1939_sock *jsk; 341 330 bool match = false; 342 331 343 - spin_lock_bh(&priv->j1939_socks_lock); 332 + read_lock_bh(&priv->j1939_socks_lock); 344 333 list_for_each_entry(jsk, &priv->j1939_socks, list) { 345 334 match = j1939_sk_recv_match_one(jsk, skcb); 
346 335 if (match) 347 336 break; 348 337 } 349 - spin_unlock_bh(&priv->j1939_socks_lock); 338 + read_unlock_bh(&priv->j1939_socks_lock); 350 339 351 340 return match; 352 341 } ··· 355 344 { 356 345 struct j1939_sock *jsk; 357 346 358 - spin_lock_bh(&priv->j1939_socks_lock); 347 + read_lock_bh(&priv->j1939_socks_lock); 359 348 list_for_each_entry(jsk, &priv->j1939_socks, list) { 360 349 j1939_sk_recv_one(jsk, skb); 361 350 } 362 - spin_unlock_bh(&priv->j1939_socks_lock); 351 + read_unlock_bh(&priv->j1939_socks_lock); 363 352 } 364 353 365 354 static void j1939_sk_sock_destruct(struct sock *sk) ··· 412 401 atomic_set(&jsk->skb_pending, 0); 413 402 spin_lock_init(&jsk->sk_session_queue_lock); 414 403 INIT_LIST_HEAD(&jsk->sk_session_queue); 404 + spin_lock_init(&jsk->filters_lock); 415 405 416 406 /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */ 417 407 sock_set_flag(sk, SOCK_RCU_FREE); ··· 715 703 } 716 704 717 705 lock_sock(&jsk->sk); 706 + spin_lock_bh(&jsk->filters_lock); 718 707 ofilters = jsk->filters; 719 708 jsk->filters = filters; 720 709 jsk->nfilters = count; 710 + spin_unlock_bh(&jsk->filters_lock); 721 711 release_sock(&jsk->sk); 722 712 kfree(ofilters); 723 713 return 0; ··· 1094 1080 } 1095 1081 1096 1082 /* spread RX notifications to all sockets subscribed to this session */ 1097 - spin_lock_bh(&priv->j1939_socks_lock); 1083 + read_lock_bh(&priv->j1939_socks_lock); 1098 1084 list_for_each_entry(jsk, &priv->j1939_socks, list) { 1099 1085 if (j1939_sk_recv_match_one(jsk, &session->skcb)) 1100 1086 __j1939_sk_errqueue(session, &jsk->sk, type); 1101 1087 } 1102 - spin_unlock_bh(&priv->j1939_socks_lock); 1088 + read_unlock_bh(&priv->j1939_socks_lock); 1103 1089 }; 1104 1090 1105 1091 void j1939_sk_send_loop_abort(struct sock *sk, int err) ··· 1287 1273 struct j1939_sock *jsk; 1288 1274 int error_code = ENETDOWN; 1289 1275 1290 - spin_lock_bh(&priv->j1939_socks_lock); 1276 + read_lock_bh(&priv->j1939_socks_lock); 1291 1277 
list_for_each_entry(jsk, &priv->j1939_socks, list) { 1292 1278 jsk->sk.sk_err = error_code; 1293 1279 if (!sock_flag(&jsk->sk, SOCK_DEAD)) ··· 1295 1281 1296 1282 j1939_sk_queue_drop_all(priv, jsk, error_code); 1297 1283 } 1298 - spin_unlock_bh(&priv->j1939_socks_lock); 1284 + read_unlock_bh(&priv->j1939_socks_lock); 1299 1285 } 1300 1286 1301 1287 static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
+17 -16
net/ceph/messenger_v1.c
··· 160 160 static void prepare_message_data(struct ceph_msg *msg, u32 data_len) 161 161 { 162 162 /* Initialize data cursor if it's not a sparse read */ 163 - if (!msg->sparse_read) 164 - ceph_msg_data_cursor_init(&msg->cursor, msg, data_len); 163 + u64 len = msg->sparse_read_total ? : data_len; 164 + 165 + ceph_msg_data_cursor_init(&msg->cursor, msg, len); 165 166 } 166 167 167 168 /* ··· 992 991 return read_partial_message_chunk(con, section, sec_len, crc); 993 992 } 994 993 995 - static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc) 994 + static int read_partial_sparse_msg_extent(struct ceph_connection *con, u32 *crc) 996 995 { 997 996 struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor; 998 997 bool do_bounce = ceph_test_opt(from_msgr(con->msgr), RXBOUNCE); ··· 1027 1026 return 1; 1028 1027 } 1029 1028 1030 - static int read_sparse_msg_data(struct ceph_connection *con) 1029 + static int read_partial_sparse_msg_data(struct ceph_connection *con) 1031 1030 { 1032 1031 struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor; 1033 1032 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); ··· 1037 1036 if (do_datacrc) 1038 1037 crc = con->in_data_crc; 1039 1038 1040 - do { 1039 + while (cursor->total_resid) { 1041 1040 if (con->v1.in_sr_kvec.iov_base) 1042 1041 ret = read_partial_message_chunk(con, 1043 1042 &con->v1.in_sr_kvec, 1044 1043 con->v1.in_sr_len, 1045 1044 &crc); 1046 1045 else if (cursor->sr_resid > 0) 1047 - ret = read_sparse_msg_extent(con, &crc); 1048 - 1049 - if (ret <= 0) { 1050 - if (do_datacrc) 1051 - con->in_data_crc = crc; 1052 - return ret; 1053 - } 1046 + ret = read_partial_sparse_msg_extent(con, &crc); 1047 + if (ret <= 0) 1048 + break; 1054 1049 1055 1050 memset(&con->v1.in_sr_kvec, 0, sizeof(con->v1.in_sr_kvec)); 1056 1051 ret = con->ops->sparse_read(con, cursor, 1057 1052 (char **)&con->v1.in_sr_kvec.iov_base); 1053 + if (ret <= 0) { 1054 + ret = ret ? 
ret : 1; /* must return > 0 to indicate success */ 1055 + break; 1056 + } 1058 1057 con->v1.in_sr_len = ret; 1059 - } while (ret > 0); 1058 + } 1060 1059 1061 1060 if (do_datacrc) 1062 1061 con->in_data_crc = crc; 1063 1062 1064 - return ret < 0 ? ret : 1; /* must return > 0 to indicate success */ 1063 + return ret; 1065 1064 } 1066 1065 1067 1066 static int read_partial_msg_data(struct ceph_connection *con) ··· 1254 1253 if (!m->num_data_items) 1255 1254 return -EIO; 1256 1255 1257 - if (m->sparse_read) 1258 - ret = read_sparse_msg_data(con); 1256 + if (m->sparse_read_total) 1257 + ret = read_partial_sparse_msg_data(con); 1259 1258 else if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) 1260 1259 ret = read_partial_msg_data_bounce(con); 1261 1260 else
+2 -2
net/ceph/messenger_v2.c
··· 1128 1128 struct sg_table enc_sgt = {}; 1129 1129 struct sg_table sgt = {}; 1130 1130 struct page **pages = NULL; 1131 - bool sparse = con->in_msg->sparse_read; 1131 + bool sparse = !!con->in_msg->sparse_read_total; 1132 1132 int dpos = 0; 1133 1133 int tail_len; 1134 1134 int ret; ··· 2060 2060 } 2061 2061 2062 2062 if (data_len(msg)) { 2063 - if (msg->sparse_read) 2063 + if (msg->sparse_read_total) 2064 2064 con->v2.in_state = IN_S_PREPARE_SPARSE_DATA; 2065 2065 else 2066 2066 con->v2.in_state = IN_S_PREPARE_READ_DATA;
+18 -9
net/ceph/osd_client.c
··· 5510 5510 } 5511 5511 5512 5512 m = ceph_msg_get(req->r_reply); 5513 - m->sparse_read = (bool)srlen; 5513 + m->sparse_read_total = srlen; 5514 5514 5515 5515 dout("get_reply tid %lld %p\n", tid, m); 5516 5516 ··· 5777 5777 } 5778 5778 5779 5779 if (o->o_sparse_op_idx < 0) { 5780 - u64 srlen = sparse_data_requested(req); 5781 - 5782 - dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n", 5783 - __func__, o->o_osd, srlen); 5784 - ceph_msg_data_cursor_init(cursor, con->in_msg, srlen); 5780 + dout("%s: [%d] starting new sparse read req\n", 5781 + __func__, o->o_osd); 5785 5782 } else { 5786 5783 u64 end; 5787 5784 ··· 5854 5857 struct ceph_osd *o = con->private; 5855 5858 struct ceph_sparse_read *sr = &o->o_sparse_read; 5856 5859 u32 count = sr->sr_count; 5857 - u64 eoff, elen; 5858 - int ret; 5860 + u64 eoff, elen, len = 0; 5861 + int i, ret; 5859 5862 5860 5863 switch (sr->sr_state) { 5861 5864 case CEPH_SPARSE_READ_HDR: ··· 5900 5903 convert_extent_map(sr); 5901 5904 ret = sizeof(sr->sr_datalen); 5902 5905 *pbuf = (char *)&sr->sr_datalen; 5903 - sr->sr_state = CEPH_SPARSE_READ_DATA; 5906 + sr->sr_state = CEPH_SPARSE_READ_DATA_PRE; 5904 5907 break; 5908 + case CEPH_SPARSE_READ_DATA_PRE: 5909 + /* Convert sr_datalen to host-endian */ 5910 + sr->sr_datalen = le32_to_cpu((__force __le32)sr->sr_datalen); 5911 + for (i = 0; i < count; i++) 5912 + len += sr->sr_extent[i].len; 5913 + if (sr->sr_datalen != len) { 5914 + pr_warn_ratelimited("data len %u != extent len %llu\n", 5915 + sr->sr_datalen, len); 5916 + return -EREMOTEIO; 5917 + } 5918 + sr->sr_state = CEPH_SPARSE_READ_DATA; 5919 + fallthrough; 5905 5920 case CEPH_SPARSE_READ_DATA: 5906 5921 if (sr->sr_index >= count) { 5907 5922 sr->sr_state = CEPH_SPARSE_READ_HDR;
+3 -2
net/core/dev.c
··· 316 316 return -ENOMEM; 317 317 netdev_name_node_add(net, name_node); 318 318 /* The node that holds dev->name acts as a head of per-device list. */ 319 - list_add_tail(&name_node->list, &dev->name_node->list); 319 + list_add_tail_rcu(&name_node->list, &dev->name_node->list); 320 320 321 321 return 0; 322 322 } ··· 11695 11695 CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160); 11696 11696 11697 11697 /* TXRX read-mostly hotpath */ 11698 + CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats); 11698 11699 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags); 11699 11700 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len); 11700 11701 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features); 11701 11702 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr); 11702 - CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 30); 11703 + CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 38); 11703 11704 11704 11705 /* RX read-mostly hotpath */ 11705 11706 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
+9 -6
net/core/rtnetlink.c
··· 1019 1019 static size_t rtnl_prop_list_size(const struct net_device *dev) 1020 1020 { 1021 1021 struct netdev_name_node *name_node; 1022 - size_t size; 1022 + unsigned int cnt = 0; 1023 1023 1024 - if (list_empty(&dev->name_node->list)) 1024 + rcu_read_lock(); 1025 + list_for_each_entry_rcu(name_node, &dev->name_node->list, list) 1026 + cnt++; 1027 + rcu_read_unlock(); 1028 + 1029 + if (!cnt) 1025 1030 return 0; 1026 - size = nla_total_size(0); 1027 - list_for_each_entry(name_node, &dev->name_node->list, list) 1028 - size += nla_total_size(ALTIFNAMSIZ); 1029 - return size; 1031 + 1032 + return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ); 1030 1033 } 1031 1034 1032 1035 static size_t rtnl_proto_down_size(const struct net_device *dev)
+4 -1
net/handshake/handshake-test.c
··· 471 471 handshake_req_cancel(sock->sk); 472 472 473 473 /* Act */ 474 - fput(filp); 474 + /* Ensure the close/release/put process has run to 475 + * completion before checking the result. 476 + */ 477 + __fput_sync(filp); 475 478 476 479 /* Assert */ 477 480 KUNIT_EXPECT_PTR_EQ(test, handshake_req_destroy_test, req);
+1
net/ipv4/ah4.c
··· 597 597 598 598 module_init(ah4_init); 599 599 module_exit(ah4_fini); 600 + MODULE_DESCRIPTION("IPv4 AH transformation library"); 600 601 MODULE_LICENSE("GPL"); 601 602 MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);
+1
net/ipv4/esp4.c
··· 1247 1247 1248 1248 module_init(esp4_init); 1249 1249 module_exit(esp4_fini); 1250 + MODULE_DESCRIPTION("IPv4 ESP transformation library"); 1250 1251 MODULE_LICENSE("GPL"); 1251 1252 MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
+1
net/ipv4/ip_gre.c
··· 1799 1799 1800 1800 module_init(ipgre_init); 1801 1801 module_exit(ipgre_fini); 1802 + MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library"); 1802 1803 MODULE_LICENSE("GPL"); 1803 1804 MODULE_ALIAS_RTNL_LINK("gre"); 1804 1805 MODULE_ALIAS_RTNL_LINK("gretap");
+8 -5
net/ipv4/ip_output.c
··· 972 972 unsigned int maxfraglen, fragheaderlen, maxnonfragsize; 973 973 int csummode = CHECKSUM_NONE; 974 974 struct rtable *rt = (struct rtable *)cork->dst; 975 + bool paged, hold_tskey, extra_uref = false; 975 976 unsigned int wmem_alloc_delta = 0; 976 - bool paged, extra_uref = false; 977 977 u32 tskey = 0; 978 978 979 979 skb = skb_peek_tail(queue); ··· 981 981 exthdrlen = !skb ? rt->dst.header_len : 0; 982 982 mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize; 983 983 paged = !!cork->gso_size; 984 - 985 - if (cork->tx_flags & SKBTX_ANY_TSTAMP && 986 - READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) 987 - tskey = atomic_inc_return(&sk->sk_tskey) - 1; 988 984 989 985 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 990 986 ··· 1047 1051 } 1048 1052 1049 1053 cork->length += length; 1054 + 1055 + hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP && 1056 + READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID; 1057 + if (hold_tskey) 1058 + tskey = atomic_inc_return(&sk->sk_tskey) - 1; 1050 1059 1051 1060 /* So, what's going on in the loop below? 1052 1061 * ··· 1275 1274 cork->length -= length; 1276 1275 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); 1277 1276 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 1277 + if (hold_tskey) 1278 + atomic_dec(&sk->sk_tskey); 1278 1279 return err; 1279 1280 } 1280 1281
+1
net/ipv4/ip_tunnel.c
··· 1296 1296 } 1297 1297 EXPORT_SYMBOL_GPL(ip_tunnel_setup); 1298 1298 1299 + MODULE_DESCRIPTION("IPv4 tunnel implementation library"); 1299 1300 MODULE_LICENSE("GPL");
+1
net/ipv4/ip_vti.c
··· 723 723 724 724 module_init(vti_init); 725 725 module_exit(vti_fini); 726 + MODULE_DESCRIPTION("Virtual (secure) IP tunneling library"); 726 727 MODULE_LICENSE("GPL"); 727 728 MODULE_ALIAS_RTNL_LINK("vti"); 728 729 MODULE_ALIAS_NETDEV("ip_vti0");
+1
net/ipv4/ipip.c
··· 660 660 661 661 module_init(ipip_init); 662 662 module_exit(ipip_fini); 663 + MODULE_DESCRIPTION("IP/IP protocol decoder library"); 663 664 MODULE_LICENSE("GPL"); 664 665 MODULE_ALIAS_RTNL_LINK("ipip"); 665 666 MODULE_ALIAS_NETDEV("tunl0");
+2 -1
net/ipv4/tcp.c
··· 4615 4615 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out); 4616 4616 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out); 4617 4617 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out); 4618 - CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 31); 4618 + CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio); 4619 + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32); 4619 4620 4620 4621 /* RX read-mostly hotpath cache lines */ 4621 4622 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq);
+1
net/ipv4/tunnel4.c
··· 294 294 295 295 module_init(tunnel4_init); 296 296 module_exit(tunnel4_fini); 297 + MODULE_DESCRIPTION("IPv4 XFRM tunnel library"); 297 298 MODULE_LICENSE("GPL");
+1
net/ipv4/udp_tunnel_core.c
··· 253 253 } 254 254 EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup); 255 255 256 + MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver"); 256 257 MODULE_LICENSE("GPL");
+1
net/ipv4/xfrm4_tunnel.c
··· 114 114 115 115 module_init(ipip_init); 116 116 module_exit(ipip_fini); 117 + MODULE_DESCRIPTION("IPv4 XFRM tunnel driver"); 117 118 MODULE_LICENSE("GPL"); 118 119 MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_IPIP);
+1
net/ipv6/ah6.c
··· 800 800 module_init(ah6_init); 801 801 module_exit(ah6_fini); 802 802 803 + MODULE_DESCRIPTION("IPv6 AH transformation helpers"); 803 804 MODULE_LICENSE("GPL"); 804 805 MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);
+1
net/ipv6/esp6.c
··· 1301 1301 module_init(esp6_init); 1302 1302 module_exit(esp6_fini); 1303 1303 1304 + MODULE_DESCRIPTION("IPv6 ESP transformation helpers"); 1304 1305 MODULE_LICENSE("GPL"); 1305 1306 MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
+8 -5
net/ipv6/ip6_output.c
··· 1424 1424 bool zc = false; 1425 1425 u32 tskey = 0; 1426 1426 struct rt6_info *rt = (struct rt6_info *)cork->dst; 1427 + bool paged, hold_tskey, extra_uref = false; 1427 1428 struct ipv6_txoptions *opt = v6_cork->opt; 1428 1429 int csummode = CHECKSUM_NONE; 1429 1430 unsigned int maxnonfragsize, headersize; 1430 1431 unsigned int wmem_alloc_delta = 0; 1431 - bool paged, extra_uref = false; 1432 1432 1433 1433 skb = skb_peek_tail(queue); 1434 1434 if (!skb) { ··· 1439 1439 paged = !!cork->gso_size; 1440 1440 mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize; 1441 1441 orig_mtu = mtu; 1442 - 1443 - if (cork->tx_flags & SKBTX_ANY_TSTAMP && 1444 - READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) 1445 - tskey = atomic_inc_return(&sk->sk_tskey) - 1; 1446 1442 1447 1443 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 1448 1444 ··· 1533 1537 else 1534 1538 flags &= ~MSG_SPLICE_PAGES; 1535 1539 } 1540 + 1541 + hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP && 1542 + READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID; 1543 + if (hold_tskey) 1544 + tskey = atomic_inc_return(&sk->sk_tskey) - 1; 1536 1545 1537 1546 /* 1538 1547 * Let's try using as much space as possible. ··· 1795 1794 cork->length -= length; 1796 1795 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 1797 1796 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 1797 + if (hold_tskey) 1798 + atomic_dec(&sk->sk_tskey); 1798 1799 return err; 1799 1800 } 1800 1801
+1
net/ipv6/ip6_udp_tunnel.c
··· 182 182 } 183 183 EXPORT_SYMBOL_GPL(udp_tunnel6_dst_lookup); 184 184 185 + MODULE_DESCRIPTION("IPv6 Foo over UDP tunnel driver"); 185 186 MODULE_LICENSE("GPL");
+1
net/ipv6/mip6.c
··· 405 405 module_init(mip6_init); 406 406 module_exit(mip6_fini); 407 407 408 + MODULE_DESCRIPTION("IPv6 Mobility driver"); 408 409 MODULE_LICENSE("GPL"); 409 410 MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_DSTOPTS); 410 411 MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ROUTING);
+1
net/ipv6/sit.c
··· 1954 1954 1955 1955 module_init(sit_init); 1956 1956 module_exit(sit_cleanup); 1957 + MODULE_DESCRIPTION("IPv6-in-IPv4 tunnel SIT driver"); 1957 1958 MODULE_LICENSE("GPL"); 1958 1959 MODULE_ALIAS_RTNL_LINK("sit"); 1959 1960 MODULE_ALIAS_NETDEV("sit0");
+1
net/ipv6/tunnel6.c
··· 302 302 303 303 module_init(tunnel6_init); 304 304 module_exit(tunnel6_fini); 305 + MODULE_DESCRIPTION("IP-in-IPv6 tunnel driver"); 305 306 MODULE_LICENSE("GPL");
+1
net/ipv6/xfrm6_tunnel.c
··· 401 401 402 402 module_init(xfrm6_tunnel_init); 403 403 module_exit(xfrm6_tunnel_fini); 404 + MODULE_DESCRIPTION("IPv6 XFRM tunnel driver"); 404 405 MODULE_LICENSE("GPL"); 405 406 MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);
+1
net/key/af_key.c
··· 3924 3924 3925 3925 module_init(ipsec_pfkey_init); 3926 3926 module_exit(ipsec_pfkey_exit); 3927 + MODULE_DESCRIPTION("PF_KEY socket helpers"); 3927 3928 MODULE_LICENSE("GPL"); 3928 3929 MODULE_ALIAS_NETPROTO(PF_KEY);
+3 -2
net/mac80211/tx.c
··· 5 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 6 6 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 7 7 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 - * Copyright (C) 2018-2022 Intel Corporation 8 + * Copyright (C) 2018-2024 Intel Corporation 9 9 * 10 10 * Transmit and frame generation functions. 11 11 */ ··· 3927 3927 goto begin; 3928 3928 3929 3929 skb = __skb_dequeue(&tx.skbs); 3930 + info = IEEE80211_SKB_CB(skb); 3930 3931 3931 3932 if (!skb_queue_empty(&tx.skbs)) { 3932 3933 spin_lock_bh(&fq->lock); ··· 3972 3971 } 3973 3972 3974 3973 encap_out: 3975 - IEEE80211_SKB_CB(skb)->control.vif = vif; 3974 + info->control.vif = vif; 3976 3975 3977 3976 if (tx.sta && 3978 3977 wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
+2 -4
net/mptcp/fastopen.c
··· 59 59 mptcp_data_unlock(sk); 60 60 } 61 61 62 - void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 63 - const struct mptcp_options_received *mp_opt) 62 + void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 63 + const struct mptcp_options_received *mp_opt) 64 64 { 65 65 struct sock *sk = (struct sock *)msk; 66 66 struct sk_buff *skb; 67 67 68 - mptcp_data_lock(sk); 69 68 skb = skb_peek_tail(&sk->sk_receive_queue); 70 69 if (skb) { 71 70 WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq); ··· 76 77 } 77 78 78 79 pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq); 79 - mptcp_data_unlock(sk); 80 80 }
+5 -4
net/mptcp/options.c
··· 962 962 /* subflows are fully established as soon as we get any 963 963 * additional ack, including ADD_ADDR. 964 964 */ 965 - subflow->fully_established = 1; 966 - WRITE_ONCE(msk->fully_established, true); 967 - goto check_notify; 965 + goto set_fully_established; 968 966 } 969 967 970 968 /* If the first established packet does not contain MP_CAPABLE + data ··· 984 986 set_fully_established: 985 987 if (unlikely(!READ_ONCE(msk->pm.server_side))) 986 988 pr_warn_once("bogus mpc option on established client sk"); 987 - mptcp_subflow_fully_established(subflow, mp_opt); 989 + 990 + mptcp_data_lock((struct sock *)msk); 991 + __mptcp_subflow_fully_established(msk, subflow, mp_opt); 992 + mptcp_data_unlock((struct sock *)msk); 988 993 989 994 check_notify: 990 995 /* if the subflow is not already linked into the conn_list, we can't
+12 -1
net/mptcp/pm_userspace.c
··· 130 130 int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, 131 131 struct mptcp_addr_info *skc) 132 132 { 133 - struct mptcp_pm_addr_entry new_entry; 133 + struct mptcp_pm_addr_entry *entry = NULL, *e, new_entry; 134 134 __be16 msk_sport = ((struct inet_sock *) 135 135 inet_sk((struct sock *)msk))->inet_sport; 136 + 137 + spin_lock_bh(&msk->pm.lock); 138 + list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) { 139 + if (mptcp_addresses_equal(&e->addr, skc, false)) { 140 + entry = e; 141 + break; 142 + } 143 + } 144 + spin_unlock_bh(&msk->pm.lock); 145 + if (entry) 146 + return entry->addr.id; 136 147 137 148 memset(&new_entry, 0, sizeof(struct mptcp_pm_addr_entry)); 138 149 new_entry.addr = *skc;
+17 -14
net/mptcp/protocol.c
··· 1510 1510 1511 1511 void mptcp_check_and_set_pending(struct sock *sk) 1512 1512 { 1513 - if (mptcp_send_head(sk)) 1514 - mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING); 1513 + if (mptcp_send_head(sk)) { 1514 + mptcp_data_lock(sk); 1515 + mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING); 1516 + mptcp_data_unlock(sk); 1517 + } 1515 1518 } 1516 1519 1517 1520 static int __subflow_push_pending(struct sock *sk, struct sock *ssk, ··· 1967 1964 1968 1965 if (copied <= 0) 1969 1966 return; 1967 + 1968 + if (!msk->rcvspace_init) 1969 + mptcp_rcv_space_init(msk, msk->first); 1970 1970 1971 1971 msk->rcvq_space.copied += copied; 1972 1972 ··· 3153 3147 mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); 3154 3148 WRITE_ONCE(msk->flags, 0); 3155 3149 msk->cb_flags = 0; 3156 - msk->push_pending = 0; 3157 3150 msk->recovery = false; 3158 3151 WRITE_ONCE(msk->can_ack, false); 3159 3152 WRITE_ONCE(msk->fully_established, false); ··· 3168 3163 msk->bytes_received = 0; 3169 3164 msk->bytes_sent = 0; 3170 3165 msk->bytes_retrans = 0; 3166 + msk->rcvspace_init = 0; 3171 3167 3172 3168 WRITE_ONCE(sk->sk_shutdown, 0); 3173 3169 sk_error_report(sk); ··· 3191 3185 { 3192 3186 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 3193 3187 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); 3188 + struct mptcp_subflow_context *subflow; 3194 3189 struct mptcp_sock *msk; 3195 3190 3196 3191 if (!nsk) ··· 3232 3225 3233 3226 /* The msk maintain a ref to each subflow in the connections list */ 3234 3227 WRITE_ONCE(msk->first, ssk); 3235 - list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list); 3228 + subflow = mptcp_subflow_ctx(ssk); 3229 + list_add(&subflow->node, &msk->conn_list); 3236 3230 sock_hold(ssk); 3237 3231 3238 3232 /* new mpc subflow takes ownership of the newly ··· 3248 3240 __mptcp_propagate_sndbuf(nsk, ssk); 3249 3241 3250 3242 mptcp_rcv_space_init(msk, ssk); 3243 + 3244 + if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK) 3245 + 
__mptcp_subflow_fully_established(msk, subflow, mp_opt); 3251 3246 bh_unlock_sock(nsk); 3252 3247 3253 3248 /* note: the newly allocated socket refcount is 2 now */ ··· 3261 3250 { 3262 3251 const struct tcp_sock *tp = tcp_sk(ssk); 3263 3252 3253 + msk->rcvspace_init = 1; 3264 3254 msk->rcvq_space.copied = 0; 3265 3255 msk->rcvq_space.rtt_us = 0; 3266 3256 ··· 3272 3260 TCP_INIT_CWND * tp->advmss); 3273 3261 if (msk->rcvq_space.space == 0) 3274 3262 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; 3275 - 3276 - WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); 3277 3263 } 3278 3264 3279 3265 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) ··· 3342 3332 struct mptcp_sock *msk = mptcp_sk(sk); 3343 3333 3344 3334 for (;;) { 3345 - unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) | 3346 - msk->push_pending; 3335 + unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); 3347 3336 struct list_head join_list; 3348 3337 3349 3338 if (!flags) ··· 3358 3349 * datapath acquires the msk socket spinlock while helding 3359 3350 * the subflow socket lock 3360 3351 */ 3361 - msk->push_pending = 0; 3362 3352 msk->cb_flags &= ~flags; 3363 3353 spin_unlock_bh(&sk->sk_lock.slock); 3364 3354 ··· 3485 3477 * accessing the field below 3486 3478 */ 3487 3479 WRITE_ONCE(msk->local_key, subflow->local_key); 3488 - WRITE_ONCE(msk->write_seq, subflow->idsn + 1); 3489 - WRITE_ONCE(msk->snd_nxt, msk->write_seq); 3490 - WRITE_ONCE(msk->snd_una, msk->write_seq); 3491 3480 3492 3481 mptcp_pm_new_connection(msk, ssk, 0); 3493 - 3494 - mptcp_rcv_space_init(msk, ssk); 3495 3482 } 3496 3483 3497 3484 void mptcp_sock_graft(struct sock *sk, struct socket *parent)
+9 -7
net/mptcp/protocol.h
··· 288 288 int rmem_released; 289 289 unsigned long flags; 290 290 unsigned long cb_flags; 291 - unsigned long push_pending; 292 291 bool recovery; /* closing subflow write queue reinjected */ 293 292 bool can_ack; 294 293 bool fully_established; ··· 306 307 nodelay:1, 307 308 fastopening:1, 308 309 in_accept_queue:1, 309 - free_first:1; 310 + free_first:1, 311 + rcvspace_init:1; 310 312 struct work_struct work; 311 313 struct sk_buff *ooo_last_skb; 312 314 struct rb_root out_of_order_queue; ··· 624 624 unsigned int mptcp_close_timeout(const struct sock *sk); 625 625 int mptcp_get_pm_type(const struct net *net); 626 626 const char *mptcp_get_scheduler(const struct net *net); 627 - void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, 628 - const struct mptcp_options_received *mp_opt); 627 + void __mptcp_subflow_fully_established(struct mptcp_sock *msk, 628 + struct mptcp_subflow_context *subflow, 629 + const struct mptcp_options_received *mp_opt); 629 630 bool __mptcp_retransmit_pending_data(struct sock *sk); 630 631 void mptcp_check_and_set_pending(struct sock *sk); 631 632 void __mptcp_push_pending(struct sock *sk, unsigned int flags); ··· 955 954 enum mptcp_event_type event); 956 955 bool mptcp_userspace_pm_active(const struct mptcp_sock *msk); 957 956 958 - void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 959 - const struct mptcp_options_received *mp_opt); 957 + void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 958 + const struct mptcp_options_received *mp_opt); 960 959 void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow, 961 960 struct request_sock *req); 962 961 ··· 1131 1130 { 1132 1131 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1133 1132 1134 - return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1) && 1133 + return (1 << sk->sk_state) & 1134 + (TCPF_ESTABLISHED | 
TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING) && 1135 1135 is_active_ssk(subflow) && 1136 1136 !subflow->conn_finished; 1137 1137 }
+44 -27
net/mptcp/subflow.c
··· 422 422 423 423 void __mptcp_sync_state(struct sock *sk, int state) 424 424 { 425 + struct mptcp_subflow_context *subflow; 425 426 struct mptcp_sock *msk = mptcp_sk(sk); 427 + struct sock *ssk = msk->first; 426 428 427 - __mptcp_propagate_sndbuf(sk, msk->first); 429 + subflow = mptcp_subflow_ctx(ssk); 430 + __mptcp_propagate_sndbuf(sk, ssk); 431 + if (!msk->rcvspace_init) 432 + mptcp_rcv_space_init(msk, ssk); 433 + 428 434 if (sk->sk_state == TCP_SYN_SENT) { 435 + /* subflow->idsn is always available is TCP_SYN_SENT state, 436 + * even for the FASTOPEN scenarios 437 + */ 438 + WRITE_ONCE(msk->write_seq, subflow->idsn + 1); 439 + WRITE_ONCE(msk->snd_nxt, msk->write_seq); 429 440 mptcp_set_state(sk, state); 430 441 sk->sk_state_change(sk); 431 442 } 432 - } 433 - 434 - static void mptcp_propagate_state(struct sock *sk, struct sock *ssk) 435 - { 436 - struct mptcp_sock *msk = mptcp_sk(sk); 437 - 438 - mptcp_data_lock(sk); 439 - if (!sock_owned_by_user(sk)) { 440 - __mptcp_sync_state(sk, ssk->sk_state); 441 - } else { 442 - msk->pending_state = ssk->sk_state; 443 - __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags); 444 - } 445 - mptcp_data_unlock(sk); 446 443 } 447 444 448 445 static void subflow_set_remote_key(struct mptcp_sock *msk, ··· 461 464 WRITE_ONCE(msk->ack_seq, subflow->iasn); 462 465 WRITE_ONCE(msk->can_ack, true); 463 466 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn); 467 + } 468 + 469 + static void mptcp_propagate_state(struct sock *sk, struct sock *ssk, 470 + struct mptcp_subflow_context *subflow, 471 + const struct mptcp_options_received *mp_opt) 472 + { 473 + struct mptcp_sock *msk = mptcp_sk(sk); 474 + 475 + mptcp_data_lock(sk); 476 + if (mp_opt) { 477 + /* Options are available only in the non fallback cases 478 + * avoid updating rx path fields otherwise 479 + */ 480 + WRITE_ONCE(msk->snd_una, subflow->idsn + 1); 481 + WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd); 482 + subflow_set_remote_key(msk, subflow, mp_opt); 483 + } 484 + 
485 + if (!sock_owned_by_user(sk)) { 486 + __mptcp_sync_state(sk, ssk->sk_state); 487 + } else { 488 + msk->pending_state = ssk->sk_state; 489 + __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags); 490 + } 491 + mptcp_data_unlock(sk); 464 492 } 465 493 466 494 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) ··· 522 500 if (mp_opt.deny_join_id0) 523 501 WRITE_ONCE(msk->pm.remote_deny_join_id0, true); 524 502 subflow->mp_capable = 1; 525 - subflow_set_remote_key(msk, subflow, &mp_opt); 526 503 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK); 527 504 mptcp_finish_connect(sk); 528 - mptcp_propagate_state(parent, sk); 505 + mptcp_propagate_state(parent, sk, subflow, &mp_opt); 529 506 } else if (subflow->request_join) { 530 507 u8 hmac[SHA256_DIGEST_SIZE]; 531 508 ··· 567 546 } 568 547 } else if (mptcp_check_fallback(sk)) { 569 548 fallback: 570 - mptcp_rcv_space_init(msk, sk); 571 - mptcp_propagate_state(parent, sk); 549 + mptcp_propagate_state(parent, sk, subflow, NULL); 572 550 } 573 551 return; 574 552 ··· 753 733 kfree_rcu(ctx, rcu); 754 734 } 755 735 756 - void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, 757 - const struct mptcp_options_received *mp_opt) 736 + void __mptcp_subflow_fully_established(struct mptcp_sock *msk, 737 + struct mptcp_subflow_context *subflow, 738 + const struct mptcp_options_received *mp_opt) 758 739 { 759 - struct mptcp_sock *msk = mptcp_sk(subflow->conn); 760 - 761 740 subflow_set_remote_key(msk, subflow, mp_opt); 762 741 subflow->fully_established = 1; 763 742 WRITE_ONCE(msk->fully_established, true); 764 743 765 744 if (subflow->is_mptfo) 766 - mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt); 745 + __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt); 767 746 } 768 747 769 748 static struct sock *subflow_syn_recv_sock(const struct sock *sk, ··· 855 836 * mpc option 856 837 */ 857 838 if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) { 858 - 
mptcp_subflow_fully_established(ctx, &mp_opt); 859 839 mptcp_pm_fully_established(owner, child); 860 840 ctx->pm_notified = 1; 861 841 } ··· 1764 1746 msk = mptcp_sk(parent); 1765 1747 if (subflow_simultaneous_connect(sk)) { 1766 1748 mptcp_do_fallback(sk); 1767 - mptcp_rcv_space_init(msk, sk); 1768 1749 pr_fallback(msk); 1769 1750 subflow->conn_finished = 1; 1770 - mptcp_propagate_state(parent, sk); 1751 + mptcp_propagate_state(parent, sk, subflow, NULL); 1771 1752 } 1772 1753 1773 1754 /* as recvmsg() does not acquire the subflow socket for ssk selection
+4 -1
net/netfilter/nf_nat_core.c
··· 551 551 find_free_id: 552 552 if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) 553 553 off = (ntohs(*keyptr) - ntohs(range->base_proto.all)); 554 - else 554 + else if ((range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL) || 555 + maniptype != NF_NAT_MANIP_DST) 555 556 off = get_random_u16(); 557 + else 558 + off = 0; 556 559 557 560 attempts = range_size; 558 561 if (attempts > NF_NAT_MAX_ATTEMPTS)
+1
net/netfilter/nft_flow_offload.c
··· 361 361 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; 362 362 } 363 363 364 + __set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags); 364 365 ret = flow_offload_add(flowtable, flow); 365 366 if (ret < 0) 366 367 goto err_flow_add;
+2 -2
net/netfilter/nft_set_pipapo.h
··· 144 144 145 145 /** 146 146 * struct nft_pipapo_match - Data used for lookup and matching 147 - * @field_count Amount of fields in set 147 + * @field_count: Amount of fields in set 148 148 * @scratch: Preallocated per-CPU maps for partial matching results 149 149 * @bsize_max: Maximum lookup table bucket size of all fields, in longs 150 - * @rcu Matching data is swapped on commits 150 + * @rcu: Matching data is swapped on commits 151 151 * @f: Fields, with lookup and mapping tables 152 152 */ 153 153 struct nft_pipapo_match {
+1 -1
net/netfilter/nft_set_pipapo_avx2.c
··· 57 57 58 58 /* Jump to label if @reg is zero */ 59 59 #define NFT_PIPAPO_AVX2_NOMATCH_GOTO(reg, label) \ 60 - asm_volatile_goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \ 60 + asm goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \ 61 61 "je %l[" #label "]" : : : : label) 62 62 63 63 /* Store 256 bits from YMM register into memory. Contrary to bucket load
+33 -16
net/openvswitch/flow_netlink.c
··· 48 48 49 49 #define OVS_ATTR_NESTED -1 50 50 #define OVS_ATTR_VARIABLE -2 51 + #define OVS_COPY_ACTIONS_MAX_DEPTH 16 51 52 52 53 static bool actions_may_change_flow(const struct nlattr *actions) 53 54 { ··· 2546 2545 const struct sw_flow_key *key, 2547 2546 struct sw_flow_actions **sfa, 2548 2547 __be16 eth_type, __be16 vlan_tci, 2549 - u32 mpls_label_count, bool log); 2548 + u32 mpls_label_count, bool log, 2549 + u32 depth); 2550 2550 2551 2551 static int validate_and_copy_sample(struct net *net, const struct nlattr *attr, 2552 2552 const struct sw_flow_key *key, 2553 2553 struct sw_flow_actions **sfa, 2554 2554 __be16 eth_type, __be16 vlan_tci, 2555 - u32 mpls_label_count, bool log, bool last) 2555 + u32 mpls_label_count, bool log, bool last, 2556 + u32 depth) 2556 2557 { 2557 2558 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; 2558 2559 const struct nlattr *probability, *actions; ··· 2605 2602 return err; 2606 2603 2607 2604 err = __ovs_nla_copy_actions(net, actions, key, sfa, 2608 - eth_type, vlan_tci, mpls_label_count, log); 2605 + eth_type, vlan_tci, mpls_label_count, log, 2606 + depth + 1); 2609 2607 2610 2608 if (err) 2611 2609 return err; ··· 2621 2617 const struct sw_flow_key *key, 2622 2618 struct sw_flow_actions **sfa, 2623 2619 __be16 eth_type, __be16 vlan_tci, 2624 - u32 mpls_label_count, bool log) 2620 + u32 mpls_label_count, bool log, 2621 + u32 depth) 2625 2622 { 2626 2623 const struct nlattr *attrs[OVS_DEC_TTL_ATTR_MAX + 1]; 2627 2624 int start, action_start, err, rem; ··· 2665 2660 return action_start; 2666 2661 2667 2662 err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type, 2668 - vlan_tci, mpls_label_count, log); 2663 + vlan_tci, mpls_label_count, log, 2664 + depth + 1); 2669 2665 if (err) 2670 2666 return err; 2671 2667 ··· 2680 2674 const struct sw_flow_key *key, 2681 2675 struct sw_flow_actions **sfa, 2682 2676 __be16 eth_type, __be16 vlan_tci, 2683 - u32 mpls_label_count, bool log, bool last) 2677 + u32 mpls_label_count, 
bool log, bool last, 2678 + u32 depth) 2684 2679 { 2685 2680 int start, err; 2686 2681 u32 exec; ··· 2701 2694 return err; 2702 2695 2703 2696 err = __ovs_nla_copy_actions(net, attr, key, sfa, 2704 - eth_type, vlan_tci, mpls_label_count, log); 2697 + eth_type, vlan_tci, mpls_label_count, log, 2698 + depth + 1); 2705 2699 if (err) 2706 2700 return err; 2707 2701 ··· 3071 3063 struct sw_flow_actions **sfa, 3072 3064 __be16 eth_type, __be16 vlan_tci, 3073 3065 u32 mpls_label_count, 3074 - bool log, bool last) 3066 + bool log, bool last, u32 depth) 3075 3067 { 3076 3068 const struct nlattr *acts_if_greater, *acts_if_lesser_eq; 3077 3069 struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1]; ··· 3119 3111 return nested_acts_start; 3120 3112 3121 3113 err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa, 3122 - eth_type, vlan_tci, mpls_label_count, log); 3114 + eth_type, vlan_tci, mpls_label_count, log, 3115 + depth + 1); 3123 3116 3124 3117 if (err) 3125 3118 return err; ··· 3133 3124 return nested_acts_start; 3134 3125 3135 3126 err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa, 3136 - eth_type, vlan_tci, mpls_label_count, log); 3127 + eth_type, vlan_tci, mpls_label_count, log, 3128 + depth + 1); 3137 3129 3138 3130 if (err) 3139 3131 return err; ··· 3162 3152 const struct sw_flow_key *key, 3163 3153 struct sw_flow_actions **sfa, 3164 3154 __be16 eth_type, __be16 vlan_tci, 3165 - u32 mpls_label_count, bool log) 3155 + u32 mpls_label_count, bool log, 3156 + u32 depth) 3166 3157 { 3167 3158 u8 mac_proto = ovs_key_mac_proto(key); 3168 3159 const struct nlattr *a; 3169 3160 int rem, err; 3161 + 3162 + if (depth > OVS_COPY_ACTIONS_MAX_DEPTH) 3163 + return -EOVERFLOW; 3170 3164 3171 3165 nla_for_each_nested(a, attr, rem) { 3172 3166 /* Expected argument lengths, (u32)-1 for variable length. 
*/ ··· 3369 3355 err = validate_and_copy_sample(net, a, key, sfa, 3370 3356 eth_type, vlan_tci, 3371 3357 mpls_label_count, 3372 - log, last); 3358 + log, last, depth); 3373 3359 if (err) 3374 3360 return err; 3375 3361 skip_copy = true; ··· 3440 3426 err = validate_and_copy_clone(net, a, key, sfa, 3441 3427 eth_type, vlan_tci, 3442 3428 mpls_label_count, 3443 - log, last); 3429 + log, last, depth); 3444 3430 if (err) 3445 3431 return err; 3446 3432 skip_copy = true; ··· 3454 3440 eth_type, 3455 3441 vlan_tci, 3456 3442 mpls_label_count, 3457 - log, last); 3443 + log, last, 3444 + depth); 3458 3445 if (err) 3459 3446 return err; 3460 3447 skip_copy = true; ··· 3465 3450 case OVS_ACTION_ATTR_DEC_TTL: 3466 3451 err = validate_and_copy_dec_ttl(net, a, key, sfa, 3467 3452 eth_type, vlan_tci, 3468 - mpls_label_count, log); 3453 + mpls_label_count, log, 3454 + depth); 3469 3455 if (err) 3470 3456 return err; 3471 3457 skip_copy = true; ··· 3511 3495 3512 3496 (*sfa)->orig_len = nla_len(attr); 3513 3497 err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type, 3514 - key->eth.vlan.tci, mpls_label_count, log); 3498 + key->eth.vlan.tci, mpls_label_count, log, 3499 + 0); 3515 3500 if (err) 3516 3501 ovs_nla_free_flow_actions(*sfa); 3517 3502
+11 -2
net/rds/recv.c
··· 425 425 struct sock *sk = rds_rs_to_sk(rs); 426 426 int ret = 0; 427 427 unsigned long flags; 428 + struct rds_incoming *to_drop = NULL; 428 429 429 430 write_lock_irqsave(&rs->rs_recv_lock, flags); 430 431 if (!list_empty(&inc->i_item)) { ··· 436 435 -be32_to_cpu(inc->i_hdr.h_len), 437 436 inc->i_hdr.h_dport); 438 437 list_del_init(&inc->i_item); 439 - rds_inc_put(inc); 438 + to_drop = inc; 440 439 } 441 440 } 442 441 write_unlock_irqrestore(&rs->rs_recv_lock, flags); 442 + 443 + if (to_drop) 444 + rds_inc_put(to_drop); 443 445 444 446 rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop); 445 447 return ret; ··· 762 758 struct sock *sk = rds_rs_to_sk(rs); 763 759 struct rds_incoming *inc, *tmp; 764 760 unsigned long flags; 761 + LIST_HEAD(to_drop); 765 762 766 763 write_lock_irqsave(&rs->rs_recv_lock, flags); 767 764 list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) { 768 765 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong, 769 766 -be32_to_cpu(inc->i_hdr.h_len), 770 767 inc->i_hdr.h_dport); 768 + list_move(&inc->i_item, &to_drop); 769 + } 770 + write_unlock_irqrestore(&rs->rs_recv_lock, flags); 771 + 772 + list_for_each_entry_safe(inc, tmp, &to_drop, i_item) { 771 773 list_del_init(&inc->i_item); 772 774 rds_inc_put(inc); 773 775 } 774 - write_unlock_irqrestore(&rs->rs_recv_lock, flags); 775 776 } 776 777 777 778 /*
-2
net/sched/act_mirred.c
··· 533 533 * net_device are already rcu protected. 534 534 */ 535 535 RCU_INIT_POINTER(m->tcfm_dev, NULL); 536 - } else if (m->tcfm_blockid) { 537 - m->tcfm_blockid = 0; 538 536 } 539 537 spin_unlock_bh(&m->tcf_lock); 540 538 }
+1
net/sched/em_canid.c
··· 222 222 tcf_em_unregister(&em_canid_ops); 223 223 } 224 224 225 + MODULE_DESCRIPTION("ematch classifier to match CAN IDs embedded in skb CAN frames"); 225 226 MODULE_LICENSE("GPL"); 226 227 227 228 module_init(init_em_canid);
+1
net/sched/em_cmp.c
··· 87 87 tcf_em_unregister(&em_cmp_ops); 88 88 } 89 89 90 + MODULE_DESCRIPTION("ematch classifier for basic data types(8/16/32 bit) against skb data"); 90 91 MODULE_LICENSE("GPL"); 91 92 92 93 module_init(init_em_cmp);
+1
net/sched/em_meta.c
··· 1006 1006 tcf_em_unregister(&em_meta_ops); 1007 1007 } 1008 1008 1009 + MODULE_DESCRIPTION("ematch classifier for various internal kernel metadata, skb metadata and sk metadata"); 1009 1010 MODULE_LICENSE("GPL"); 1010 1011 1011 1012 module_init(init_em_meta);
+1
net/sched/em_nbyte.c
··· 68 68 tcf_em_unregister(&em_nbyte_ops); 69 69 } 70 70 71 + MODULE_DESCRIPTION("ematch classifier for arbitrary skb multi-bytes"); 71 72 MODULE_LICENSE("GPL"); 72 73 73 74 module_init(init_em_nbyte);
+1
net/sched/em_text.c
··· 147 147 tcf_em_unregister(&em_text_ops); 148 148 } 149 149 150 + MODULE_DESCRIPTION("ematch classifier for embedded text in skbs"); 150 151 MODULE_LICENSE("GPL"); 151 152 152 153 module_init(init_em_text);
+1
net/sched/em_u32.c
··· 52 52 tcf_em_unregister(&em_u32_ops); 53 53 } 54 54 55 + MODULE_DESCRIPTION("ematch skb classifier using 32 bit chunks of data"); 55 56 MODULE_LICENSE("GPL"); 56 57 57 58 module_init(init_em_u32);
+10 -4
net/sctp/inqueue.c
··· 38 38 INIT_WORK(&queue->immediate, NULL); 39 39 } 40 40 41 + /* Properly release the chunk which is being worked on. */ 42 + static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk) 43 + { 44 + if (chunk->head_skb) 45 + chunk->skb = chunk->head_skb; 46 + sctp_chunk_free(chunk); 47 + } 48 + 41 49 /* Release the memory associated with an SCTP inqueue. */ 42 50 void sctp_inq_free(struct sctp_inq *queue) 43 51 { ··· 61 53 * free it as well. 62 54 */ 63 55 if (queue->in_progress) { 64 - sctp_chunk_free(queue->in_progress); 56 + sctp_inq_chunk_free(queue->in_progress); 65 57 queue->in_progress = NULL; 66 58 } 67 59 } ··· 138 130 goto new_skb; 139 131 } 140 132 141 - if (chunk->head_skb) 142 - chunk->skb = chunk->head_skb; 143 - sctp_chunk_free(chunk); 133 + sctp_inq_chunk_free(chunk); 144 134 chunk = queue->in_progress = NULL; 145 135 } else { 146 136 /* Nothing to do. Next chunk in the packet, please. */
+1
net/smc/af_smc.c
··· 924 924 smc->clcsock->file->private_data = smc->clcsock; 925 925 smc->clcsock->wq.fasync_list = 926 926 smc->sk.sk_socket->wq.fasync_list; 927 + smc->sk.sk_socket->wq.fasync_list = NULL; 927 928 928 929 /* There might be some wait entries remaining 929 930 * in smc sk->sk_wq and they should be woken up
+62 -73
net/tls/tls_sw.c
··· 63 63 u8 iv[TLS_MAX_IV_SIZE]; 64 64 u8 aad[TLS_MAX_AAD_SIZE]; 65 65 u8 tail; 66 + bool free_sgout; 66 67 struct scatterlist sg[]; 67 68 }; 68 69 ··· 188 187 struct aead_request *aead_req = data; 189 188 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); 190 189 struct scatterlist *sgout = aead_req->dst; 191 - struct scatterlist *sgin = aead_req->src; 192 190 struct tls_sw_context_rx *ctx; 193 191 struct tls_decrypt_ctx *dctx; 194 192 struct tls_context *tls_ctx; ··· 195 195 unsigned int pages; 196 196 struct sock *sk; 197 197 int aead_size; 198 + 199 + /* If requests get too backlogged crypto API returns -EBUSY and calls 200 + * ->complete(-EINPROGRESS) immediately followed by ->complete(0) 201 + * to make waiting for backlog to flush with crypto_wait_req() easier. 202 + * First wait converts -EBUSY -> -EINPROGRESS, and the second one 203 + * -EINPROGRESS -> 0. 204 + * We have a single struct crypto_async_request per direction, this 205 + * scheme doesn't help us, so just ignore the first ->complete(). 
206 + */ 207 + if (err == -EINPROGRESS) 208 + return; 198 209 199 210 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead); 200 211 aead_size = ALIGN(aead_size, __alignof__(*dctx)); ··· 224 213 } 225 214 226 215 /* Free the destination pages if skb was not decrypted inplace */ 227 - if (sgout != sgin) { 216 + if (dctx->free_sgout) { 228 217 /* Skip the first S/G entry as it points to AAD */ 229 218 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { 230 219 if (!sg) ··· 235 224 236 225 kfree(aead_req); 237 226 238 - spin_lock_bh(&ctx->decrypt_compl_lock); 239 - if (!atomic_dec_return(&ctx->decrypt_pending)) 227 + if (atomic_dec_and_test(&ctx->decrypt_pending)) 240 228 complete(&ctx->async_wait.completion); 241 - spin_unlock_bh(&ctx->decrypt_compl_lock); 229 + } 230 + 231 + static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx) 232 + { 233 + if (!atomic_dec_and_test(&ctx->decrypt_pending)) 234 + crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 235 + atomic_inc(&ctx->decrypt_pending); 236 + 237 + return ctx->async_wait.err; 242 238 } 243 239 244 240 static int tls_do_decryption(struct sock *sk, ··· 271 253 aead_request_set_callback(aead_req, 272 254 CRYPTO_TFM_REQ_MAY_BACKLOG, 273 255 tls_decrypt_done, aead_req); 256 + DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1); 274 257 atomic_inc(&ctx->decrypt_pending); 275 258 } else { 276 259 aead_request_set_callback(aead_req, ··· 280 261 } 281 262 282 263 ret = crypto_aead_decrypt(aead_req); 264 + if (ret == -EBUSY) { 265 + ret = tls_decrypt_async_wait(ctx); 266 + ret = ret ?: -EINPROGRESS; 267 + } 283 268 if (ret == -EINPROGRESS) { 284 269 if (darg->async) 285 270 return 0; ··· 462 439 struct tls_rec *rec = data; 463 440 struct scatterlist *sge; 464 441 struct sk_msg *msg_en; 465 - bool ready = false; 466 442 struct sock *sk; 467 - int pending; 443 + 444 + if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */ 445 + return; 468 446 469 447 msg_en = &rec->msg_encrypted; 470 448 ··· 
500 476 /* If received record is at head of tx_list, schedule tx */ 501 477 first_rec = list_first_entry(&ctx->tx_list, 502 478 struct tls_rec, list); 503 - if (rec == first_rec) 504 - ready = true; 479 + if (rec == first_rec) { 480 + /* Schedule the transmission */ 481 + if (!test_and_set_bit(BIT_TX_SCHEDULED, 482 + &ctx->tx_bitmask)) 483 + schedule_delayed_work(&ctx->tx_work.work, 1); 484 + } 505 485 } 506 486 507 - spin_lock_bh(&ctx->encrypt_compl_lock); 508 - pending = atomic_dec_return(&ctx->encrypt_pending); 509 - 510 - if (!pending && ctx->async_notify) 487 + if (atomic_dec_and_test(&ctx->encrypt_pending)) 511 488 complete(&ctx->async_wait.completion); 512 - spin_unlock_bh(&ctx->encrypt_compl_lock); 489 + } 513 490 514 - if (!ready) 515 - return; 491 + static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx) 492 + { 493 + if (!atomic_dec_and_test(&ctx->encrypt_pending)) 494 + crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 495 + atomic_inc(&ctx->encrypt_pending); 516 496 517 - /* Schedule the transmission */ 518 - if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 519 - schedule_delayed_work(&ctx->tx_work.work, 1); 497 + return ctx->async_wait.err; 520 498 } 521 499 522 500 static int tls_do_encryption(struct sock *sk, ··· 567 541 568 542 /* Add the record in tx_list */ 569 543 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list); 544 + DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1); 570 545 atomic_inc(&ctx->encrypt_pending); 571 546 572 547 rc = crypto_aead_encrypt(aead_req); 548 + if (rc == -EBUSY) { 549 + rc = tls_encrypt_async_wait(ctx); 550 + rc = rc ?: -EINPROGRESS; 551 + } 573 552 if (!rc || rc != -EINPROGRESS) { 574 553 atomic_dec(&ctx->encrypt_pending); 575 554 sge->offset -= prot->prepend_size; ··· 1015 984 int num_zc = 0; 1016 985 int orig_size; 1017 986 int ret = 0; 1018 - int pending; 1019 987 1020 988 if (!eor && (msg->msg_flags & MSG_EOR)) 1021 989 return -EINVAL; ··· 1193 1163 if (!num_async) { 1194 
1164 goto send_end; 1195 1165 } else if (num_zc) { 1166 + int err; 1167 + 1196 1168 /* Wait for pending encryptions to get completed */ 1197 - spin_lock_bh(&ctx->encrypt_compl_lock); 1198 - ctx->async_notify = true; 1199 - 1200 - pending = atomic_read(&ctx->encrypt_pending); 1201 - spin_unlock_bh(&ctx->encrypt_compl_lock); 1202 - if (pending) 1203 - crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1204 - else 1205 - reinit_completion(&ctx->async_wait.completion); 1206 - 1207 - /* There can be no concurrent accesses, since we have no 1208 - * pending encrypt operations 1209 - */ 1210 - WRITE_ONCE(ctx->async_notify, false); 1211 - 1212 - if (ctx->async_wait.err) { 1213 - ret = ctx->async_wait.err; 1169 + err = tls_encrypt_async_wait(ctx); 1170 + if (err) { 1171 + ret = err; 1214 1172 copied = 0; 1215 1173 } 1216 1174 } ··· 1247 1229 ssize_t copied = 0; 1248 1230 bool retrying = false; 1249 1231 int ret = 0; 1250 - int pending; 1251 1232 1252 1233 if (!ctx->open_rec) 1253 1234 return; ··· 1281 1264 } 1282 1265 1283 1266 /* Wait for pending encryptions to get completed */ 1284 - spin_lock_bh(&ctx->encrypt_compl_lock); 1285 - ctx->async_notify = true; 1286 - 1287 - pending = atomic_read(&ctx->encrypt_pending); 1288 - spin_unlock_bh(&ctx->encrypt_compl_lock); 1289 - if (pending) 1290 - crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1291 - else 1292 - reinit_completion(&ctx->async_wait.completion); 1293 - 1294 - /* There can be no concurrent accesses, since we have no pending 1295 - * encrypt operations 1296 - */ 1297 - WRITE_ONCE(ctx->async_notify, false); 1298 - 1299 - if (ctx->async_wait.err) 1267 + if (tls_encrypt_async_wait(ctx)) 1300 1268 goto unlock; 1301 1269 1302 1270 /* Transmit if any encryptions have completed */ ··· 1583 1581 } else if (out_sg) { 1584 1582 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); 1585 1583 } 1584 + dctx->free_sgout = !!pages; 1586 1585 1587 1586 /* Prepare and submit AEAD request */ 1588 1587 err = tls_do_decryption(sk, sgin, sgout, 
dctx->iv, ··· 2112 2109 2113 2110 recv_end: 2114 2111 if (async) { 2115 - int ret, pending; 2112 + int ret; 2116 2113 2117 2114 /* Wait for all previously submitted records to be decrypted */ 2118 - spin_lock_bh(&ctx->decrypt_compl_lock); 2119 - reinit_completion(&ctx->async_wait.completion); 2120 - pending = atomic_read(&ctx->decrypt_pending); 2121 - spin_unlock_bh(&ctx->decrypt_compl_lock); 2122 - ret = 0; 2123 - if (pending) 2124 - ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 2115 + ret = tls_decrypt_async_wait(ctx); 2125 2116 __skb_queue_purge(&ctx->async_hold); 2126 2117 2127 2118 if (ret) { ··· 2132 2135 else 2133 2136 err = process_rx_list(ctx, msg, &control, 0, 2134 2137 async_copy_bytes, is_peek); 2135 - decrypted += max(err, 0); 2136 2138 } 2137 2139 2138 2140 copied += decrypted; ··· 2431 2435 struct tls_context *tls_ctx = tls_get_ctx(sk); 2432 2436 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2433 2437 struct tls_rec *rec, *tmp; 2434 - int pending; 2435 2438 2436 2439 /* Wait for any pending async encryptions to complete */ 2437 - spin_lock_bh(&ctx->encrypt_compl_lock); 2438 - ctx->async_notify = true; 2439 - pending = atomic_read(&ctx->encrypt_pending); 2440 - spin_unlock_bh(&ctx->encrypt_compl_lock); 2441 - 2442 - if (pending) 2443 - crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 2440 + tls_encrypt_async_wait(ctx); 2444 2441 2445 2442 tls_tx_records(sk, -1); 2446 2443 ··· 2596 2607 } 2597 2608 2598 2609 crypto_init_wait(&sw_ctx_tx->async_wait); 2599 - spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); 2610 + atomic_set(&sw_ctx_tx->encrypt_pending, 1); 2600 2611 INIT_LIST_HEAD(&sw_ctx_tx->tx_list); 2601 2612 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); 2602 2613 sw_ctx_tx->tx_work.sk = sk; ··· 2617 2628 } 2618 2629 2619 2630 crypto_init_wait(&sw_ctx_rx->async_wait); 2620 - spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); 2631 + atomic_set(&sw_ctx_rx->decrypt_pending, 1); 2621 2632 init_waitqueue_head(&sw_ctx_rx->wq); 
2622 2633 skb_queue_head_init(&sw_ctx_rx->rx_list); 2623 2634 skb_queue_head_init(&sw_ctx_rx->async_hold);
+4 -3
net/unix/garbage.c
··· 340 340 __skb_queue_purge(&hitlist); 341 341 342 342 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 343 - list_for_each_entry_safe(u, next, &gc_candidates, link) { 344 - struct sk_buff *skb = u->oob_skb; 343 + while (!list_empty(&gc_candidates)) { 344 + u = list_entry(gc_candidates.next, struct unix_sock, link); 345 + if (u->oob_skb) { 346 + struct sk_buff *skb = u->oob_skb; 345 347 346 - if (skb) { 347 348 u->oob_skb = NULL; 348 349 kfree_skb(skb); 349 350 }
+1
net/xfrm/xfrm_algo.c
··· 858 858 } 859 859 EXPORT_SYMBOL_GPL(xfrm_count_pfkey_enc_supported); 860 860 861 + MODULE_DESCRIPTION("XFRM Algorithm interface"); 861 862 MODULE_LICENSE("GPL");
+1
net/xfrm/xfrm_user.c
··· 3888 3888 3889 3889 module_init(xfrm_user_init); 3890 3890 module_exit(xfrm_user_exit); 3891 + MODULE_DESCRIPTION("XFRM User interface"); 3891 3892 MODULE_LICENSE("GPL"); 3892 3893 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
+4 -4
samples/bpf/asm_goto_workaround.h
··· 4 4 #define __ASM_GOTO_WORKAROUND_H 5 5 6 6 /* 7 - * This will bring in asm_volatile_goto and asm_inline macro definitions 7 + * This will bring in asm_goto_output and asm_inline macro definitions 8 8 * if enabled by compiler and config options. 9 9 */ 10 10 #include <linux/types.h> 11 11 12 - #ifdef asm_volatile_goto 13 - #undef asm_volatile_goto 14 - #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto") 12 + #ifdef asm_goto_output 13 + #undef asm_goto_output 14 + #define asm_goto_output(x...) asm volatile("invalid use of asm_goto_output") 15 15 #endif 16 16 17 17 /*
+1 -1
tools/arch/x86/include/asm/rmwcc.h
··· 4 4 5 5 #define __GEN_RMWcc(fullop, var, cc, ...) \ 6 6 do { \ 7 - asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \ 7 + asm goto (fullop "; j" cc " %l[cc_label]" \ 8 8 : : "m" (var), ## __VA_ARGS__ \ 9 9 : "memory" : cc_label); \ 10 10 return 0; \
+2 -2
tools/include/linux/compiler_types.h
··· 36 36 #include <linux/compiler-gcc.h> 37 37 #endif 38 38 39 - #ifndef asm_volatile_goto 40 - #define asm_volatile_goto(x...) asm goto(x) 39 + #ifndef asm_goto_output 40 + #define asm_goto_output(x...) asm goto(x) 41 41 #endif 42 42 43 43 #endif /* __LINUX_COMPILER_TYPES_H */
+1
tools/testing/selftests/core/close_range_test.c
··· 12 12 #include <syscall.h> 13 13 #include <unistd.h> 14 14 #include <sys/resource.h> 15 + #include <linux/close_range.h> 15 16 16 17 #include "../kselftest_harness.h" 17 18 #include "../clone3/clone3_selftests.h"
+7 -6
tools/testing/selftests/dt/test_unprobed_devices.sh
··· 33 33 fi 34 34 35 35 nodes_compatible=$( 36 - for node_compat in $(find ${PDT} -name compatible); do 37 - node=$(dirname "${node_compat}") 36 + for node in $(find ${PDT} -type d); do 37 + [ ! -f "${node}"/compatible ] && continue 38 38 # Check if node is available 39 39 if [[ -e "${node}"/status ]]; then 40 40 status=$(tr -d '\000' < "${node}"/status) ··· 46 46 47 47 nodes_dev_bound=$( 48 48 IFS=$'\n' 49 - for uevent in $(find /sys/devices -name uevent); do 50 - if [[ -d "$(dirname "${uevent}")"/driver ]]; then 51 - grep '^OF_FULLNAME=' "${uevent}" | sed -e 's|OF_FULLNAME=||' 52 - fi 49 + for dev_dir in $(find /sys/devices -type d); do 50 + [ ! -f "${dev_dir}"/uevent ] && continue 51 + [ ! -d "${dev_dir}"/driver ] && continue 52 + 53 + grep '^OF_FULLNAME=' "${dev_dir}"/uevent | sed -e 's|OF_FULLNAME=||' 53 54 done 54 55 ) 55 56
+40 -8
tools/testing/selftests/landlock/common.h
··· 9 9 10 10 #include <errno.h> 11 11 #include <linux/landlock.h> 12 + #include <linux/securebits.h> 12 13 #include <sys/capability.h> 13 14 #include <sys/socket.h> 14 15 #include <sys/syscall.h> ··· 116 115 /* clang-format off */ 117 116 CAP_DAC_OVERRIDE, 118 117 CAP_MKNOD, 118 + CAP_NET_ADMIN, 119 + CAP_NET_BIND_SERVICE, 119 120 CAP_SYS_ADMIN, 120 121 CAP_SYS_CHROOT, 121 - CAP_NET_BIND_SERVICE, 122 122 /* clang-format on */ 123 123 }; 124 + const unsigned int noroot = SECBIT_NOROOT | SECBIT_NOROOT_LOCKED; 125 + 126 + if ((cap_get_secbits() & noroot) != noroot) 127 + EXPECT_EQ(0, cap_set_secbits(noroot)); 124 128 125 129 cap_p = cap_get_proc(); 126 130 EXPECT_NE(NULL, cap_p) ··· 143 137 TH_LOG("Failed to cap_set_flag: %s", strerror(errno)); 144 138 } 145 139 } 140 + 141 + /* Automatically resets ambient capabilities. */ 146 142 EXPECT_NE(-1, cap_set_proc(cap_p)) 147 143 { 148 144 TH_LOG("Failed to cap_set_proc: %s", strerror(errno)); ··· 153 145 { 154 146 TH_LOG("Failed to cap_free: %s", strerror(errno)); 155 147 } 148 + 149 + /* Quickly checks that ambient capabilities are cleared. */ 150 + EXPECT_NE(-1, cap_get_ambient(caps[0])); 156 151 } 157 152 158 153 /* We cannot put such helpers in a library because of kselftest_harness.h . 
*/ ··· 169 158 _init_caps(_metadata, true); 170 159 } 171 160 172 - static void _effective_cap(struct __test_metadata *const _metadata, 173 - const cap_value_t caps, const cap_flag_value_t value) 161 + static void _change_cap(struct __test_metadata *const _metadata, 162 + const cap_flag_t flag, const cap_value_t cap, 163 + const cap_flag_value_t value) 174 164 { 175 165 cap_t cap_p; 176 166 ··· 180 168 { 181 169 TH_LOG("Failed to cap_get_proc: %s", strerror(errno)); 182 170 } 183 - EXPECT_NE(-1, cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &caps, value)) 171 + EXPECT_NE(-1, cap_set_flag(cap_p, flag, 1, &cap, value)) 184 172 { 185 173 TH_LOG("Failed to cap_set_flag: %s", strerror(errno)); 186 174 } ··· 195 183 } 196 184 197 185 static void __maybe_unused set_cap(struct __test_metadata *const _metadata, 198 - const cap_value_t caps) 186 + const cap_value_t cap) 199 187 { 200 - _effective_cap(_metadata, caps, CAP_SET); 188 + _change_cap(_metadata, CAP_EFFECTIVE, cap, CAP_SET); 201 189 } 202 190 203 191 static void __maybe_unused clear_cap(struct __test_metadata *const _metadata, 204 - const cap_value_t caps) 192 + const cap_value_t cap) 205 193 { 206 - _effective_cap(_metadata, caps, CAP_CLEAR); 194 + _change_cap(_metadata, CAP_EFFECTIVE, cap, CAP_CLEAR); 195 + } 196 + 197 + static void __maybe_unused 198 + set_ambient_cap(struct __test_metadata *const _metadata, const cap_value_t cap) 199 + { 200 + _change_cap(_metadata, CAP_INHERITABLE, cap, CAP_SET); 201 + 202 + EXPECT_NE(-1, cap_set_ambient(cap, CAP_SET)) 203 + { 204 + TH_LOG("Failed to set ambient capability %d: %s", cap, 205 + strerror(errno)); 206 + } 207 + } 208 + 209 + static void __maybe_unused clear_ambient_cap( 210 + struct __test_metadata *const _metadata, const cap_value_t cap) 211 + { 212 + EXPECT_EQ(1, cap_get_ambient(cap)); 213 + _change_cap(_metadata, CAP_INHERITABLE, cap, CAP_CLEAR); 214 + EXPECT_EQ(0, cap_get_ambient(cap)); 207 215 } 208 216 209 217 /* Receives an FD from a UNIX socket. 
Returns the received FD, or -errno. */
+8 -3
tools/testing/selftests/landlock/fs_test.c
··· 241 241 const char *const data; 242 242 }; 243 243 244 - const struct mnt_opt mnt_tmp = { 244 + #define MNT_TMP_DATA "size=4m,mode=700" 245 + 246 + static const struct mnt_opt mnt_tmp = { 245 247 .type = "tmpfs", 246 - .data = "size=4m,mode=700", 248 + .data = MNT_TMP_DATA, 247 249 }; 248 250 249 251 static int mount_opt(const struct mnt_opt *const mnt, const char *const target) ··· 4634 4632 /* clang-format off */ 4635 4633 FIXTURE_VARIANT_ADD(layout3_fs, tmpfs) { 4636 4634 /* clang-format on */ 4637 - .mnt = mnt_tmp, 4635 + .mnt = { 4636 + .type = "tmpfs", 4637 + .data = MNT_TMP_DATA, 4638 + }, 4638 4639 .file_path = file1_s1d1, 4639 4640 }; 4640 4641
+11 -2
tools/testing/selftests/landlock/net_test.c
··· 17 17 #include <string.h> 18 18 #include <sys/prctl.h> 19 19 #include <sys/socket.h> 20 + #include <sys/syscall.h> 20 21 #include <sys/un.h> 21 22 22 23 #include "common.h" ··· 55 54 }; 56 55 }; 57 56 57 + static pid_t sys_gettid(void) 58 + { 59 + return syscall(__NR_gettid); 60 + } 61 + 58 62 static int set_service(struct service_fixture *const srv, 59 63 const struct protocol_variant prot, 60 64 const unsigned short index) ··· 94 88 case AF_UNIX: 95 89 srv->unix_addr.sun_family = prot.domain; 96 90 sprintf(srv->unix_addr.sun_path, 97 - "_selftests-landlock-net-tid%d-index%d", gettid(), 91 + "_selftests-landlock-net-tid%d-index%d", sys_gettid(), 98 92 index); 99 93 srv->unix_addr_len = SUN_LEN(&srv->unix_addr); 100 94 srv->unix_addr.sun_path[0] = '\0'; ··· 107 101 { 108 102 set_cap(_metadata, CAP_SYS_ADMIN); 109 103 ASSERT_EQ(0, unshare(CLONE_NEWNET)); 110 - ASSERT_EQ(0, system("ip link set dev lo up")); 111 104 clear_cap(_metadata, CAP_SYS_ADMIN); 105 + 106 + set_ambient_cap(_metadata, CAP_NET_ADMIN); 107 + ASSERT_EQ(0, system("ip link set dev lo up")); 108 + clear_ambient_cap(_metadata, CAP_NET_ADMIN); 112 109 } 113 110 114 111 static bool is_restricted(const struct protocol_variant *const prot,
+5 -1
tools/testing/selftests/net/config
··· 24 24 CONFIG_INET_DIAG=y 25 25 CONFIG_INET_ESP=y 26 26 CONFIG_INET_ESP_OFFLOAD=y 27 + CONFIG_NET_FOU=y 28 + CONFIG_NET_FOU_IP_TUNNELS=y 27 29 CONFIG_IP_GRE=m 28 30 CONFIG_NETFILTER=y 29 31 CONFIG_NETFILTER_ADVANCED=y 30 32 CONFIG_NF_CONNTRACK=m 33 + CONFIG_IPV6_SIT=y 34 + CONFIG_IP_DCCP=m 31 35 CONFIG_NF_NAT=m 32 36 CONFIG_IP6_NF_IPTABLES=m 33 37 CONFIG_IP_NF_IPTABLES=m ··· 66 62 CONFIG_NET_CLS_U32=m 67 63 CONFIG_NET_IPGRE_DEMUX=m 68 64 CONFIG_NET_IPGRE=m 65 + CONFIG_NET_IPIP=y 69 66 CONFIG_NET_SCH_FQ_CODEL=m 70 67 CONFIG_NET_SCH_HTB=m 71 68 CONFIG_NET_SCH_FQ=m ··· 83 78 CONFIG_TRACEPOINTS=y 84 79 CONFIG_NET_DROP_MONITOR=m 85 80 CONFIG_NETDEVSIM=m 86 - CONFIG_NET_FOU=m 87 81 CONFIG_MPLS_ROUTING=m 88 82 CONFIG_MPLS_IPTUNNEL=m 89 83 CONFIG_NET_SCH_INGRESS=m
+2 -2
tools/testing/selftests/net/forwarding/bridge_locked_port.sh
··· 327 327 RET=0 328 328 check_port_mab_support || return 0 329 329 330 - bridge link set dev $swp1 learning on locked on mab on 331 330 tc qdisc add dev $swp1 clsact 332 331 tc filter add dev $swp1 ingress protocol all pref 1 handle 101 flower \ 333 332 action mirred egress redirect dev $swp2 333 + bridge link set dev $swp1 learning on locked on mab on 334 334 335 335 ping_do $h1 192.0.2.2 336 336 check_err $? "Ping did not work with redirection" ··· 349 349 check_err $? "Locked entry not created after deleting filter" 350 350 351 351 bridge fdb del `mac_get $h1` vlan 1 dev $swp1 master 352 - tc qdisc del dev $swp1 clsact 353 352 bridge link set dev $swp1 learning off locked off mab off 353 + tc qdisc del dev $swp1 clsact 354 354 355 355 log_test "Locked port MAB redirect" 356 356 }
+9 -5
tools/testing/selftests/net/forwarding/bridge_mdb.sh
··· 329 329 330 330 bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00" 331 331 check_err $? "(*, G) \"permanent\" entry has a pending group timer" 332 - bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "\/0.00" 332 + bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00" 333 333 check_err $? "\"permanent\" source entry has a pending source timer" 334 334 335 335 bridge mdb del dev br0 port $swp1 grp $grp vid 10 ··· 346 346 347 347 bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00" 348 348 check_fail $? "(*, G) EXCLUDE entry does not have a pending group timer" 349 - bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "\/0.00" 349 + bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00" 350 350 check_err $? "\"blocked\" source entry has a pending source timer" 351 351 352 352 bridge mdb del dev br0 port $swp1 grp $grp vid 10 ··· 363 363 364 364 bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00" 365 365 check_err $? "(*, G) INCLUDE entry has a pending group timer" 366 - bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "\/0.00" 366 + bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00" 367 367 check_fail $? "Source entry does not have a pending source timer" 368 368 369 369 bridge mdb del dev br0 port $swp1 grp $grp vid 10 ··· 1252 1252 echo 1253 1253 log_info "# Forwarding tests" 1254 1254 1255 + # Set the Max Response Delay to 100 centiseconds (1 second) so that the 1256 + # bridge will start forwarding according to its MDB soon after a 1257 + # multicast querier is enabled. 1258 + ip link set dev br0 type bridge mcast_query_response_interval 100 1259 + 1255 1260 # Forwarding according to MDB entries only takes place when the bridge 1256 1261 # detects that there is a valid querier in the network. Set the bridge 1257 1262 # as the querier and assign it a valid IPv6 link-local address to be 1258 1263 # used as the source address for MLD queries. 
1259 1264 ip -6 address add fe80::1/64 nodad dev br0 1260 1265 ip link set dev br0 type bridge mcast_querier 1 1261 - # Wait the default Query Response Interval (10 seconds) for the bridge 1262 - # to determine that there are no other queriers in the network. 1263 1266 sleep 10 1264 1267 1265 1268 fwd_test_host ··· 1270 1267 1271 1268 ip link set dev br0 type bridge mcast_querier 0 1272 1269 ip -6 address del fe80::1/64 dev br0 1270 + ip link set dev br0 type bridge mcast_query_response_interval 1000 1273 1271 } 1274 1272 1275 1273 ctrl_igmpv3_is_in_test()
+6 -2
tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
··· 209 209 # both registered and unregistered multicast traffic. 210 210 bridge link set dev $swp2 mcast_router 2 211 211 212 + # Set the Max Response Delay to 100 centiseconds (1 second) so that the 213 + # bridge will start forwarding according to its MDB soon after a 214 + # multicast querier is enabled. 215 + ip link set dev br1 type bridge mcast_query_response_interval 100 216 + 212 217 # Forwarding according to MDB entries only takes place when the bridge 213 218 # detects that there is a valid querier in the network. Set the bridge 214 219 # as the querier and assign it a valid IPv6 link-local address to be 215 220 # used as the source address for MLD queries. 216 221 ip link set dev br1 type bridge mcast_querier 1 217 222 ip -6 address add fe80::1/64 nodad dev br1 218 - # Wait the default Query Response Interval (10 seconds) for the bridge 219 - # to determine that there are no other queriers in the network. 220 223 sleep 10 221 224 222 225 test_l2_miss_multicast_ipv4 ··· 227 224 228 225 ip -6 address del fe80::1/64 dev br1 229 226 ip link set dev br1 type bridge mcast_querier 0 227 + ip link set dev br1 type bridge mcast_query_response_interval 1000 230 228 bridge link set dev $swp2 mcast_router 1 231 229 } 232 230
+5
tools/testing/selftests/net/gro.sh
··· 31 31 1>>log.txt 32 32 wait "${server_pid}" 33 33 exit_code=$? 34 + if [[ ${test} == "large" && -n "${KSFT_MACHINE_SLOW}" && \ 35 + ${exit_code} -ne 0 ]]; then 36 + echo "Ignoring errors due to slow environment" 1>&2 37 + exit_code=0 38 + fi 34 39 if [[ "${exit_code}" -eq 0 ]]; then 35 40 break; 36 41 fi
+4
tools/testing/selftests/net/ip_local_port_range.c
··· 16 16 #define IP_LOCAL_PORT_RANGE 51 17 17 #endif 18 18 19 + #ifndef IPPROTO_MPTCP 20 + #define IPPROTO_MPTCP 262 21 + #endif 22 + 19 23 static __u32 pack_port_range(__u16 lo, __u16 hi) 20 24 { 21 25 return (hi << 16) | (lo << 0);
+7 -4
tools/testing/selftests/net/net_helper.sh
··· 8 8 local listener_ns="${1}" 9 9 local port="${2}" 10 10 local protocol="${3}" 11 - local port_hex 11 + local pattern 12 12 local i 13 13 14 - port_hex="$(printf "%04X" "${port}")" 14 + pattern=":$(printf "%04X" "${port}") " 15 + 16 + # for tcp protocol additionally check the socket state 17 + [ ${protocol} = "tcp" ] && pattern="${pattern}0A" 15 18 for i in $(seq 10); do 16 - if ip netns exec "${listener_ns}" cat /proc/net/"${protocol}"* | \ 17 - grep -q "${port_hex}"; then 19 + if ip netns exec "${listener_ns}" awk '{print $2" "$4}' \ 20 + /proc/net/"${protocol}"* | grep -q "${pattern}"; then 18 21 break 19 22 fi 20 23 sleep 0.1
+13
tools/testing/selftests/net/openvswitch/openvswitch.sh
··· 564 564 wc -l) == 2 ] || \ 565 565 return 1 566 566 567 + info "Checking clone depth" 567 568 ERR_MSG="Flow actions may not be safe on all matching packets" 569 + PRE_TEST=$(dmesg | grep -c "${ERR_MSG}") 570 + ovs_add_flow "test_netlink_checks" nv0 \ 571 + 'in_port(1),eth(),eth_type(0x800),ipv4()' \ 572 + 'clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(drop)))))))))))))))))' \ 573 + >/dev/null 2>&1 && return 1 574 + POST_TEST=$(dmesg | grep -c "${ERR_MSG}") 575 + 576 + if [ "$PRE_TEST" == "$POST_TEST" ]; then 577 + info "failed - clone depth too large" 578 + return 1 579 + fi 580 + 568 581 PRE_TEST=$(dmesg | grep -c "${ERR_MSG}") 569 582 ovs_add_flow "test_netlink_checks" nv0 \ 570 583 'in_port(1),eth(),eth_type(0x0806),arp()' 'drop(0),2' \
+56 -15
tools/testing/selftests/net/openvswitch/ovs-dpctl.py
··· 299 299 ("OVS_ACTION_ATTR_PUSH_NSH", "none"), 300 300 ("OVS_ACTION_ATTR_POP_NSH", "flag"), 301 301 ("OVS_ACTION_ATTR_METER", "none"), 302 - ("OVS_ACTION_ATTR_CLONE", "none"), 302 + ("OVS_ACTION_ATTR_CLONE", "recursive"), 303 303 ("OVS_ACTION_ATTR_CHECK_PKT_LEN", "none"), 304 304 ("OVS_ACTION_ATTR_ADD_MPLS", "none"), 305 305 ("OVS_ACTION_ATTR_DEC_TTL", "none"), ··· 465 465 print_str += "pop_mpls" 466 466 else: 467 467 datum = self.get_attr(field[0]) 468 - print_str += datum.dpstr(more) 468 + if field[0] == "OVS_ACTION_ATTR_CLONE": 469 + print_str += "clone(" 470 + print_str += datum.dpstr(more) 471 + print_str += ")" 472 + else: 473 + print_str += datum.dpstr(more) 469 474 470 475 return print_str 471 476 472 477 def parse(self, actstr): 478 + totallen = len(actstr) 473 479 while len(actstr) != 0: 474 480 parsed = False 481 + parencount = 0 475 482 if actstr.startswith("drop"): 476 483 # If no reason is provided, the implicit drop is used (i.e no 477 484 # action). If some reason is given, an explicit action is used. 
478 - actstr, reason = parse_extract_field( 479 - actstr, 480 - "drop(", 481 - "([0-9]+)", 482 - lambda x: int(x, 0), 483 - False, 484 - None, 485 - ) 485 + reason = None 486 + if actstr.startswith("drop("): 487 + parencount += 1 488 + 489 + actstr, reason = parse_extract_field( 490 + actstr, 491 + "drop(", 492 + "([0-9]+)", 493 + lambda x: int(x, 0), 494 + False, 495 + None, 496 + ) 497 + 486 498 if reason is not None: 487 499 self["attrs"].append(["OVS_ACTION_ATTR_DROP", reason]) 488 500 parsed = True 489 501 else: 490 - return 502 + actstr = actstr[len("drop"): ] 503 + return (totallen - len(actstr)) 491 504 492 505 elif parse_starts_block(actstr, "^(\d+)", False, True): 493 506 actstr, output = parse_extract_field( ··· 517 504 False, 518 505 0, 519 506 ) 507 + parencount += 1 520 508 self["attrs"].append(["OVS_ACTION_ATTR_RECIRC", recircid]) 521 509 parsed = True 522 510 ··· 530 516 531 517 for flat_act in parse_flat_map: 532 518 if parse_starts_block(actstr, flat_act[0], False): 533 - actstr += len(flat_act[0]) 519 + actstr = actstr[len(flat_act[0]):] 534 520 self["attrs"].append([flat_act[1]]) 535 521 actstr = actstr[strspn(actstr, ", ") :] 536 522 parsed = True 537 523 538 - if parse_starts_block(actstr, "ct(", False): 524 + if parse_starts_block(actstr, "clone(", False): 525 + parencount += 1 526 + subacts = ovsactions() 527 + actstr = actstr[len("clone("):] 528 + parsedLen = subacts.parse(actstr) 529 + lst = [] 530 + self["attrs"].append(("OVS_ACTION_ATTR_CLONE", subacts)) 531 + actstr = actstr[parsedLen:] 532 + parsed = True 533 + elif parse_starts_block(actstr, "ct(", False): 534 + parencount += 1 539 535 actstr = actstr[len("ct(") :] 540 536 ctact = ovsactions.ctact() 541 537 ··· 577 553 natact = ovsactions.ctact.natattr() 578 554 579 555 if actstr.startswith("("): 556 + parencount += 1 580 557 t = None 581 558 actstr = actstr[1:] 582 559 if actstr.startswith("src"): ··· 632 607 actstr = actstr[strspn(actstr, ", ") :] 633 608 634 609 
ctact["attrs"].append(["OVS_CT_ATTR_NAT", natact]) 635 - actstr = actstr[strspn(actstr, ",) ") :] 610 + actstr = actstr[strspn(actstr, ", ") :] 636 611 637 612 self["attrs"].append(["OVS_ACTION_ATTR_CT", ctact]) 638 613 parsed = True 639 614 640 - actstr = actstr[strspn(actstr, "), ") :] 615 + actstr = actstr[strspn(actstr, ", ") :] 616 + while parencount > 0: 617 + parencount -= 1 618 + actstr = actstr[strspn(actstr, " "):] 619 + if len(actstr) and actstr[0] != ")": 620 + raise ValueError("Action str: '%s' unbalanced" % actstr) 621 + actstr = actstr[1:] 622 + 623 + if len(actstr) and actstr[0] == ")": 624 + return (totallen - len(actstr)) 625 + 626 + actstr = actstr[strspn(actstr, ", ") :] 627 + 641 628 if not parsed: 642 629 raise ValueError("Action str: '%s' not supported" % actstr) 630 + 631 + return (totallen - len(actstr)) 643 632 644 633 645 634 class ovskey(nla): ··· 2149 2110 ovsvp = OvsVport(ovspk) 2150 2111 ovsflow = OvsFlow() 2151 2112 ndb = NDB() 2113 + 2114 + sys.setrecursionlimit(100000) 2152 2115 2153 2116 if hasattr(args, "showdp"): 2154 2117 found = False
+2 -2
tools/testing/selftests/net/pmtu.sh
··· 1336 1336 else 1337 1337 TCPDST="TCP:[${dst}]:50000" 1338 1338 fi 1339 - ${ns_b} socat -T 3 -u -6 TCP-LISTEN:50000 STDOUT > $tmpoutfile & 1339 + ${ns_b} socat -T 3 -u -6 TCP-LISTEN:50000,reuseaddr STDOUT > $tmpoutfile & 1340 1340 local socat_pid=$! 1341 1341 1342 1342 wait_local_port_listen ${NS_B} 50000 tcp 1343 1343 1344 1344 dd if=/dev/zero status=none bs=1M count=1 | ${target} socat -T 3 -u STDIN $TCPDST,connect-timeout=3 1345 1345 1346 + wait ${socat_pid} 1346 1347 size=$(du -sb $tmpoutfile) 1347 1348 size=${size%%/tmp/*} 1348 - wait ${socat_pid} 1349 1349 1350 1350 [ $size -ne 1048576 ] && err "File size $size mismatches exepcted value in locally bridged vxlan test" && return 1 1351 1351 done
+25 -4
tools/testing/selftests/net/so_txtime.sh
··· 5 5 6 6 set -e 7 7 8 + readonly ksft_skip=4 8 9 readonly DEV="veth0" 9 10 readonly BIN="./so_txtime" 10 11 ··· 47 46 ip -netns "${NS1}" addr add fd::1/64 dev "${DEV}" nodad 48 47 ip -netns "${NS2}" addr add fd::2/64 dev "${DEV}" nodad 49 48 50 - do_test() { 49 + run_test() { 51 50 local readonly IP="$1" 52 51 local readonly CLOCK="$2" 53 52 local readonly TXARGS="$3" ··· 65 64 fi 66 65 67 66 local readonly START="$(date +%s%N --date="+ 0.1 seconds")" 67 + 68 68 ip netns exec "${NS2}" "${BIN}" -"${IP}" -c "${CLOCK}" -t "${START}" -S "${SADDR}" -D "${DADDR}" "${RXARGS}" -r & 69 69 ip netns exec "${NS1}" "${BIN}" -"${IP}" -c "${CLOCK}" -t "${START}" -S "${SADDR}" -D "${DADDR}" "${TXARGS}" 70 70 wait "$!" 71 71 } 72 72 73 + do_test() { 74 + run_test $@ 75 + [ $? -ne 0 ] && ret=1 76 + } 77 + 78 + do_fail_test() { 79 + run_test $@ 80 + [ $? -eq 0 ] && ret=1 81 + } 82 + 73 83 ip netns exec "${NS1}" tc qdisc add dev "${DEV}" root fq 84 + set +e 85 + ret=0 74 86 do_test 4 mono a,-1 a,-1 75 87 do_test 6 mono a,0 a,0 76 88 do_test 6 mono a,10 a,10 ··· 91 77 do_test 6 mono a,20,b,10 b,20,a,20 92 78 93 79 if ip netns exec "${NS1}" tc qdisc replace dev "${DEV}" root etf clockid CLOCK_TAI delta 400000; then 94 - ! do_test 4 tai a,-1 a,-1 95 - ! do_test 6 tai a,0 a,0 80 + do_fail_test 4 tai a,-1 a,-1 81 + do_fail_test 6 tai a,0 a,0 96 82 do_test 6 tai a,10 a,10 97 83 do_test 4 tai a,10,b,20 a,10,b,20 98 84 do_test 6 tai a,20,b,10 b,10,a,20 99 85 else 100 86 echo "tc ($(tc -V)) does not support qdisc etf. skipping" 87 + [ $ret -eq 0 ] && ret=$ksft_skip 101 88 fi 102 89 103 - echo OK. All tests passed 90 + if [ $ret -eq 0 ]; then 91 + echo OK. All tests passed 92 + elif [[ $ret -ne $ksft_skip && -n "$KSFT_MACHINE_SLOW" ]]; then 93 + echo "Ignoring errors due to slow environment" 1>&2 94 + ret=0 95 + fi 96 + exit $ret
+23
tools/testing/selftests/net/test_bridge_backup_port.sh
··· 124 124 [[ $pkts == $count ]] 125 125 } 126 126 127 + bridge_link_check() 128 + { 129 + local ns=$1; shift 130 + local dev=$1; shift 131 + local state=$1; shift 132 + 133 + bridge -n $ns -d -j link show dev $dev | \ 134 + jq -e ".[][\"state\"] == \"$state\"" &> /dev/null 135 + } 136 + 127 137 ################################################################################ 128 138 # Setup 129 139 ··· 269 259 log_test $? 0 "No forwarding out of vx0" 270 260 271 261 run_cmd "ip -n $sw1 link set dev swp1 carrier off" 262 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled 272 263 log_test $? 0 "swp1 carrier off" 273 264 274 265 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 279 268 log_test $? 0 "No forwarding out of vx0" 280 269 281 270 run_cmd "ip -n $sw1 link set dev swp1 carrier on" 271 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding 282 272 log_test $? 0 "swp1 carrier on" 283 273 284 274 # Configure vx0 as the backup port of swp1 and check that packets are ··· 296 284 log_test $? 0 "No forwarding out of vx0" 297 285 298 286 run_cmd "ip -n $sw1 link set dev swp1 carrier off" 287 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled 299 288 log_test $? 0 "swp1 carrier off" 300 289 301 290 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 306 293 log_test $? 0 "Forwarding out of vx0" 307 294 308 295 run_cmd "ip -n $sw1 link set dev swp1 carrier on" 296 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding 309 297 log_test $? 0 "swp1 carrier on" 310 298 311 299 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 328 314 log_test $? 
0 "No forwarding out of vx0" 329 315 330 316 run_cmd "ip -n $sw1 link set dev swp1 carrier off" 317 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled 331 318 log_test $? 0 "swp1 carrier off" 332 319 333 320 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 384 369 log_test $? 0 "No forwarding out of vx0" 385 370 386 371 run_cmd "ip -n $sw1 link set dev swp1 carrier off" 372 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled 387 373 log_test $? 0 "swp1 carrier off" 388 374 389 375 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 398 382 log_test $? 0 "Forwarding using VXLAN FDB entry" 399 383 400 384 run_cmd "ip -n $sw1 link set dev swp1 carrier on" 385 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding 401 386 log_test $? 0 "swp1 carrier on" 402 387 403 388 # Configure nexthop ID 10 as the backup nexthop ID of swp1 and check ··· 415 398 log_test $? 0 "No forwarding out of vx0" 416 399 417 400 run_cmd "ip -n $sw1 link set dev swp1 carrier off" 401 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled 418 402 log_test $? 0 "swp1 carrier off" 419 403 420 404 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 429 411 log_test $? 0 "No forwarding using VXLAN FDB entry" 430 412 431 413 run_cmd "ip -n $sw1 link set dev swp1 carrier on" 414 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding 432 415 log_test $? 0 "swp1 carrier on" 433 416 434 417 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 460 441 log_test $? 0 "No forwarding using VXLAN FDB entry" 461 442 462 443 run_cmd "ip -n $sw1 link set dev swp1 carrier off" 444 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled 463 445 log_test $? 
0 "swp1 carrier off" 464 446 465 447 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 517 497 log_test $? 0 "Valid nexthop as backup nexthop" 518 498 519 499 run_cmd "ip -n $sw1 link set dev swp1 carrier off" 500 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled 520 501 log_test $? 0 "swp1 carrier off" 521 502 522 503 run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1" ··· 625 604 run_cmd "bridge -n $sw2 link set dev swp1 backup_nhid 10" 626 605 627 606 run_cmd "ip -n $sw1 link set dev swp1 carrier off" 607 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled 628 608 run_cmd "ip -n $sw2 link set dev swp1 carrier off" 609 + busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw2 swp1 disabled 629 610 630 611 run_cmd "ip netns exec $sw1 ping -i 0.1 -c 10 -w $PING_TIMEOUT 192.0.2.66" 631 612 log_test $? 0 "Ping with backup nexthop ID"
+6 -6
tools/testing/selftests/net/tls.c
··· 1002 1002 1003 1003 memset(recv_mem, 0, sizeof(recv_mem)); 1004 1004 EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); 1005 - EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 1006 - MSG_WAITALL), -1); 1005 + EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_first), 1006 + MSG_WAITALL), strlen(test_str_first)); 1007 1007 EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0); 1008 1008 memset(recv_mem, 0, sizeof(recv_mem)); 1009 - EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 1010 - MSG_WAITALL), -1); 1009 + EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_second), 1010 + MSG_WAITALL), strlen(test_str_second)); 1011 1011 EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)), 1012 1012 0); 1013 1013 } ··· 1874 1874 /* Child should sleep in poll(), never get a wake */ 1875 1875 pfd.fd = self->cfd2; 1876 1876 pfd.events = POLLIN; 1877 - EXPECT_EQ(poll(&pfd, 1, 5), 0); 1877 + EXPECT_EQ(poll(&pfd, 1, 20), 0); 1878 1878 1879 1879 EXPECT_EQ(write(p[1], &token, 1), 1); /* Barrier #1 */ 1880 1880 1881 1881 pfd.fd = self->cfd2; 1882 1882 pfd.events = POLLIN; 1883 - EXPECT_EQ(poll(&pfd, 1, 5), 1); 1883 + EXPECT_EQ(poll(&pfd, 1, 20), 1); 1884 1884 1885 1885 exit(!_metadata->passed); 1886 1886 }
+6 -1
tools/tracing/rtla/Makefile
··· 28 28 -fasynchronous-unwind-tables -fstack-clash-protection 29 29 WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized 30 30 31 + ifeq ($(CC),clang) 32 + FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS)) 33 + WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS)) 34 + endif 35 + 31 36 TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs) 32 37 33 38 CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS) 34 - LDFLAGS := -ggdb $(EXTRA_LDFLAGS) 39 + LDFLAGS := -flto=auto -ggdb $(EXTRA_LDFLAGS) 35 40 LIBS := $$($(PKG_CONFIG) --libs libtracefs) 36 41 37 42 SRC := $(wildcard src/*.c)
+6 -3
tools/tracing/rtla/src/osnoise_hist.c
··· 135 135 if (params->output_divisor) 136 136 duration = duration / params->output_divisor; 137 137 138 - if (data->bucket_size) 139 - bucket = duration / data->bucket_size; 138 + bucket = duration / data->bucket_size; 140 139 141 140 total_duration = duration * count; 142 141 ··· 479 480 480 481 for (i = 0; msg[i]; i++) 481 482 fprintf(stderr, "%s\n", msg[i]); 482 - exit(1); 483 + 484 + if (usage) 485 + exit(EXIT_FAILURE); 486 + 487 + exit(EXIT_SUCCESS); 483 488 } 484 489 485 490 /*
+5 -1
tools/tracing/rtla/src/osnoise_top.c
··· 331 331 332 332 for (i = 0; msg[i]; i++) 333 333 fprintf(stderr, "%s\n", msg[i]); 334 - exit(1); 334 + 335 + if (usage) 336 + exit(EXIT_FAILURE); 337 + 338 + exit(EXIT_SUCCESS); 335 339 } 336 340 337 341 /*
+6 -3
tools/tracing/rtla/src/timerlat_hist.c
··· 178 178 if (params->output_divisor) 179 179 latency = latency / params->output_divisor; 180 180 181 - if (data->bucket_size) 182 - bucket = latency / data->bucket_size; 181 + bucket = latency / data->bucket_size; 183 182 184 183 if (!context) { 185 184 hist = data->hist[cpu].irq; ··· 545 546 546 547 for (i = 0; msg[i]; i++) 547 548 fprintf(stderr, "%s\n", msg[i]); 548 - exit(1); 549 + 550 + if (usage) 551 + exit(EXIT_FAILURE); 552 + 553 + exit(EXIT_SUCCESS); 549 554 } 550 555 551 556 /*
+5 -1
tools/tracing/rtla/src/timerlat_top.c
··· 375 375 376 376 for (i = 0; msg[i]; i++) 377 377 fprintf(stderr, "%s\n", msg[i]); 378 - exit(1); 378 + 379 + if (usage) 380 + exit(EXIT_FAILURE); 381 + 382 + exit(EXIT_SUCCESS); 379 383 } 380 384 381 385 /*
+4 -10
tools/tracing/rtla/src/utils.c
··· 238 238 return syscall(__NR_sched_setattr, pid, attr, flags); 239 239 } 240 240 241 - static inline int sched_getattr(pid_t pid, struct sched_attr *attr, 242 - unsigned int size, unsigned int flags) 243 - { 244 - return syscall(__NR_sched_getattr, pid, attr, size, flags); 245 - } 246 - 247 241 int __set_sched_attr(int pid, struct sched_attr *attr) 248 242 { 249 243 int flags = 0; ··· 473 479 if (prio == INVALID_VAL) 474 480 return -1; 475 481 476 - if (prio < sched_get_priority_min(SCHED_OTHER)) 482 + if (prio < MIN_NICE) 477 483 return -1; 478 - if (prio > sched_get_priority_max(SCHED_OTHER)) 484 + if (prio > MAX_NICE) 479 485 return -1; 480 486 481 487 sched_param->sched_policy = SCHED_OTHER; 482 - sched_param->sched_priority = prio; 488 + sched_param->sched_nice = prio; 483 489 break; 484 490 default: 485 491 return -1; ··· 530 536 */ 531 537 static const int find_mount(const char *fs, char *mp, int sizeof_mp) 532 538 { 533 - char mount_point[MAX_PATH]; 539 + char mount_point[MAX_PATH+1]; 534 540 char type[100]; 535 541 int found = 0; 536 542 FILE *fp;
+2
tools/tracing/rtla/src/utils.h
··· 9 9 */ 10 10 #define BUFF_U64_STR_SIZE 24 11 11 #define MAX_PATH 1024 12 + #define MAX_NICE 20 13 + #define MIN_NICE -19 12 14 13 15 #define container_of(ptr, type, member)({ \ 14 16 const typeof(((type *)0)->member) *__mptr = (ptr); \
+6 -1
tools/verification/rv/Makefile
··· 28 28 -fasynchronous-unwind-tables -fstack-clash-protection 29 29 WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized 30 30 31 + ifeq ($(CC),clang) 32 + FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS)) 33 + WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS)) 34 + endif 35 + 31 36 TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs) 32 37 33 38 CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS) -I include 34 - LDFLAGS := -ggdb $(EXTRA_LDFLAGS) 39 + LDFLAGS := -flto=auto -ggdb $(EXTRA_LDFLAGS) 35 40 LIBS := $$($(PKG_CONFIG) --libs libtracefs) 36 41 37 42 SRC := $(wildcard src/*.c)
+1 -1
tools/verification/rv/src/in_kernel.c
··· 210 210 static char *ikm_get_current_reactor(char *monitor_name) 211 211 { 212 212 char *reactors = ikm_read_reactor(monitor_name); 213 + char *curr_reactor = NULL; 213 214 char *start; 214 215 char *end; 215 - char *curr_reactor; 216 216 217 217 if (!reactors) 218 218 return NULL;