Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'asoc-fix-v7.0-rc2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v7.0

A moderately large pile of fixes, though none of them are super major,
plus a few new quirks and device IDs.

+5412 -2398
+8 -1
.mailmap
···
 Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
+Daniel Lezcano <daniel.lezcano@kernel.org> <daniel.lezcano@linaro.org>
+Daniel Lezcano <daniel.lezcano@kernel.org> <daniel.lezcano@free.fr>
+Daniel Lezcano <daniel.lezcano@kernel.org> <daniel.lezcano@linexp.org>
+Daniel Lezcano <daniel.lezcano@kernel.org> <dlezcano@fr.ibm.com>
 Daniel Thompson <danielt@kernel.org> <daniel.thompson@linaro.org>
+Daniele Alessandrelli <daniele.alessandrelli@gmail.com> <daniele.alessandrelli@intel.com>
 Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
 David Brownell <david-b@pacbell.net>
 David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
···
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vlastimil Babka <vbabka@kernel.org> <vbabka@suse.cz>
 WangYuli <wangyuli@aosc.io> <wangyl5933@chinaunicom.cn>
 WangYuli <wangyuli@aosc.io> <wangyuli@deepin.org>
 Weiwen Hu <huweiwen@linux.alibaba.com> <sehuww@mail.scut.edu.cn>
···
 Ying Huang <huang.ying.caritas@gmail.com> <ying.huang@intel.com>
 Yixun Lan <dlan@kernel.org> <dlan@gentoo.org>
 Yixun Lan <dlan@kernel.org> <yixun.lan@amlogic.com>
-Yosry Ahmed <yosry.ahmed@linux.dev> <yosryahmed@google.com>
+Yosry Ahmed <yosry@kernel.org> <yosryahmed@google.com>
+Yosry Ahmed <yosry@kernel.org> <yosry.ahmed@linux.dev>
 Yu-Chun Lin <eleanor.lin@realtek.com> <eleanor15x@gmail.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
+3
Documentation/admin-guide/sysctl/net.rst
···
 their sockets will only be able to connect within their own
 namespace.

+The first write to ``child_ns_mode`` locks its value. Subsequent writes of the
+same value succeed, but writing a different value returns ``-EBUSY``.
+
 Changing ``child_ns_mode`` only affects namespaces created after the change;
 it does not modify the current namespace or any existing children.
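For context, the write-once behaviour documented above can be exercised from userspace. A minimal sketch, assuming the knob is exposed under /proc/sys/net/ (the exact path is an assumption, not taken from this diff):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption; locate child_ns_mode under /proc/sys/net/. */
	int fd = open("/proc/sys/net/core/child_ns_mode", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) < 0)	/* first write locks the value */
		perror("first write");
	if (write(fd, "1", 1) < 0)	/* rewriting the same value succeeds */
		perror("same value");
	if (write(fd, "0", 1) < 0 && errno == EBUSY)	/* different value */
		fprintf(stderr, "locked: got EBUSY as documented\n");
	close(fd);
	return 0;
}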
+2 -2
Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml
···
 regulator-max-microvolt = <1700000>;
 };
 mt6359_vrfck_1_ldo_reg: ldo_vrfck_1 {
-regulator-name = "vrfck";
+regulator-name = "vrfck_1";
 regulator-min-microvolt = <1240000>;
 regulator-max-microvolt = <1600000>;
 };
···
 regulator-max-microvolt = <3300000>;
 };
 mt6359_vemc_1_ldo_reg: ldo_vemc_1 {
-regulator-name = "vemc";
+regulator-name = "vemc_1";
 regulator-min-microvolt = <2500000>;
 regulator-max-microvolt = <3300000>;
 };
+1
Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
···
 enum:
 - nvidia,tegra210-audio-graph-card
 - nvidia,tegra186-audio-graph-card
+- nvidia,tegra238-audio-graph-card
 - nvidia,tegra264-audio-graph-card

 clocks:
+1
Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
···
 - renesas,r9a07g044-ssi # RZ/G2{L,LC}
 - renesas,r9a07g054-ssi # RZ/V2L
 - renesas,r9a08g045-ssi # RZ/G3S
+- renesas,r9a08g046-ssi # RZ/G3L
 - const: renesas,rz-ssi

 reg:
+1 -30
Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
···
 properties:
 compatible:
 contains:
-enum:
-- baikal,bt1-sys-ssi
-then:
-properties:
-mux-controls:
-maxItems: 1
-required:
-- mux-controls
-else:
-required:
-- interrupts
-- if:
-properties:
-compatible:
-contains:
 const: amd,pensando-elba-spi
 then:
 required:
···
 const: intel,mountevans-imc-ssi
 - description: AMD Pensando Elba SoC SPI Controller
 const: amd,pensando-elba-spi
-- description: Baikal-T1 SPI Controller
-const: baikal,bt1-ssi
-- description: Baikal-T1 System Boot SPI Controller
-const: baikal,bt1-sys-ssi
 - description: Canaan Kendryte K210 SoS SPI Controller
 const: canaan,k210-spi
 - description: Renesas RZ/N1 SPI Controller
···
 - "#address-cells"
 - "#size-cells"
 - clocks
+- interrupts

 examples:
 - |
···
 reg = <1>;
 rx-sample-delay-ns = <7>;
 };
-};
-- |
-spi@1f040100 {
-compatible = "baikal,bt1-sys-ssi";
-reg = <0x1f040100 0x900>,
-<0x1c000000 0x1000000>;
-#address-cells = <1>;
-#size-cells = <0>;
-mux-controls = <&boot_mux>;
-clocks = <&ccu_sys>;
-clock-names = "ssi_clk";
 };
...
+4 -6
Documentation/virt/kvm/api.rst
···
 Memory for the region is taken starting at the address denoted by the
 field userspace_addr, which must point at user addressable memory for
 the entire memory slot size. Any object may back this memory, including
-anonymous memory, ordinary files, and hugetlbfs.
+anonymous memory, ordinary files, and hugetlbfs. Changes in the backing
+of the memory region are automatically reflected into the guest.
+For example, an mmap() that affects the region will be made visible
+immediately. Another example is madvise(MADV_DROP).

 On architectures that support a form of address tagging, userspace_addr must
 be an untagged address.
···
 use it. The latter can be set, if KVM_CAP_READONLY_MEM capability allows it,
 to make a new slot read-only. In this case, writes to this memory will be
 posted to userspace as KVM_EXIT_MMIO exits.
-
-When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
-the memory region are automatically reflected into the guest. For example, an
-mmap() that affects the region will be made visible immediately. Another
-example is madvise(MADV_DROP).

 For TDX guest, deleting/moving memory region loses guest memory contents.
 Read only region isn't supported. Only as-id 0 is supported.
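As a reminder of how such a slot is created, here is a minimal userspace sketch using the standard KVM_SET_USER_MEMORY_REGION ioctl with anonymous memory (error handling trimmed); per the text above, later changes to this backing become visible to the guest automatically:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int back_slot(int vm_fd, __u64 gpa, __u64 size)
{
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct kvm_userspace_memory_region region = {
		.slot            = 0,
		.guest_phys_addr = gpa,
		.memory_size     = size,
		.userspace_addr  = (__u64)(unsigned long)mem,
	};

	if (mem == MAP_FAILED)
		return -1;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}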
+22 -23
MAINTAINERS
···
 F: include/uapi/drm/amdxdna_accel.h

 AMD XGBE DRIVER
-M: "Shyam Sundar S K" <Shyam-sundar.S-k@amd.com>
 M: Raju Rangoju <Raju.Rangoju@amd.com>
 L: netdev@vger.kernel.org
 S: Maintained
···

 CISCO SCSI HBA DRIVER
 M: Karan Tilak Kumar <kartilak@cisco.com>
+M: Narsimhulu Musini <nmusini@cisco.com>
 M: Sesidhar Baddela <sebaddel@cisco.com>
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: drivers/scsi/snic/

 CISCO VIC ETHERNET NIC DRIVER
-M: Christian Benvenuti <benve@cisco.com>
 M: Satish Kharat <satishkh@cisco.com>
 S: Maintained
 F: drivers/net/ethernet/cisco/enic/

 CISCO VIC LOW LATENCY NIC DRIVER
-M: Christian Benvenuti <benve@cisco.com>
 M: Nelson Escobar <neescoba@cisco.com>
+M: Satish Kharat <satishkh@cisco.com>
 S: Supported
 F: drivers/infiniband/hw/usnic/
···
 F: include/linux/clk.h

 CLOCKSOURCE, CLOCKEVENT DRIVERS
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 M: Thomas Gleixner <tglx@kernel.org>
 L: linux-kernel@vger.kernel.org
 S: Supported
···

 CPU IDLE TIME MANAGEMENT FRAMEWORK
 M: "Rafael J. Wysocki" <rafael@kernel.org>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 R: Christian Loehle <christian.loehle@arm.com>
 L: linux-pm@vger.kernel.org
 S: Maintained
···

 CPUIDLE DRIVER - ARM BIG LITTLE
 M: Lorenzo Pieralisi <lpieralisi@kernel.org>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 L: linux-pm@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
···
 F: drivers/cpuidle/cpuidle-big_little.c

 CPUIDLE DRIVER - ARM EXYNOS
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 M: Kukjin Kim <kgene@kernel.org>
 R: Krzysztof Kozlowski <krzk@kernel.org>
 L: linux-pm@vger.kernel.org
···
 M: Herve Codina <herve.codina@bootlin.com>
 S: Maintained
 F: Documentation/devicetree/bindings/net/lantiq,pef2256.yaml
-F: drivers/net/wan/framer/pef2256/
+F: drivers/net/wan/framer/
 F: drivers/pinctrl/pinctrl-pef2256.c
-F: include/linux/framer/pef2256.h
+F: include/linux/framer/

 LASI 53c700 driver for PARISC
 M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
···
 M: David Hildenbrand <david@kernel.org>
 R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Mike Rapoport <rppt@kernel.org>
 R: Suren Baghdasaryan <surenb@google.com>
 R: Michal Hocko <mhocko@suse.com>
···
 M: David Hildenbrand <david@kernel.org>
 R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Mike Rapoport <rppt@kernel.org>
 R: Suren Baghdasaryan <surenb@google.com>
 R: Michal Hocko <mhocko@suse.com>
···

 MEMORY MANAGEMENT - PAGE ALLOCATOR
 M: Andrew Morton <akpm@linux-foundation.org>
-M: Vlastimil Babka <vbabka@suse.cz>
+M: Vlastimil Babka <vbabka@kernel.org>
 R: Suren Baghdasaryan <surenb@google.com>
 R: Michal Hocko <mhocko@suse.com>
 R: Brendan Jackman <jackmanb@google.com>
···
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Rik van Riel <riel@surriel.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Harry Yoo <harry.yoo@oracle.com>
 R: Jann Horn <jannh@google.com>
 L: linux-mm@kvack.org
···
 M: Andrew Morton <akpm@linux-foundation.org>
 M: Liam R. Howlett <Liam.Howlett@oracle.com>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Jann Horn <jannh@google.com>
 R: Pedro Falcato <pfalcato@suse.de>
 L: linux-mm@kvack.org
···
 M: Suren Baghdasaryan <surenb@google.com>
 M: Liam R. Howlett <Liam.Howlett@oracle.com>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Shakeel Butt <shakeel.butt@linux.dev>
 L: linux-mm@kvack.org
 S: Maintained
···
 M: Liam R. Howlett <Liam.Howlett@oracle.com>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 M: David Hildenbrand <david@kernel.org>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Jann Horn <jannh@google.com>
 L: linux-mm@kvack.org
 S: Maintained
···
 F: drivers/pci/controller/dwc/pcie-kirin.c

 PCIE DRIVER FOR HISILICON STB
-M: Shawn Guo <shawn.guo@linaro.org>
+M: Shawn Guo <shawnguo@kernel.org>
 L: linux-pci@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
···
 F: drivers/net/ethernet/qualcomm/emac/

 QUALCOMM ETHQOS ETHERNET DRIVER
-M: Vinod Koul <vkoul@kernel.org>
+M: Mohd Ayaan Anwar <mohd.anwar@oss.qualcomm.com>
 L: netdev@vger.kernel.org
 L: linux-arm-msm@vger.kernel.org
 S: Maintained
···
 RUST [ALLOC]
 M: Danilo Krummrich <dakr@kernel.org>
 R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
 R: Uladzislau Rezki <urezki@gmail.com>
 L: rust-for-linux@vger.kernel.org
···
 F: drivers/nvmem/layouts/sl28vpd.c

 SLAB ALLOCATOR
-M: Vlastimil Babka <vbabka@suse.cz>
+M: Vlastimil Babka <vbabka@kernel.org>
 M: Andrew Morton <akpm@linux-foundation.org>
 R: Christoph Lameter <cl@gentwo.org>
 R: David Rientjes <rientjes@google.com>
···

 THERMAL
 M: Rafael J. Wysocki <rafael@kernel.org>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 R: Zhang Rui <rui.zhang@intel.com>
 R: Lukasz Luba <lukasz.luba@arm.com>
 L: linux-pm@vger.kernel.org
···

 THERMAL/CPU_COOLING
 M: Amit Daniel Kachhap <amit.kachhap@gmail.com>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 M: Viresh Kumar <viresh.kumar@linaro.org>
 R: Lukasz Luba <lukasz.luba@arm.com>
 L: linux-pm@vger.kernel.org
···

 ZSWAP COMPRESSED SWAP CACHING
 M: Johannes Weiner <hannes@cmpxchg.org>
-M: Yosry Ahmed <yosry.ahmed@linux.dev>
+M: Yosry Ahmed <yosry@kernel.org>
 M: Nhat Pham <nphamcs@gmail.com>
 R: Chengming Zhou <chengming.zhou@linux.dev>
 L: linux-mm@kvack.org
+1 -1
Makefile
···
 VERSION = 7
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
+20 -6
arch/arm64/include/asm/io.h
···
 typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
 				   pgprot_t *prot);
 int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
+void __iomem *__ioremap_prot(phys_addr_t phys, size_t size, pgprot_t prot);

+static inline void __iomem *ioremap_prot(phys_addr_t phys, size_t size,
+					 pgprot_t user_prot)
+{
+	pgprot_t prot;
+	ptdesc_t user_prot_val = pgprot_val(user_prot);
+
+	if (WARN_ON_ONCE(!(user_prot_val & PTE_USER)))
+		return NULL;
+
+	prot = __pgprot_modify(PAGE_KERNEL, PTE_ATTRINDX_MASK,
+			       user_prot_val & PTE_ATTRINDX_MASK);
+	return __ioremap_prot(phys, size, prot);
+}
 #define ioremap_prot ioremap_prot

-#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
-
+#define ioremap(addr, size) \
+	__ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size) \
-	ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
+	__ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define ioremap_np(addr, size) \
-	ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+	__ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))


 #define ioremap_encrypted(addr, size) \
-	ioremap_prot((addr), (size), PAGE_KERNEL)
+	__ioremap_prot((addr), (size), PAGE_KERNEL)

 /*
  * io{read,write}{16,32,64}be() macros
···
 if (pfn_is_map_memory(__phys_to_pfn(addr)))
 	return (void __iomem *)__phys_to_virt(addr);

-return ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
+return __ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
 }

 /*
+2 -1
arch/arm64/include/asm/kvm_host.h
···
 	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))

 #define kvm_has_s1poe(k) \
-	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
+	(system_supports_poe() && \
+	 kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))

 #define kvm_has_ras(k) \
 	(kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))
+2
arch/arm64/include/asm/kvm_nested.h
···
 int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
 void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);

+u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime);
+
 #define vncr_fixmap(c) \
 ({ \
 	u32 __c = (c); \
-3
arch/arm64/include/asm/pgtable-prot.h
···
 #define _PAGE_GCS (_PAGE_DEFAULT | PTE_NG | PTE_UXN | PTE_WRITE | PTE_USER)
 #define _PAGE_GCS_RO (_PAGE_DEFAULT | PTE_NG | PTE_UXN | PTE_USER)

-#define PAGE_GCS __pgprot(_PAGE_GCS)
-#define PAGE_GCS_RO __pgprot(_PAGE_GCS_RO)
-
 #define PIE_E0 ( \
 	PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS), PIE_GCS) | \
 	PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS_RO), PIE_R) | \
+37 -26
arch/arm64/include/asm/tlbflush.h
···
 */
 #define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
 			       "tlbi " #op "\n" \
-			       ALTERNATIVE("nop\n nop", \
-					   "dsb ish\n tlbi " #op, \
-					   ARM64_WORKAROUND_REPEAT_TLBI, \
-					   CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
 			       : : )

 #define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
-			       "tlbi " #op ", %0\n" \
-			       ALTERNATIVE("nop\n nop", \
-					   "dsb ish\n tlbi " #op ", %0", \
-					   ARM64_WORKAROUND_REPEAT_TLBI, \
-					   CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
-			       : : "r" (arg))
+			       "tlbi " #op ", %x0\n" \
+			       : : "rZ" (arg))

 #define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
···
 	(__pages >> (5 * (scale) + 1)) - 1; \
 })

+#define __repeat_tlbi_sync(op, arg...) \
+do { \
+	if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI)) \
+		break; \
+	__tlbi(op, ##arg); \
+	dsb(ish); \
+} while (0)
+
+/*
+ * Complete broadcast TLB maintenance issued by the host which invalidates
+ * stage 1 information in the host's own translation regime.
+ */
+static inline void __tlbi_sync_s1ish(void)
+{
+	dsb(ish);
+	__repeat_tlbi_sync(vale1is, 0);
+}
+
+/*
+ * Complete broadcast TLB maintenance issued by hyp code which invalidates
+ * stage 1 translation information in any translation regime.
+ */
+static inline void __tlbi_sync_s1ish_hyp(void)
+{
+	dsb(ish);
+	__repeat_tlbi_sync(vale2is, 0);
+}
+
 /*
  * TLB Invalidation
  * ================
···
 {
 	dsb(ishst);
 	__tlbi(vmalle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 	isb();
 }
···
 	asid = __TLBI_VADDR(0, ASID(mm));
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
···
 				     unsigned long uaddr)
 {
 	flush_tlb_page_nosync(vma, uaddr);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 }

 static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 {
-	/*
-	 * TLB flush deferral is not required on systems which are affected by
-	 * ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
-	 * will have two consecutive TLBI instructions with a dsb(ish) in between
-	 * defeating the purpose (i.e save overall 'dsb ish' cost).
-	 */
-	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
-		return false;
-
 	return true;
 }
···
 */
 static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-	dsb(ish);
+	__tlbi_sync_s1ish();
 }

 /*
···
 {
 	__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
 				 last_level, tlb_level);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 }

 static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
···
 	dsb(ishst);
 	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
 			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
-	dsb(ish);
+	__tlbi_sync_s1ish();
 	isb();
 }
···
 	dsb(ishst);
 	__tlbi(vaae1is, addr);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 	isb();
 }
+1 -1
arch/arm64/kernel/acpi.c
···
 		prot = __acpi_get_writethrough_mem_attribute();
 	}
 }
-return ioremap_prot(phys, size, prot);
+return __ioremap_prot(phys, size, prot);
 }

 /*
+1 -1
arch/arm64/kernel/sys_compat.c
···
 * We pick the reserved-ASID to minimise the impact.
 */
 __tlbi(aside1is, __TLBI_VADDR(0, 0));
-dsb(ish);
+__tlbi_sync_s1ish();
 }

 ret = caches_clean_inval_user_pou(start, start + chunk);
+15 -6
arch/arm64/kernel/topology.c
···
 int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
 {
 	/*
-	 * Abort call on counterless CPU or when interrupts are
-	 * disabled - can lead to deadlock in smp sync call.
+	 * Abort call on counterless CPU.
 	 */
 	if (!cpu_has_amu_feat(cpu))
 		return -EOPNOTSUPP;

-	if (WARN_ON_ONCE(irqs_disabled()))
-		return -EPERM;
-
-	smp_call_function_single(cpu, func, val, 1);
+	if (irqs_disabled()) {
+		/*
+		 * When IRQs are disabled (tick path: sched_tick ->
+		 * topology_scale_freq_tick or cppc_scale_freq_tick), only local
+		 * CPU counter reads are allowed. Remote CPU counter read would
+		 * require smp_call_function_single() which is unsafe with IRQs
+		 * disabled.
+		 */
+		if (WARN_ON_ONCE(cpu != smp_processor_id()))
+			return -EPERM;
+		func(val);
+	} else {
+		smp_call_function_single(cpu, func, val, 1);
+	}

 	return 0;
 }
-1
arch/arm64/kvm/Kconfig
···
 bool "Kernel-based Virtual Machine (KVM) support"
 select KVM_COMMON
 select KVM_GENERIC_HARDWARE_ENABLING
-select KVM_GENERIC_MMU_NOTIFIER
 select HAVE_KVM_CPU_RELAX_INTERCEPT
 select KVM_MMIO
 select KVM_GENERIC_DIRTYLOG_READ_PROTECT
-1
arch/arm64/kvm/arm.c
···
 	break;
 case KVM_CAP_IOEVENTFD:
 case KVM_CAP_USER_MEMORY:
-case KVM_CAP_SYNC_MMU:
 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 case KVM_CAP_ONE_REG:
 case KVM_CAP_ARM_PSCI:
+2 -25
arch/arm64/kvm/at.c
···
 wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0);

 wr->nG = (wi->regime != TR_EL2) && (desc & PTE_NG);
-if (wr->nG) {
-	u64 asid_ttbr, tcr;
-
-	switch (wi->regime) {
-	case TR_EL10:
-		tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
-		asid_ttbr = ((tcr & TCR_A1) ?
-			     vcpu_read_sys_reg(vcpu, TTBR1_EL1) :
-			     vcpu_read_sys_reg(vcpu, TTBR0_EL1));
-		break;
-	case TR_EL20:
-		tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-		asid_ttbr = ((tcr & TCR_A1) ?
-			     vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-			     vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-		break;
-	default:
-		BUG();
-	}
-
-	wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
-	if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-	    !(tcr & TCR_ASID16))
-		wr->asid &= GENMASK(7, 0);
-}
+if (wr->nG)
+	wr->asid = get_asid_by_regime(vcpu, wi->regime);

 return 0;
+1 -1
arch/arm64/kvm/hyp/nvhe/mm.c
···
 */
 dsb(ishst);
 __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();
 }
+34 -3
arch/arm64/kvm/hyp/nvhe/pkvm.c
···
 /* No restrictions for non-protected VMs. */
 if (!kvm_vm_is_protected(kvm)) {
 	hyp_vm->kvm.arch.flags = host_arch_flags;
+	hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);

 	bitmap_copy(kvm->arch.vcpu_features,
 		    host_kvm->arch.vcpu_features,
···
 if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
 	return;

-sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
+sve_state = hyp_vcpu->vcpu.arch.sve_state;
 hyp_unpin_shared_mem(sve_state,
 		     sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
 }
···
 	return ret;
 }

+static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
+	const struct kvm *host_kvm = hyp_vm->host_kvm;
+	struct kvm *kvm = &hyp_vm->kvm;
+
+	if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
+		return -EINVAL;
+
+	if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
+		return 0;
+
+	memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));
+
+	return 0;
+}
+
+static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	int ret = 0;
+
+	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+	else
+		ret = vm_copy_id_regs(hyp_vcpu);
+
+	return ret;
+}
+
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 			      struct pkvm_hyp_vm *hyp_vm,
 			      struct kvm_vcpu *host_vcpu)
···
 hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
 hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;

-if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
-	kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
+if (ret)
+	goto done;

 ret = pkvm_vcpu_init_traps(hyp_vcpu);
 if (ret)
+4 -4
arch/arm64/kvm/hyp/nvhe/tlb.c
···
 */
 dsb(ish);
 __tlbi(vmalle1is);
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();

 exit_vmid_context(&cxt);
···

 dsb(ish);
 __tlbi(vmalle1is);
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();

 exit_vmid_context(&cxt);
···
 enter_vmid_context(mmu, &cxt, false);

 __tlbi(vmalls12e1is);
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();

 exit_vmid_context(&cxt);
···
 /* Same remark as in enter_vmid_context() */
 dsb(ish);
 __tlbi(alle1is);
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 }
+1 -1
arch/arm64/kvm/hyp/pgtable.c
···
 	*unmapped += granule;
 }

-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();
 mm_ops->put_page(ctx->ptep);
+5 -5
arch/arm64/kvm/hyp/vhe/tlb.c
···
 */
 dsb(ish);
 __tlbi(vmalle1is);
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();

 exit_vmid_context(&cxt);
···

 dsb(ish);
 __tlbi(vmalle1is);
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();

 exit_vmid_context(&cxt);
···
 enter_vmid_context(mmu, &cxt);

 __tlbi(vmalls12e1is);
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();

 exit_vmid_context(&cxt);
···
 {
 	dsb(ishst);
 	__tlbi(alle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 }

 /*
···
 default:
 	ret = -EINVAL;
 }
-dsb(ish);
+__tlbi_sync_s1ish_hyp();
 isb();

 if (mmu)
+5 -7
arch/arm64/kvm/mmu.c
···
 }

 /*
- * Both the canonical IPA and fault IPA must be hugepage-aligned to
- * ensure we find the right PFN and lay down the mapping in the right
- * place.
+ * Both the canonical IPA and fault IPA must be aligned to the
+ * mapping size to ensure we find the right PFN and lay down the
+ * mapping in the right place.
  */
-if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
-	fault_ipa &= ~(vma_pagesize - 1);
-	ipa &= ~(vma_pagesize - 1);
-}
+fault_ipa = ALIGN_DOWN(fault_ipa, vma_pagesize);
+ipa = ALIGN_DOWN(ipa, vma_pagesize);

 gfn = ipa >> PAGE_SHIFT;
 mte_allowed = kvm_vma_mte_allowed(vma);
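The replacement relies on ALIGN_DOWN() rounding an address down to a power-of-two boundary, which makes it a no-op for addresses that are already aligned (e.g. PAGE_SIZE mappings here). A standalone illustration (userspace re-definition for the demo, not the kernel macro itself):

#include <assert.h>
#include <stdint.h>

/* For power-of-two 'a', rounding down is just masking off the low bits. */
#define ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))

int main(void)
{
	assert(ALIGN_DOWN(0x12345678u, 0x200000u) == 0x12200000u); /* 2M block */
	assert(ALIGN_DOWN(0x12345000u, 0x1000u)   == 0x12345000u); /* no-op */
	return 0;
}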
+31 -32
arch/arm64/kvm/nested.c
···
 	return kvm_inject_nested_sync(vcpu, esr_el2);
 }

+u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime)
+{
+	enum vcpu_sysreg ttbr_elx;
+	u64 tcr;
+	u16 asid;
+
+	switch (regime) {
+	case TR_EL10:
+		tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
+		ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL1 : TTBR0_EL1;
+		break;
+	case TR_EL20:
+		tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+		ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL2 : TTBR0_EL2;
+		break;
+	default:
+		BUG();
+	}
+
+	asid = FIELD_GET(TTBRx_EL1_ASID, vcpu_read_sys_reg(vcpu, ttbr_elx));
+	if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+	    !(tcr & TCR_ASID16))
+		asid &= GENMASK(7, 0);
+
+	return asid;
+}
+
 static void invalidate_vncr(struct vncr_tlb *vt)
 {
 	vt->valid = false;
···
 {
 	int i;

-	if (!kvm->arch.nested_mmus_size)
-		return;
-
 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
···
 if (read_vncr_el2(vcpu) != vt->gva)
 	return false;

-if (vt->wr.nG) {
-	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-	u64 ttbr = ((tcr & TCR_A1) ?
-		    vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-		    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-	u16 asid;
-
-	asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
-	if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-	    !(tcr & TCR_ASID16))
-		asid &= GENMASK(7, 0);
-
-	return asid == vt->wr.asid;
-}
+if (vt->wr.nG)
+	return get_asid_by_regime(vcpu, TR_EL20) == vt->wr.asid;

 return true;
 }
···
 if (read_vncr_el2(vcpu) != vt->gva)
 	return;

-if (vt->wr.nG) {
-	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-	u64 ttbr = ((tcr & TCR_A1) ?
-		    vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-		    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-	u16 asid;
-
-	asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
-	if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-	    !(tcr & TCR_ASID16))
-		asid &= GENMASK(7, 0);
-
-	if (asid != vt->wr.asid)
-		return;
-}
+if (vt->wr.nG && get_asid_by_regime(vcpu, TR_EL20) != vt->wr.asid)
+	return;

 vt->cpu = smp_processor_id();
+3
arch/arm64/kvm/sys_regs.c
···
 	ID_AA64MMFR3_EL1_SCTLRX |
 	ID_AA64MMFR3_EL1_S1POE |
 	ID_AA64MMFR3_EL1_S1PIE;
+
+if (!system_supports_poe())
+	val &= ~ID_AA64MMFR3_EL1_S1POE;
 break;
 case SYS_ID_MMFR4_EL1:
 	val &= ~ID_MMFR4_EL1_CCIDX;
+5 -1
arch/arm64/lib/delay.c
···
 * Note that userspace cannot change the offset behind our back either,
 * as the vcpu mutex is held as long as KVM_RUN is in progress.
 */
-#define __delay_cycles() __arch_counter_get_cntvct_stable()
+static cycles_t notrace __delay_cycles(void)
+{
+	guard(preempt_notrace)();
+	return __arch_counter_get_cntvct_stable();
+}

 void __delay(unsigned long cycles)
 {
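The macro-to-function conversion works because guard(preempt_notrace)() from <linux/cleanup.h> keeps preemption disabled for the rest of the scope and re-enables it automatically at every return. A kernel-style sketch of the same pattern, assuming a percpu counter (illustrative, not the delay code itself):

#include <linux/cleanup.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static u64 read_stable_counter(u64 __percpu *ctr)
{
	/* preempt_disable() here, preempt_enable() when the scope exits */
	guard(preempt)();
	return *this_cpu_ptr(ctr);	/* CPU cannot change inside the scope */
}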
+3 -3
arch/arm64/mm/ioremap.c
···
 	return 0;
 }

-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
-			   pgprot_t pgprot)
+void __iomem *__ioremap_prot(phys_addr_t phys_addr, size_t size,
+			     pgprot_t pgprot)
 {
 	unsigned long last_addr = phys_addr + size - 1;
···
 	return generic_ioremap_prot(phys_addr, size, pgprot);
 }
-EXPORT_SYMBOL(ioremap_prot);
+EXPORT_SYMBOL(__ioremap_prot);

 /*
  * Must be called after early_fixmap_init
+10 -2
arch/arm64/mm/mmap.c
···
 [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
 };

+static ptdesc_t gcs_page_prot __ro_after_init = _PAGE_GCS_RO;
+
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This might go
  * away in the future.
···
 	protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
 }

-if (lpa2_is_enabled())
+if (lpa2_is_enabled()) {
 	for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
 		pgprot_val(protection_map[i]) &= ~PTE_SHARED;
+	gcs_page_prot &= ~PTE_SHARED;
+}

 return 0;
 }
···

 /* Short circuit GCS to avoid bloating the table. */
 if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
-	prot = _PAGE_GCS_RO;
+	/* Honour mprotect(PROT_NONE) on shadow stack mappings */
+	if (vm_flags & VM_ACCESS_FLAGS)
+		prot = gcs_page_prot;
+	else
+		prot = pgprot_val(protection_map[VM_NONE]);
 } else {
 	prot = pgprot_val(protection_map[vm_flags &
 		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
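A hypothetical userspace check of the behaviour change: after this fix, mprotect(PROT_NONE) on a GCS (shadow stack) mapping should yield a genuinely inaccessible page rather than one that silently stays readable. This assumes a GCS-capable kernel/CPU and the map_shadow_stack() syscall; the flag name is taken from the uapi headers on such a system:

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t size = 4096;
	/* SHADOW_STACK_SET_TOKEN and the syscall number are assumptions
	 * about the target system's headers. */
	void *gcs = (void *)syscall(__NR_map_shadow_stack, 0, size,
				    SHADOW_STACK_SET_TOKEN);

	if (gcs == MAP_FAILED)
		return 1;
	mprotect(gcs, size, PROT_NONE);
	/* Any access through 'gcs' is now expected to SIGSEGV. */
	return 0;
}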
+1 -1
arch/arm64/net/bpf_jit_comp.c
···
 extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
 image_size = extable_offset + extable_size;
 ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
-				      sizeof(u32), &header, &image_ptr,
+				      sizeof(u64), &header, &image_ptr,
 				      jit_fill_hole);
 if (!ro_header) {
 	prog = orig_prog;
-1
arch/loongarch/kvm/Kconfig
···
 select KVM_COMMON
 select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 select KVM_GENERIC_HARDWARE_ENABLING
-select KVM_GENERIC_MMU_NOTIFIER
 select KVM_MMIO
 select VIRT_XFER_TO_GUEST_WORK
 select SCHED_INFO
-1
arch/loongarch/kvm/vm.c
···
 case KVM_CAP_ONE_REG:
 case KVM_CAP_ENABLE_CAP:
 case KVM_CAP_READONLY_MEM:
-case KVM_CAP_SYNC_MMU:
 case KVM_CAP_IMMEDIATE_EXIT:
 case KVM_CAP_IOEVENTFD:
 case KVM_CAP_MP_STATE:
-1
arch/mips/kvm/Kconfig
···
 select KVM_COMMON
 select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 select KVM_MMIO
-select KVM_GENERIC_MMU_NOTIFIER
 select KVM_GENERIC_HARDWARE_ENABLING
 select HAVE_KVM_READONLY_MEM
 help
-1
arch/mips/kvm/mips.c
···
 case KVM_CAP_ONE_REG:
 case KVM_CAP_ENABLE_CAP:
 case KVM_CAP_READONLY_MEM:
-case KVM_CAP_SYNC_MMU:
 case KVM_CAP_IMMEDIATE_EXIT:
 	r = 1;
 	break;
-4
arch/powerpc/kvm/Kconfig
···
 config KVM_BOOK3S_PR_POSSIBLE
 	bool
 	select KVM_MMIO
-	select KVM_GENERIC_MMU_NOTIFIER

 config KVM_BOOK3S_HV_POSSIBLE
 	bool
···
 tristate "KVM for POWER7 and later using hypervisor mode in host"
 depends on KVM_BOOK3S_64 && PPC_POWERNV
 select KVM_BOOK3S_HV_POSSIBLE
-select KVM_GENERIC_MMU_NOTIFIER
 select KVM_BOOK3S_HV_PMU
 select CMA
 help
···
 depends on !CONTEXT_TRACKING_USER
 select KVM
 select KVM_MMIO
-select KVM_GENERIC_MMU_NOTIFIER
 help
 	Support running unmodified E500 guest kernels in virtual machines on
 	E500v2 host processors.
···
 select KVM
 select KVM_MMIO
 select KVM_BOOKE_HV
-select KVM_GENERIC_MMU_NOTIFIER
 help
 	Support running unmodified E500MC/E5500/E6500 guest kernels in
 	virtual machines on E500MC/E5500/E6500 host processors.
-6
arch/powerpc/kvm/powerpc.c
···
 	r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
 	       !kvmppc_hv_ops->enable_nested(NULL));
 	break;
-#endif
-case KVM_CAP_SYNC_MMU:
-	BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
-	r = 1;
-	break;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 case KVM_CAP_PPC_HTAB_FD:
 	r = hv_enabled;
 	break;
-1
arch/riscv/kvm/Kconfig
···
 select KVM_GENERIC_HARDWARE_ENABLING
 select KVM_MMIO
 select VIRT_XFER_TO_GUEST_WORK
-select KVM_GENERIC_MMU_NOTIFIER
 select SCHED_INFO
 select GUEST_PERF_EVENTS if PERF_EVENTS
 help
-1
arch/riscv/kvm/vm.c
···
 	break;
 case KVM_CAP_IOEVENTFD:
 case KVM_CAP_USER_MEMORY:
-case KVM_CAP_SYNC_MMU:
 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 case KVM_CAP_ONE_REG:
 case KVM_CAP_READONLY_MEM:
+2 -2
arch/s390/include/asm/idle.h
···
 unsigned long mt_cycles_enter[8];
 };

+DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
+
 extern struct device_attribute dev_attr_idle_count;
 extern struct device_attribute dev_attr_idle_time_us;
-
-void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);

 #endif /* _S390_IDLE_H */
+34
arch/s390/include/asm/vtime.h
···
 #ifndef _S390_VTIME_H
 #define _S390_VTIME_H

+#include <asm/lowcore.h>
+#include <asm/cpu_mf.h>
+#include <asm/idle.h>
+
+DECLARE_PER_CPU(u64, mt_cycles[8]);
+
 static inline void update_timer_sys(void)
 {
 	struct lowcore *lc = get_lowcore();
···
 	lc->system_timer += lc->last_update_timer - lc->exit_timer;
 	lc->user_timer += lc->exit_timer - lc->mcck_enter_timer;
 	lc->last_update_timer = lc->mcck_enter_timer;
+}
+
+static inline void update_timer_idle(void)
+{
+	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+	struct lowcore *lc = get_lowcore();
+	u64 cycles_new[8];
+	int i, mtid;
+
+	mtid = smp_cpu_mtid;
+	if (mtid) {
+		stcctm(MT_DIAG, mtid, cycles_new);
+		for (i = 0; i < mtid; i++)
+			__this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
+	}
+	/*
+	 * This is a bit subtle: Forward last_update_clock so it excludes idle
+	 * time. For correct steal time calculation in do_account_vtime() add
+	 * passed wall time before idle_enter to steal_timer:
+	 * During the passed wall time before idle_enter CPU time may have
+	 * been accounted to system, hardirq, softirq, etc. lowcore fields.
+	 * The accounted CPU times will be subtracted again from steal_timer
+	 * when accumulated steal time is calculated in do_account_vtime().
+	 */
+	lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
+	lc->last_update_clock = lc->int_clock;
+	lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
+	lc->last_update_timer = lc->sys_enter_timer;
 }

 #endif /* _S390_VTIME_H */
-2
arch/s390/kernel/entry.h
···
 long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
 long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user *return_code, unsigned long flags);

-DECLARE_PER_CPU(u64, mt_cycles[8]);
-
 unsigned long stack_alloc(void);
 void stack_free(unsigned long stack);
+5 -20
arch/s390/kernel/idle.c
···
 #include <trace/events/power.h>
 #include <asm/cpu_mf.h>
 #include <asm/cputime.h>
+#include <asm/idle.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
-#include "entry.h"

-static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

 void account_idle_time_irq(void)
 {
 	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
-	struct lowcore *lc = get_lowcore();
 	unsigned long idle_time;
-	u64 cycles_new[8];
-	int i;

-	if (smp_cpu_mtid) {
-		stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
-		for (i = 0; i < smp_cpu_mtid; i++)
-			this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
-	}
-
-	idle_time = lc->int_clock - idle->clock_idle_enter;
-
-	lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
-	lc->last_update_clock = lc->int_clock;
-
-	lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
-	lc->last_update_timer = lc->sys_enter_timer;
+	idle_time = get_lowcore()->int_clock - idle->clock_idle_enter;

 	/* Account time spent with enabled wait psw loaded as idle time. */
-	WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
-	WRITE_ONCE(idle->idle_count, READ_ONCE(idle->idle_count) + 1);
+	__atomic64_add(idle_time, &idle->idle_time);
+	__atomic64_add_const(1, &idle->idle_count);
 	account_idle_time(cputime_to_nsecs(idle_time));
 }
+1 -1
arch/s390/kernel/ipl.c
···
 	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
 }

-void s390_reset_system(void)
+void __no_stack_protector s390_reset_system(void)
 {
 	/* Disable prefixing */
 	set_prefix(0);
+12 -8
arch/s390/kernel/irq.c
···
 struct pt_regs *old_regs = set_irq_regs(regs);
 bool from_idle;

+from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+if (from_idle) {
+	update_timer_idle();
+	regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
+}
+
 irq_enter_rcu();

 if (user_mode(regs)) {
···
 	current->thread.last_break = regs->last_break;
 }

-from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
 if (from_idle)
 	account_idle_time_irq();
···

 set_irq_regs(old_regs);
 irqentry_exit(regs, state);
-
-if (from_idle)
-	regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
 }

 void noinstr do_ext_irq(struct pt_regs *regs)
···
 irqentry_state_t state = irqentry_enter(regs);
 struct pt_regs *old_regs = set_irq_regs(regs);
 bool from_idle;
+
+from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+if (from_idle) {
+	update_timer_idle();
+	regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
+}

 irq_enter_rcu();
···
 regs->int_parm = get_lowcore()->ext_params;
 regs->int_parm_long = get_lowcore()->ext_params2;

-from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
 if (from_idle)
 	account_idle_time_irq();
···
 irq_exit_rcu();
 set_irq_regs(old_regs);
 irqentry_exit(regs, state);
-
-if (from_idle)
-	regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
 }

 static void show_msi_interrupt(struct seq_file *p, int irq)
+10 -32
arch/s390/kernel/vtime.c
···

 static inline int virt_timer_forward(u64 elapsed)
 {
-	BUG_ON(!irqs_disabled());
-
+	lockdep_assert_irqs_disabled();
 	if (list_empty(&virt_timer_list))
 		return 0;
 	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
···
 	lc->system_timer += timer;

 	/* Update MT utilization calculation */
-	if (smp_cpu_mtid &&
-	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+	if (smp_cpu_mtid && time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies)))
 		update_mt_scaling();

 	/* Calculate cputime delta */
-	user = update_tsk_timer(&tsk->thread.user_timer,
-				READ_ONCE(lc->user_timer));
-	guest = update_tsk_timer(&tsk->thread.guest_timer,
-				 READ_ONCE(lc->guest_timer));
-	system = update_tsk_timer(&tsk->thread.system_timer,
-				  READ_ONCE(lc->system_timer));
-	hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
-				   READ_ONCE(lc->hardirq_timer));
-	softirq = update_tsk_timer(&tsk->thread.softirq_timer,
-				   READ_ONCE(lc->softirq_timer));
-	lc->steal_timer +=
-		clock - user - guest - system - hardirq - softirq;
+	user = update_tsk_timer(&tsk->thread.user_timer, lc->user_timer);
+	guest = update_tsk_timer(&tsk->thread.guest_timer, lc->guest_timer);
+	system = update_tsk_timer(&tsk->thread.system_timer, lc->system_timer);
+	hardirq = update_tsk_timer(&tsk->thread.hardirq_timer, lc->hardirq_timer);
+	softirq = update_tsk_timer(&tsk->thread.softirq_timer, lc->softirq_timer);
+	lc->steal_timer += clock - user - guest - system - hardirq - softirq;

 	/* Push account value */
 	if (user) {
···
 	return timer - lc->last_update_timer;
 }

-/*
- * Update process times based on virtual cpu times stored by entry.S
- * to the lowcore fields user_timer, system_timer & steal_clock.
- */
 void vtime_account_kernel(struct task_struct *tsk)
 {
 	struct lowcore *lc = get_lowcore();
···
 		lc->guest_timer += delta;
 	else
 		lc->system_timer += delta;
-
-	virt_timer_forward(delta);
 }
 EXPORT_SYMBOL_GPL(vtime_account_kernel);

 void vtime_account_softirq(struct task_struct *tsk)
 {
-	u64 delta = vtime_delta();
-
-	get_lowcore()->softirq_timer += delta;
-
-	virt_timer_forward(delta);
+	get_lowcore()->softirq_timer += vtime_delta();
 }

 void vtime_account_hardirq(struct task_struct *tsk)
 {
-	u64 delta = vtime_delta();
-
-	get_lowcore()->hardirq_timer += delta;
-
-	virt_timer_forward(delta);
+	get_lowcore()->hardirq_timer += vtime_delta();
 }

 /*
-2
arch/s390/kvm/Kconfig
···
 select HAVE_KVM_INVALID_WAKEUPS
 select HAVE_KVM_NO_POLL
 select KVM_VFIO
-select MMU_NOTIFIER
 select VIRT_XFER_TO_GUEST_WORK
-select KVM_GENERIC_MMU_NOTIFIER
 select KVM_MMU_LOCKLESS_AGING
 help
 	Support hosting paravirtualized guest machines using the SIE
-1
arch/s390/kvm/kvm-s390.c
···
 switch (ext) {
 case KVM_CAP_S390_PSW:
 case KVM_CAP_S390_GMAP:
-case KVM_CAP_SYNC_MMU:
 #ifdef CONFIG_KVM_S390_UCONTROL
 case KVM_CAP_S390_UCONTROL:
 #endif
+2 -2
arch/s390/mm/pfault.c
··· 62 62 "0: nopr %%r7\n" 63 63 EX_TABLE(0b, 0b) 64 64 : [rc] "+d" (rc) 65 - : [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk) 65 + : [refbk] "a" (virt_to_phys(&pfault_init_refbk)), "m" (pfault_init_refbk) 66 66 : "cc"); 67 67 return rc; 68 68 } ··· 84 84 "0: nopr %%r7\n" 85 85 EX_TABLE(0b, 0b) 86 86 : 87 - : [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) 87 + : [refbk] "a" (virt_to_phys(&pfault_fini_refbk)), "m" (pfault_fini_refbk) 88 88 : "cc"); 89 89 } 90 90
+2
arch/sparc/kernel/iommu.c
···
 if (direction != DMA_TO_DEVICE)
 	iopte_protection |= IOPTE_WRITE;

+phys &= IO_PAGE_MASK;
+
 for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
 	iopte_val(*base) = iopte_protection | phys;
+2
arch/sparc/kernel/pci_sun4v.c
···

 iommu_batch_start(dev, prot, entry);

+phys &= IO_PAGE_MASK;
+
 for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) {
 	long err = iommu_batch_add(phys, mask);
 	if (unlikely(err < 0L))
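Both sparc fixes mask the intra-page offset out of phys before the per-page IOPTE loop, which advances in IO_PAGE_SIZE steps; without the masking, the offset bits would be duplicated into every entry. A small standalone illustration with sparc64-like 8K IO pages (constants re-defined here purely for the demo):

#include <assert.h>
#include <stdint.h>

#define IO_PAGE_SHIFT	13
#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK	(~(IO_PAGE_SIZE - 1))

int main(void)
{
	uint64_t phys = 0x10000abcUL;	/* page base + 0xabc offset */

	phys &= IO_PAGE_MASK;		/* drop the offset before the loop */
	assert(phys == 0x10000000UL);
	/* every phys += IO_PAGE_SIZE step now stays page-aligned */
	assert(((phys + 2 * IO_PAGE_SIZE) & ~IO_PAGE_MASK) == 0);
	return 0;
}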
+5 -5
arch/um/drivers/ubd_kern.c
···
 };

-static struct io_thread_req * (*irq_req_buffer)[];
+static struct io_thread_req **irq_req_buffer;
 static struct io_thread_req *irq_remainder;
 static int irq_remainder_size;

-static struct io_thread_req * (*io_req_buffer)[];
+static struct io_thread_req **io_req_buffer;
 static struct io_thread_req *io_remainder;
 static int io_remainder_size;
···
 static int bulk_req_safe_read(
 	int fd,
-	struct io_thread_req * (*request_buffer)[],
+	struct io_thread_req **request_buffer,
 	struct io_thread_req **remainder,
 	int *remainder_size,
 	int max_recs
···
 			     &irq_remainder, &irq_remainder_size,
 			     UBD_REQ_BUFFER_SIZE)) >= 0) {
 	for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
-		ubd_end_request((*irq_req_buffer)[i]);
+		ubd_end_request(irq_req_buffer[i]);
 }

 if (len < 0 && len != -EAGAIN)
···
 }

 for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
-	struct io_thread_req *req = (*io_req_buffer)[count];
+	struct io_thread_req *req = io_req_buffer[count];
 	int i;

 	io_count++;
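The type change above matters because the old declaration was a pointer to an array of pointers, forcing the (*buf)[i] double-indirection at every use; the new one is an ordinary pointer-to-pointer indexed as buf[i]. A toy demonstration that the two spellings name the same slot:

#include <stdlib.h>

struct io_thread_req { int dummy; };

int main(void)
{
	struct io_thread_req *reqs[4] = { 0 };

	struct io_thread_req *(*old_style)[] = &reqs;	/* pointer to array */
	struct io_thread_req **new_style = reqs;	/* pointer to element */

	struct io_thread_req *a = (*old_style)[0];	/* needs extra deref */
	struct io_thread_req *b = new_style[0];		/* direct indexing */

	return a == b ? 0 : 1;	/* both reach reqs[0] */
}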
+2 -3
arch/x86/entry/entry_fred.c
···
 static noinstr void fred_extint(struct pt_regs *regs)
 {
 	unsigned int vector = regs->fred_ss.vector;
-	unsigned int index = array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
-						NR_SYSTEM_VECTORS);

 	if (WARN_ON_ONCE(vector < FIRST_EXTERNAL_VECTOR))
 		return;
···
 	irqentry_state_t state = irqentry_enter(regs);

 	instrumentation_begin();
-	sysvec_table[index](regs);
+	sysvec_table[array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
+					NR_SYSTEM_VECTORS)](regs);
 	instrumentation_end();
 	irqentry_exit(regs, state);
 } else {
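The FRED fix applies the usual Spectre-v1 pattern: clamp the index with array_index_nospec() only on the path where the bounds check has already succeeded, instead of deriving it from the unchecked vector up front. A generic kernel-style sketch of the pattern (illustrative, not the FRED code itself):

#include <linux/nospec.h>

static int table_lookup(const int *table, unsigned int idx, unsigned int size)
{
	if (idx >= size)	/* architectural bounds check first */
		return -1;
	/* clamp under speculation only on the validated path */
	return table[array_index_nospec(idx, size)];
}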
+27 -1
arch/x86/events/intel/uncore_snbep.c
···
 	.attr_update = uncore_alias_groups,
 };

+static struct uncore_event_desc gnr_uncore_imc_events[] = {
+	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x01,umask=0x00"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0, "event=0x05,umask=0xcf"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.scale, "6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.unit, "MiB"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1, "event=0x06,umask=0xcf"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.scale, "6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.unit, "MiB"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0, "event=0x05,umask=0xf0"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.scale, "6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.unit, "MiB"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1, "event=0x06,umask=0xf0"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.scale, "6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.unit, "MiB"),
+	{ /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type gnr_uncore_imc = {
+	SPR_UNCORE_MMIO_COMMON_FORMAT(),
+	.name = "imc",
+	.fixed_ctr_bits = 48,
+	.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
+	.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
+	.event_descs = gnr_uncore_imc_events,
+};
+
 static struct intel_uncore_type gnr_uncore_pciex8 = {
 	SPR_UNCORE_PCI_COMMON_FORMAT(),
 	.name = "pciex8",
···
 	NULL,
 	&spr_uncore_pcu,
 	&gnr_uncore_ubox,
-	&spr_uncore_imc,
+	&gnr_uncore_imc,
 	NULL,
 	&gnr_uncore_upi,
 	NULL,
+3 -3
arch/x86/include/asm/bug.h
···
 #include <linux/objtool.h>
 #include <asm/asm.h>

-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 struct bug_entry;
 extern void __WARN_trap(struct bug_entry *bug, ...);
 #endif
···

 #ifdef HAVE_ARCH_BUG_FORMAT_ARGS

-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 #include <linux/static_call_types.h>
 DECLARE_STATIC_CALL(WARN_trap, __WARN_trap);
···
 	struct sysv_va_list args;
 };
 extern void *__warn_args(struct arch_va_list *args, struct pt_regs *regs);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */

 #define __WARN_bug_entry(flags, format) ({ \
 	struct bug_entry *bug; \
+8 -4
arch/x86/include/asm/cfi.h
···

 struct pt_regs;

+#ifdef CONFIG_CALL_PADDING
+#define CFI_OFFSET (CONFIG_FUNCTION_PADDING_CFI+5)
+#else
+#define CFI_OFFSET 5
+#endif
+
 #ifdef CONFIG_CFI
 enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
 #define __bpfcall
···
 {
 	switch (cfi_mode) {
 	case CFI_FINEIBT:
-		return 16;
+		return /* fineibt_prefix_size */ 16;
 	case CFI_KCFI:
-		if (IS_ENABLED(CONFIG_CALL_PADDING))
-			return 16;
-		return 5;
+		return CFI_OFFSET;
 	default:
 		return 0;
 	}
+2 -2
arch/x86/include/asm/irqflags.h
···
 #endif

 #ifndef CONFIG_PARAVIRT
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 /*
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
···
 {
 	native_halt();
 }
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 #endif /* CONFIG_PARAVIRT */

 #ifdef CONFIG_PARAVIRT_XXL
+2 -2
arch/x86/include/asm/linkage.h
···
 * Depending on -fpatchable-function-entry=N,N usage (CONFIG_CALL_PADDING) the
 * CFI symbol layout changes.
 *
-* Without CALL_THUNKS:
+* Without CALL_PADDING:
 *
 *	.align	FUNCTION_ALIGNMENT
 * __cfi_##name:
···
 *	.long	__kcfi_typeid_##name
 * name:
 *
-* With CALL_THUNKS:
+* With CALL_PADDING:
 *
 *	.align	FUNCTION_ALIGNMENT
 * __cfi_##name:
+1 -1
arch/x86/include/asm/percpu.h
···

 #define PER_CPU_VAR(var) __percpu(var)__percpu_rel

-#else /* !__ASSEMBLY__: */
+#else /* !__ASSEMBLER__: */

 #include <linux/args.h>
 #include <linux/bits.h>
+3 -3
arch/x86/include/asm/runtime-const.h
···
 #error "Cannot use runtime-const infrastructure from modules"
 #endif

-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__

 .macro RUNTIME_CONST_PTR sym reg
 	movq $0x0123456789abcdef, %\reg
···
 .popsection
 .endm

-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */

 #define runtime_const_ptr(sym) ({ \
 	typeof(sym) __ret; \
···
 	}
 }

-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 #endif
+2
arch/x86/include/asm/traps.h
···
 void handle_invalid_op(struct pt_regs *regs);
 #endif

+noinstr bool handle_bug(struct pt_regs *regs);
+
 static inline int get_si_code(unsigned long condition)
 {
 	if (condition & DR_STEP)
+22 -7
arch/x86/kernel/alternative.c
···

 poison_endbr(addr);
 if (IS_ENABLED(CONFIG_FINEIBT))
-	poison_cfi(addr - 16);
+	poison_cfi(addr - CFI_OFFSET);
 }
 }
···
 #define fineibt_preamble_bhi  (fineibt_preamble_bhi - fineibt_preamble_start)
 #define fineibt_preamble_ud   0x13
 #define fineibt_preamble_hash 5
+
+#define fineibt_prefix_size   (fineibt_preamble_size - ENDBR_INSN_SIZE)

 /*
  * <fineibt_caller_start>:
···
 * have determined there are no indirect calls to it and we
 * don't need no CFI either.
 */
-if (!is_endbr(addr + 16))
+if (!is_endbr(addr + CFI_OFFSET))
 	continue;

 hash = decode_preamble_hash(addr, &arity);
 if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
 	 addr, addr, 5, addr))
 	return -EINVAL;
+
+/*
+ * FineIBT relies on being at func-16, so if the preamble is
+ * actually larger than that, place it the tail end.
+ *
+ * NOTE: this is possible with things like DEBUG_CALL_THUNKS
+ * and DEBUG_FORCE_FUNCTION_ALIGN_64B.
+ */
+addr += CFI_OFFSET - fineibt_prefix_size;

 text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
 WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
···
 for (s = start; s < end; s++) {
 	void *addr = (void *)s + *s;

-	if (!exact_endbr(addr + 16))
+	if (!exact_endbr(addr + CFI_OFFSET))
 		continue;

-	poison_endbr(addr + 16);
+	poison_endbr(addr + CFI_OFFSET);
 }
 }
···
 if (FINEIBT_WARN(fineibt_preamble_size, 20) ||
     FINEIBT_WARN(fineibt_preamble_bhi + fineibt_bhi1_size, 20) ||
     FINEIBT_WARN(fineibt_caller_size, 14) ||
-    FINEIBT_WARN(fineibt_paranoid_size, 20))
+    FINEIBT_WARN(fineibt_paranoid_size, 20) ||
+    WARN_ON_ONCE(CFI_OFFSET < fineibt_prefix_size))
 	return;

 if (cfi_mode == CFI_AUTO) {
···
 switch (cfi_mode) {
 case CFI_FINEIBT:
 	/*
+	 * FineIBT preamble is at func-16.
+	 */
+	addr += CFI_OFFSET - fineibt_prefix_size;
+
+	/*
 	 * FineIBT prefix should start with an ENDBR.
 	 */
 	if (!is_endbr(addr))
···
 	break;
 }
 }
-
-#define fineibt_prefix_size   (fineibt_preamble_size - ENDBR_INSN_SIZE)

 /*
  * When regs->ip points to a 0xD6 byte in the FineIBT preamble,
+1 -1
arch/x86/kernel/traps.c
··· 397 397 ILL_ILLOPN, error_get_trap_addr(regs)); 398 398 } 399 399 400 - static noinstr bool handle_bug(struct pt_regs *regs) 400 + noinstr bool handle_bug(struct pt_regs *regs) 401 401 { 402 402 unsigned long addr = regs->ip; 403 403 bool handled = false;
-1
arch/x86/kvm/Kconfig
··· 20 20 config KVM_X86 21 21 def_tristate KVM if (KVM_INTEL != n || KVM_AMD != n) 22 22 select KVM_COMMON 23 - select KVM_GENERIC_MMU_NOTIFIER 24 23 select KVM_ELIDE_TLB_FLUSH_IF_YOUNG 25 24 select KVM_MMU_LOCKLESS_AGING 26 25 select HAVE_KVM_IRQCHIP
-1
arch/x86/kvm/x86.c
··· 4805 4805 #endif 4806 4806 case KVM_CAP_NOP_IO_DELAY: 4807 4807 case KVM_CAP_MP_STATE: 4808 - case KVM_CAP_SYNC_MMU: 4809 4808 case KVM_CAP_USER_NMI: 4810 4809 case KVM_CAP_IRQ_INJECT_STATUS: 4811 4810 case KVM_CAP_IOEVENTFD:
+2 -5
arch/x86/mm/extable.c
··· 411 411 return; 412 412 413 413 if (trapnr == X86_TRAP_UD) { 414 - if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) { 415 - /* Skip the ud2. */ 416 - regs->ip += LEN_UD2; 414 + if (handle_bug(regs)) 417 415 return; 418 - } 419 416 420 417 /* 421 - * If this was a BUG and report_bug returns or if this 418 + * If this was a BUG and handle_bug returns or if this 422 419 * was just a normal #UD, we want to continue onward and 423 420 * crash. 424 421 */
+2 -11
arch/x86/net/bpf_jit_comp.c
··· 438 438 439 439 EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ 440 440 #ifdef CONFIG_CALL_PADDING 441 - EMIT1(0x90); 442 - EMIT1(0x90); 443 - EMIT1(0x90); 444 - EMIT1(0x90); 445 - EMIT1(0x90); 446 - EMIT1(0x90); 447 - EMIT1(0x90); 448 - EMIT1(0x90); 449 - EMIT1(0x90); 450 - EMIT1(0x90); 451 - EMIT1(0x90); 441 + for (int i = 0; i < CONFIG_FUNCTION_PADDING_CFI; i++) 442 + EMIT1(0x90); 452 443 #endif 453 444 EMIT_ENDBR(); 454 445
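Replacing eleven unrolled EMIT1(0x90) lines with a loop ties the emitted NOP padding to CONFIG_FUNCTION_PADDING_CFI instead of a hand-counted literal that goes stale if the config changes. The shape of the emitter, as a runnable toy (emit1() and the padding constant are stand-ins for the JIT's macros):

#include <stdio.h>

#define FUNCTION_PADDING_CFI 11		/* stand-in for the Kconfig value */

static unsigned char image[32];
static int pos;

static void emit1(unsigned char byte)	/* toy EMIT1() */
{
	image[pos++] = byte;
}

int main(void)
{
	emit1(0xb8);			/* movl $hash, %eax opcode */
	for (int i = 0; i < 4; i++)
		emit1(0x78);		/* placeholder hash bytes */

	for (int i = 0; i < FUNCTION_PADDING_CFI; i++)
		emit1(0x90);		/* nop padding, count from config */

	printf("emitted %d bytes\n", pos);
	return 0;
}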
+19 -13
drivers/accel/amdxdna/aie2_ctx.c
··· 23 23 #include "amdxdna_pci_drv.h" 24 24 #include "amdxdna_pm.h" 25 25 26 - static bool force_cmdlist; 26 + static bool force_cmdlist = true; 27 27 module_param(force_cmdlist, bool, 0600); 28 - MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default false)"); 28 + MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default true)"); 29 29 30 30 #define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */ 31 31 ··· 53 53 { 54 54 drm_sched_stop(&hwctx->priv->sched, bad_job); 55 55 aie2_destroy_context(xdna->dev_handle, hwctx); 56 + drm_sched_start(&hwctx->priv->sched, 0); 56 57 } 57 58 58 59 static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx) ··· 81 80 } 82 81 83 82 out: 84 - drm_sched_start(&hwctx->priv->sched, 0); 85 83 XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret); 86 84 return ret; 87 85 } ··· 297 297 struct dma_fence *fence; 298 298 int ret; 299 299 300 - if (!hwctx->priv->mbox_chann) 300 + ret = amdxdna_pm_resume_get(hwctx->client->xdna); 301 + if (ret) 301 302 return NULL; 302 303 303 - if (!mmget_not_zero(job->mm)) 304 + if (!hwctx->priv->mbox_chann) { 305 + amdxdna_pm_suspend_put(hwctx->client->xdna); 306 + return NULL; 307 + } 308 + 309 + if (!mmget_not_zero(job->mm)) { 310 + amdxdna_pm_suspend_put(hwctx->client->xdna); 304 311 return ERR_PTR(-ESRCH); 312 + } 305 313 306 314 kref_get(&job->refcnt); 307 315 fence = dma_fence_get(job->fence); 308 - 309 - ret = amdxdna_pm_resume_get(hwctx->client->xdna); 310 - if (ret) 311 - goto out; 312 316 313 317 if (job->drv_cmd) { 314 318 switch (job->drv_cmd->opcode) { ··· 501 497 502 498 if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) { 503 499 ret = aie2_destroy_context(xdna->dev_handle, hwctx); 504 - if (ret) 500 + if (ret && ret != -ENODEV) 505 501 XDNA_ERR(xdna, "Destroy temporal only context failed, ret %d", ret); 506 502 } else { 507 503 ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx); ··· 633 629 goto free_entity; 634 630 } 635 631 636 - ret = amdxdna_pm_resume_get(xdna); 632 + ret = amdxdna_pm_resume_get_locked(xdna); 637 633 if (ret) 638 634 goto free_col_list; 639 635 ··· 764 760 if (!hwctx->cus) 765 761 return -ENOMEM; 766 762 767 - ret = amdxdna_pm_resume_get(xdna); 763 + ret = amdxdna_pm_resume_get_locked(xdna); 768 764 if (ret) 769 765 goto free_cus; 770 766 ··· 1074 1070 1075 1071 ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP, 1076 1072 true, MAX_SCHEDULE_TIMEOUT); 1077 - if (!ret || ret == -ERESTARTSYS) 1073 + if (!ret) 1078 1074 XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret); 1075 + else if (ret == -ERESTARTSYS) 1076 + XDNA_DBG(xdna, "Wait for bo interrupted by signal"); 1079 1077 }
+10 -5
drivers/accel/amdxdna/aie2_message.c
··· 216 216 217 217 req.context_id = id; 218 218 ret = aie2_send_mgmt_msg_wait(ndev, &msg); 219 - if (ret) 219 + if (ret && ret != -ENODEV) 220 220 XDNA_WARN(xdna, "Destroy context failed, ret %d", ret); 221 + else if (ret == -ENODEV) 222 + XDNA_DBG(xdna, "Destroy context: device already stopped"); 221 223 222 224 return ret; 223 225 } ··· 319 317 { 320 318 struct amdxdna_dev *xdna = ndev->xdna; 321 319 int ret; 320 + 321 + if (!hwctx->priv->mbox_chann) 322 + return 0; 322 323 323 324 xdna_mailbox_stop_channel(hwctx->priv->mbox_chann); 324 325 ret = aie2_destroy_context_req(ndev, hwctx->fw_ctx_id); ··· 699 694 u32 cmd_len; 700 695 void *cmd; 701 696 702 - memset(npu_slot, 0, sizeof(*npu_slot)); 703 697 cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); 704 698 if (*size < sizeof(*npu_slot) + cmd_len) 705 699 return -EINVAL; 706 700 701 + memset(npu_slot, 0, sizeof(*npu_slot)); 707 702 npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); 708 703 if (npu_slot->cu_idx == INVALID_CU_IDX) 709 704 return -EINVAL; ··· 724 719 u32 cmd_len; 725 720 u32 arg_sz; 726 721 727 - memset(npu_slot, 0, sizeof(*npu_slot)); 728 722 sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); 729 723 arg_sz = cmd_len - sizeof(*sn); 730 724 if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE) ··· 732 728 if (*size < sizeof(*npu_slot) + arg_sz) 733 729 return -EINVAL; 734 730 731 + memset(npu_slot, 0, sizeof(*npu_slot)); 735 732 npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); 736 733 if (npu_slot->cu_idx == INVALID_CU_IDX) 737 734 return -EINVAL; ··· 756 751 u32 cmd_len; 757 752 u32 arg_sz; 758 753 759 - memset(npu_slot, 0, sizeof(*npu_slot)); 760 754 pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); 761 755 arg_sz = cmd_len - sizeof(*pd); 762 756 if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE) ··· 764 760 if (*size < sizeof(*npu_slot) + arg_sz) 765 761 return -EINVAL; 766 762 763 + memset(npu_slot, 0, sizeof(*npu_slot)); 767 764 npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); 768 765 if (npu_slot->cu_idx == INVALID_CU_IDX) 769 766 return -EINVAL; ··· 792 787 u32 cmd_len; 793 788 u32 arg_sz; 794 789 795 - memset(npu_slot, 0, sizeof(*npu_slot)); 796 790 pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); 797 791 arg_sz = cmd_len - sizeof(*pd); 798 792 if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE) ··· 800 796 if (*size < sizeof(*npu_slot) + arg_sz) 801 797 return -EINVAL; 802 798 799 + memset(npu_slot, 0, sizeof(*npu_slot)); 803 800 npu_slot->type = EXEC_NPU_TYPE_ELF; 804 801 npu_slot->inst_buf_addr = pd->inst_buf; 805 802 npu_slot->save_buf_addr = pd->save_buf;
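Each fill-slot helper above now checks the incoming command length before the memset(). One plausible reading of the reordering: npu_slot points into a caller-supplied buffer with *size bytes of room left, so zeroing sizeof(*npu_slot) bytes before confirming the buffer can hold a slot could write past its end. A compact sketch of the validate-then-initialize order (struct slot is illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct slot {			/* hypothetical fixed-size slot */
	unsigned int cu_idx;
	unsigned int arg_sz;
};

static int fill_slot(void *buf, size_t room, size_t arg_sz)
{
	struct slot *s = buf;

	/* Reject before touching the buffer, not after. */
	if (room < sizeof(*s) + arg_sz)
		return -EINVAL;

	memset(s, 0, sizeof(*s));
	s->arg_sz = arg_sz;
	return 0;
}

int main(void)
{
	char buf[4];		/* too small to hold a slot */

	if (fill_slot(buf, sizeof(buf), 0) == -EINVAL)
		puts("short buffer rejected without being written");
	return 0;
}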
+29 -11
drivers/accel/amdxdna/aie2_pci.c
··· 32 32 module_param(aie2_max_col, uint, 0600); 33 33 MODULE_PARM_DESC(aie2_max_col, "Maximum column could be used"); 34 34 35 + static char *npu_fw[] = { 36 + "npu_7.sbin", 37 + "npu.sbin" 38 + }; 39 + 35 40 /* 36 41 * The management mailbox channel is allocated by firmware. 37 42 * The related register and ring buffer information is on SRAM BAR. ··· 328 323 return; 329 324 } 330 325 326 + aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, NULL); 331 327 aie2_mgmt_fw_fini(ndev); 332 328 xdna_mailbox_stop_channel(ndev->mgmt_chann); 333 329 xdna_mailbox_destroy_channel(ndev->mgmt_chann); ··· 412 406 goto stop_psp; 413 407 } 414 408 415 - ret = aie2_pm_init(ndev); 416 - if (ret) { 417 - XDNA_ERR(xdna, "failed to init pm, ret %d", ret); 418 - goto destroy_mgmt_chann; 419 - } 420 - 421 409 ret = aie2_mgmt_fw_init(ndev); 422 410 if (ret) { 423 411 XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret); 412 + goto destroy_mgmt_chann; 413 + } 414 + 415 + ret = aie2_pm_init(ndev); 416 + if (ret) { 417 + XDNA_ERR(xdna, "failed to init pm, ret %d", ret); 424 418 goto destroy_mgmt_chann; 425 419 } 426 420 ··· 457 451 { 458 452 struct amdxdna_client *client; 459 453 460 - guard(mutex)(&xdna->dev_lock); 461 454 list_for_each_entry(client, &xdna->client_list, node) 462 455 aie2_hwctx_suspend(client); 463 456 ··· 494 489 struct psp_config psp_conf; 495 490 const struct firmware *fw; 496 491 unsigned long bars = 0; 492 + char *fw_full_path; 497 493 int i, nvec, ret; 498 494 499 495 if (!hypervisor_is_type(X86_HYPER_NATIVE)) { ··· 509 503 ndev->priv = xdna->dev_info->dev_priv; 510 504 ndev->xdna = xdna; 511 505 512 - ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev); 506 + for (i = 0; i < ARRAY_SIZE(npu_fw); i++) { 507 + fw_full_path = kasprintf(GFP_KERNEL, "%s%s", ndev->priv->fw_path, npu_fw[i]); 508 + if (!fw_full_path) 509 + return -ENOMEM; 510 + 511 + ret = firmware_request_nowarn(&fw, fw_full_path, &pdev->dev); 512 + kfree(fw_full_path); 513 + if (!ret) { 514 + XDNA_INFO(xdna, "Load firmware %s%s", ndev->priv->fw_path, npu_fw[i]); 515 + break; 516 + } 517 + } 518 + 513 519 if (ret) { 514 520 XDNA_ERR(xdna, "failed to request_firmware %s, ret %d", 515 521 ndev->priv->fw_path, ret); ··· 969 951 if (!drm_dev_enter(&xdna->ddev, &idx)) 970 952 return -ENODEV; 971 953 972 - ret = amdxdna_pm_resume_get(xdna); 954 + ret = amdxdna_pm_resume_get_locked(xdna); 973 955 if (ret) 974 956 goto dev_exit; 975 957 ··· 1062 1044 if (!drm_dev_enter(&xdna->ddev, &idx)) 1063 1045 return -ENODEV; 1064 1046 1065 - ret = amdxdna_pm_resume_get(xdna); 1047 + ret = amdxdna_pm_resume_get_locked(xdna); 1066 1048 if (ret) 1067 1049 goto dev_exit; 1068 1050 ··· 1152 1134 if (!drm_dev_enter(&xdna->ddev, &idx)) 1153 1135 return -ENODEV; 1154 1136 1155 - ret = amdxdna_pm_resume_get(xdna); 1137 + ret = amdxdna_pm_resume_get_locked(xdna); 1156 1138 if (ret) 1157 1139 goto dev_exit; 1158 1140
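Instead of one fixed firmware file, probe now walks a small list of names under the per-device directory (fw_path becomes a directory in the *_regs.c hunks below), using firmware_request_nowarn() so the expected misses stay out of the log. The same try-in-order shape in plain C, with fopen() standing in for the firmware request:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static const char *fw_names[] = { "npu_7.sbin", "npu.sbin" };

static FILE *open_firmware(const char *dir)
{
	FILE *fp = NULL;

	for (size_t i = 0; i < sizeof(fw_names) / sizeof(fw_names[0]); i++) {
		char *path;

		if (asprintf(&path, "%s%s", dir, fw_names[i]) < 0)
			return NULL;
		fp = fopen(path, "rb");	/* quiet on failure, like _nowarn */
		if (fp)
			printf("loaded %s\n", path);
		free(path);
		if (fp)
			break;
	}
	return fp;
}

int main(void)
{
	FILE *fp = open_firmware("amdnpu/1502_00/");

	if (!fp)
		fprintf(stderr, "no firmware variant found\n");
	else
		fclose(fp);
	return 0;
}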
+1 -1
drivers/accel/amdxdna/aie2_pm.c
··· 31 31 { 32 32 int ret; 33 33 34 - ret = amdxdna_pm_resume_get(ndev->xdna); 34 + ret = amdxdna_pm_resume_get_locked(ndev->xdna); 35 35 if (ret) 36 36 return ret; 37 37
+11 -13
drivers/accel/amdxdna/amdxdna_ctx.c
··· 104 104 105 105 if (size) { 106 106 count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header); 107 - if (unlikely(count <= num_masks)) { 107 + if (unlikely(count <= num_masks || 108 + count * sizeof(u32) + 109 + offsetof(struct amdxdna_cmd, data[0]) > 110 + abo->mem.size)) { 108 111 *size = 0; 109 112 return NULL; 110 113 } ··· 269 266 struct amdxdna_drm_config_hwctx *args = data; 270 267 struct amdxdna_dev *xdna = to_xdna_dev(dev); 271 268 struct amdxdna_hwctx *hwctx; 272 - int ret, idx; 273 269 u32 buf_size; 274 270 void *buf; 271 + int ret; 275 272 u64 val; 276 273 277 274 if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad))) ··· 313 310 return -EINVAL; 314 311 } 315 312 316 - mutex_lock(&xdna->dev_lock); 317 - idx = srcu_read_lock(&client->hwctx_srcu); 313 + guard(mutex)(&xdna->dev_lock); 318 314 hwctx = xa_load(&client->hwctx_xa, args->handle); 319 315 if (!hwctx) { 320 316 XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle); 321 317 ret = -EINVAL; 322 - goto unlock_srcu; 318 + goto free_buf; 323 319 } 324 320 325 321 ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size); 326 322 327 - unlock_srcu: 328 - srcu_read_unlock(&client->hwctx_srcu, idx); 329 - mutex_unlock(&xdna->dev_lock); 323 + free_buf: 330 324 kfree(buf); 331 325 return ret; 332 326 } ··· 334 334 struct amdxdna_hwctx *hwctx; 335 335 struct amdxdna_gem_obj *abo; 336 336 struct drm_gem_object *gobj; 337 - int ret, idx; 337 + int ret; 338 338 339 339 if (!xdna->dev_info->ops->hwctx_sync_debug_bo) 340 340 return -EOPNOTSUPP; ··· 345 345 346 346 abo = to_xdna_obj(gobj); 347 347 guard(mutex)(&xdna->dev_lock); 348 - idx = srcu_read_lock(&client->hwctx_srcu); 349 348 hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx); 350 349 if (!hwctx) { 351 350 ret = -EINVAL; 352 - goto unlock_srcu; 351 + goto put_obj; 353 352 } 354 353 355 354 ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl); 356 355 357 - unlock_srcu: 358 - srcu_read_unlock(&client->hwctx_srcu, idx); 356 + put_obj: 359 357 drm_gem_object_put(gobj); 360 358 return ret; 361 359 }
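The payload lookup now rejects a header whose count field claims more data words than the backing object holds, guarding against an out-of-bounds read on user-controlled BO contents. A self-contained version of the check (struct cmd and its field layout are illustrative, not the driver's exact structures):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cmd {			/* illustrative header layout */
	uint32_t header;
	uint32_t data[];	/* 'count' words are claimed to follow */
};

static const uint32_t *cmd_payload(const struct cmd *c, size_t bo_size,
				   uint32_t num_masks, uint32_t count)
{
	if (count <= num_masks ||
	    offsetof(struct cmd, data) +
	    (size_t)count * sizeof(uint32_t) > bo_size)
		return NULL;	/* claimed payload exceeds the object */

	return &c->data[num_masks];
}

int main(void)
{
	uint32_t raw[6] = { 0 };	/* a 24-byte "BO" */
	const struct cmd *c = (const struct cmd *)raw;

	printf("count=1024 -> %p\n", (void *)cmd_payload(c, sizeof(raw), 1, 1024));
	printf("count=5    -> %p\n", (void *)cmd_payload(c, sizeof(raw), 1, 5));
	return 0;
}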
+19 -19
drivers/accel/amdxdna/amdxdna_gem.c
··· 21 21 #include "amdxdna_pci_drv.h" 22 22 #include "amdxdna_ubuf.h" 23 23 24 - #define XDNA_MAX_CMD_BO_SIZE SZ_32K 25 - 26 24 MODULE_IMPORT_NS("DMA_BUF"); 27 25 28 26 static int ··· 743 745 { 744 746 struct amdxdna_dev *xdna = to_xdna_dev(dev); 745 747 struct amdxdna_gem_obj *abo; 746 - int ret; 747 - 748 - if (args->size > XDNA_MAX_CMD_BO_SIZE) { 749 - XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size); 750 - return ERR_PTR(-EINVAL); 751 - } 752 748 753 749 if (args->size < sizeof(struct amdxdna_cmd)) { 754 750 XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size); ··· 756 764 abo->type = AMDXDNA_BO_CMD; 757 765 abo->client = filp->driver_priv; 758 766 759 - ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); 760 - if (ret) { 761 - XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); 762 - goto release_obj; 763 - } 764 - 765 767 return abo; 766 - 767 - release_obj: 768 - drm_gem_object_put(to_gobj(abo)); 769 - return ERR_PTR(ret); 770 768 } 771 769 772 770 int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ··· 853 871 struct amdxdna_dev *xdna = client->xdna; 854 872 struct amdxdna_gem_obj *abo; 855 873 struct drm_gem_object *gobj; 874 + int ret; 856 875 857 876 gobj = drm_gem_object_lookup(client->filp, bo_hdl); 858 877 if (!gobj) { ··· 862 879 } 863 880 864 881 abo = to_xdna_obj(gobj); 865 - if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type) 882 + if (bo_type != AMDXDNA_BO_INVALID && abo->type != bo_type) 883 + goto put_obj; 884 + 885 + if (bo_type != AMDXDNA_BO_CMD || abo->mem.kva) 866 886 return abo; 867 887 888 + if (abo->mem.size > SZ_32K) { 889 + XDNA_ERR(xdna, "Cmd bo is too big %ld", abo->mem.size); 890 + goto put_obj; 891 + } 892 + 893 + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); 894 + if (ret) { 895 + XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); 896 + goto put_obj; 897 + } 898 + 899 + return abo; 900 + 901 + put_obj: 868 902 drm_gem_object_put(gobj); 869 903 return NULL; 870 904 }
+3
drivers/accel/amdxdna/amdxdna_pci_drv.c
··· 23 23 MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin"); 24 24 MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin"); 25 25 MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin"); 26 + MODULE_FIRMWARE("amdnpu/1502_00/npu_7.sbin"); 27 + MODULE_FIRMWARE("amdnpu/17f0_10/npu_7.sbin"); 28 + MODULE_FIRMWARE("amdnpu/17f0_11/npu_7.sbin"); 26 29 27 30 /* 28 31 * 0.0: Initial version
+2
drivers/accel/amdxdna/amdxdna_pm.c
··· 16 16 struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); 17 17 int ret = -EOPNOTSUPP; 18 18 19 + guard(mutex)(&xdna->dev_lock); 19 20 if (xdna->dev_info->ops->suspend) 20 21 ret = xdna->dev_info->ops->suspend(xdna); 21 22 ··· 29 28 struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); 30 29 int ret = -EOPNOTSUPP; 31 30 31 + guard(mutex)(&xdna->dev_lock); 32 32 if (xdna->dev_info->ops->resume) 33 33 ret = xdna->dev_info->ops->resume(xdna); 34 34
+11
drivers/accel/amdxdna/amdxdna_pm.h
··· 15 15 void amdxdna_pm_init(struct amdxdna_dev *xdna); 16 16 void amdxdna_pm_fini(struct amdxdna_dev *xdna); 17 17 18 + static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna) 19 + { 20 + int ret; 21 + 22 + mutex_unlock(&xdna->dev_lock); 23 + ret = amdxdna_pm_resume_get(xdna); 24 + mutex_lock(&xdna->dev_lock); 25 + 26 + return ret; 27 + } 28 + 18 29 #endif /* _AMDXDNA_PM_H_ */
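amdxdna_pm_resume_get_locked() exists because the suspend/resume callbacks in the amdxdna_pm.c hunk above now take dev_lock themselves: a caller that already holds dev_lock must drop it across the resume or it deadlocks against its own lock. A pthread sketch of the same shape (the function names follow the driver, the bodies are toys):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static int pm_resume_get(void)
{
	pthread_mutex_lock(&dev_lock);	/* resume takes dev_lock itself */
	puts("device resumed");
	pthread_mutex_unlock(&dev_lock);
	return 0;
}

static int pm_resume_get_locked(void)
{
	int ret;

	pthread_mutex_unlock(&dev_lock);	/* avoid self-deadlock */
	ret = pm_resume_get();
	pthread_mutex_lock(&dev_lock);
	return ret;
}

int main(void)
{
	pthread_mutex_lock(&dev_lock);
	pm_resume_get_locked();
	pthread_mutex_unlock(&dev_lock);
	return 0;
}

The window where dev_lock is dropped means state can change underneath the caller, which is the usual cost of this unlock-call-relock pattern.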
+5 -1
drivers/accel/amdxdna/amdxdna_ubuf.c
··· 7 7 #include <drm/drm_device.h> 8 8 #include <drm/drm_print.h> 9 9 #include <linux/dma-buf.h> 10 + #include <linux/overflow.h> 10 11 #include <linux/pagemap.h> 11 12 #include <linux/vmalloc.h> 12 13 ··· 177 176 goto free_ent; 178 177 } 179 178 180 - exp_info.size += va_ent[i].len; 179 + if (check_add_overflow(exp_info.size, va_ent[i].len, &exp_info.size)) { 180 + ret = -EINVAL; 181 + goto free_ent; 182 + } 181 183 } 182 184 183 185 ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
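The sizes here come from a user-supplied array of VA ranges, so the running total is now accumulated with check_add_overflow(): an unchecked u64 sum could wrap to a small value and sail through the later page-count math. check_add_overflow() wraps the compiler builtin, which this sketch uses directly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total = UINT64_MAX - 4096;	/* nearly wrapped already */
	uint64_t len = 8192;			/* next user-chosen length */

	if (__builtin_add_overflow(total, len, &total)) {
		fprintf(stderr, "overflow rejected, as with -EINVAL\n");
		return 1;
	}
	printf("total = %llu\n", (unsigned long long)total);
	return 0;
}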
+1 -1
drivers/accel/amdxdna/npu1_regs.c
··· 72 72 }; 73 73 74 74 static const struct amdxdna_dev_priv npu1_dev_priv = { 75 - .fw_path = "amdnpu/1502_00/npu.sbin", 75 + .fw_path = "amdnpu/1502_00/", 76 76 .rt_config = npu1_default_rt_cfg, 77 77 .dpm_clk_tbl = npu1_dpm_clk_table, 78 78 .fw_feature_tbl = npu1_fw_feature_table,
+1 -1
drivers/accel/amdxdna/npu4_regs.c
··· 98 98 }; 99 99 100 100 static const struct amdxdna_dev_priv npu4_dev_priv = { 101 - .fw_path = "amdnpu/17f0_10/npu.sbin", 101 + .fw_path = "amdnpu/17f0_10/", 102 102 .rt_config = npu4_default_rt_cfg, 103 103 .dpm_clk_tbl = npu4_dpm_clk_table, 104 104 .fw_feature_tbl = npu4_fw_feature_table,
+1 -1
drivers/accel/amdxdna/npu5_regs.c
··· 63 63 #define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE 64 64 65 65 static const struct amdxdna_dev_priv npu5_dev_priv = { 66 - .fw_path = "amdnpu/17f0_11/npu.sbin", 66 + .fw_path = "amdnpu/17f0_11/", 67 67 .rt_config = npu4_default_rt_cfg, 68 68 .dpm_clk_tbl = npu4_dpm_clk_table, 69 69 .fw_feature_tbl = npu4_fw_feature_table,
+1 -1
drivers/accel/amdxdna/npu6_regs.c
··· 63 63 #define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE 64 64 65 65 static const struct amdxdna_dev_priv npu6_dev_priv = { 66 - .fw_path = "amdnpu/17f0_10/npu.sbin", 66 + .fw_path = "amdnpu/17f0_10/", 67 67 .rt_config = npu4_default_rt_cfg, 68 68 .dpm_clk_tbl = npu4_dpm_clk_table, 69 69 .fw_feature_tbl = npu4_fw_feature_table,
+1 -1
drivers/accel/ethosu/ethosu_gem.c
··· 154 154 155 155 static u64 cmd_to_addr(u32 *cmd) 156 156 { 157 - return ((u64)((cmd[0] & 0xff0000) << 16)) | cmd[1]; 157 + return (((u64)cmd[0] & 0xff0000) << 16) | cmd[1]; 158 158 } 159 159 160 160 static u64 dma_length(struct ethosu_validated_cmdstream_info *info,
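In the old cmd_to_addr() the cast to u64 happened only after the shift, which C performs in 32 bits: bits 16-23 of cmd[0] were shifted off the top, so the high address byte was silently lost and the function effectively returned cmd[1] alone. A standalone demonstration of the precedence difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cmd[2] = { 0x00ab0000, 0x12345678 };

	/* Shift happens in 32 bits: bits 16-23 fall off the top. */
	uint64_t broken = ((uint64_t)((cmd[0] & 0xff0000) << 16)) | cmd[1];

	/* Widen to 64 bits first, then shift. */
	uint64_t fixed = (((uint64_t)cmd[0] & 0xff0000) << 16) | cmd[1];

	printf("broken = %#018llx\n", (unsigned long long)broken);
	printf("fixed  = %#018llx\n", (unsigned long long)fixed);
	return 0;
}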
+13
drivers/acpi/osi.c
··· 390 390 }, 391 391 392 392 /* 393 + * The screen backlight turns off during udev device creation 394 + * when returning true for _OSI("Windows 2009") 395 + */ 396 + { 397 + .callback = dmi_disable_osi_win7, 398 + .ident = "Acer Aspire One D255", 399 + .matches = { 400 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 401 + DMI_MATCH(DMI_PRODUCT_NAME, "AOD255"), 402 + }, 403 + }, 404 + 405 + /* 393 406 * The wireless hotkey does not work on those machines when 394 407 * returning true for _OSI("Windows 2012") 395 408 */
+8
drivers/acpi/sleep.c
··· 386 386 DMI_MATCH(DMI_PRODUCT_NAME, "80E1"), 387 387 }, 388 388 }, 389 + { 390 + .callback = init_nvs_save_s3, 391 + .ident = "Lenovo G70-35", 392 + .matches = { 393 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 394 + DMI_MATCH(DMI_PRODUCT_NAME, "80Q5"), 395 + }, 396 + }, 389 397 /* 390 398 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using 391 399 * the Low Power S0 Idle firmware interface (see
+3 -5
drivers/ata/libata-core.c
··· 6269 6269 } 6270 6270 } 6271 6271 6272 - /* Make sure the deferred qc work finished. */ 6273 - cancel_work_sync(&ap->deferred_qc_work); 6274 - WARN_ON(ap->deferred_qc); 6275 - 6276 6272 /* Tell EH to disable all devices */ 6277 6273 ap->pflags |= ATA_PFLAG_UNLOADING; 6278 6274 ata_port_schedule_eh(ap); ··· 6279 6283 /* wait till EH commits suicide */ 6280 6284 ata_port_wait_eh(ap); 6281 6285 6282 - /* it better be dead now */ 6286 + /* It better be dead now and not have any remaining deferred qc. */ 6283 6287 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6288 + WARN_ON(ap->deferred_qc); 6284 6289 6290 + cancel_work_sync(&ap->deferred_qc_work); 6285 6291 cancel_delayed_work_sync(&ap->hotplug_task); 6286 6292 cancel_delayed_work_sync(&ap->scsi_rescan_task); 6287 6293
+19 -3
drivers/ata/libata-eh.c
··· 640 640 set_host_byte(scmd, DID_OK); 641 641 642 642 ata_qc_for_each_raw(ap, qc, i) { 643 - if (qc->flags & ATA_QCFLAG_ACTIVE && 644 - qc->scsicmd == scmd) 643 + if (qc->scsicmd != scmd) 644 + continue; 645 + if ((qc->flags & ATA_QCFLAG_ACTIVE) || 646 + qc == ap->deferred_qc) 645 647 break; 646 648 } 647 649 648 - if (i < ATA_MAX_QUEUE) { 650 + if (qc == ap->deferred_qc) { 651 + /* 652 + * This is a deferred command that timed out while 653 + * waiting for the command queue to drain. Since the qc 654 + * is not active yet (deferred_qc is still set, so the 655 + * deferred qc work has not issued the command yet), 656 + * simply signal the timeout by finishing the SCSI 657 + * command and clear the deferred qc to prevent the 658 + * deferred qc work from issuing this qc. 659 + */ 660 + WARN_ON_ONCE(qc->flags & ATA_QCFLAG_ACTIVE); 661 + ap->deferred_qc = NULL; 662 + set_host_byte(scmd, DID_TIME_OUT); 663 + scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 664 + } else if (i < ATA_MAX_QUEUE) { 649 665 /* the scmd has an associated qc */ 650 666 if (!(qc->flags & ATA_QCFLAG_EH)) { 651 667 /* which hasn't failed yet, timeout */
+13 -14
drivers/base/property.c
··· 797 797 fwnode_get_next_child_node(const struct fwnode_handle *fwnode, 798 798 struct fwnode_handle *child) 799 799 { 800 - return fwnode_call_ptr_op(fwnode, get_next_child_node, child); 800 + struct fwnode_handle *next; 801 + 802 + if (IS_ERR_OR_NULL(fwnode)) 803 + return NULL; 804 + 805 + /* Try to find a child in primary fwnode */ 806 + next = fwnode_call_ptr_op(fwnode, get_next_child_node, child); 807 + if (next) 808 + return next; 809 + 810 + /* When no more children in primary, continue with secondary */ 811 + return fwnode_call_ptr_op(fwnode->secondary, get_next_child_node, child); 801 812 } 802 813 EXPORT_SYMBOL_GPL(fwnode_get_next_child_node); 803 814 ··· 852 841 struct fwnode_handle *device_get_next_child_node(const struct device *dev, 853 842 struct fwnode_handle *child) 854 843 { 855 - const struct fwnode_handle *fwnode = dev_fwnode(dev); 856 - struct fwnode_handle *next; 857 - 858 - if (IS_ERR_OR_NULL(fwnode)) 859 - return NULL; 860 - 861 - /* Try to find a child in primary fwnode */ 862 - next = fwnode_get_next_child_node(fwnode, child); 863 - if (next) 864 - return next; 865 - 866 - /* When no more children in primary, continue with secondary */ 867 - return fwnode_get_next_child_node(fwnode->secondary, child); 844 + return fwnode_get_next_child_node(dev_fwnode(dev), child); 868 845 } 869 846 EXPORT_SYMBOL_GPL(device_get_next_child_node); 870 847
+23 -30
drivers/block/drbd/drbd_actlog.c
··· 483 483 484 484 int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i) 485 485 { 486 - struct lru_cache *al = device->act_log; 487 486 /* for bios crossing activity log extent boundaries, 488 487 * we may need to activate two extents in one go */ 489 488 unsigned first = i->sector >> (AL_EXTENT_SHIFT-9); 490 489 unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9); 491 - unsigned nr_al_extents; 492 - unsigned available_update_slots; 493 490 unsigned enr; 494 491 495 - D_ASSERT(device, first <= last); 496 - 497 - nr_al_extents = 1 + last - first; /* worst case: all touched extends are cold. */ 498 - available_update_slots = min(al->nr_elements - al->used, 499 - al->max_pending_changes - al->pending_changes); 500 - 501 - /* We want all necessary updates for a given request within the same transaction 502 - * We could first check how many updates are *actually* needed, 503 - * and use that instead of the worst-case nr_al_extents */ 504 - if (available_update_slots < nr_al_extents) { 505 - /* Too many activity log extents are currently "hot". 506 - * 507 - * If we have accumulated pending changes already, 508 - * we made progress. 509 - * 510 - * If we cannot get even a single pending change through, 511 - * stop the fast path until we made some progress, 512 - * or requests to "cold" extents could be starved. */ 513 - if (!al->pending_changes) 514 - __set_bit(__LC_STARVING, &device->act_log->flags); 515 - return -ENOBUFS; 492 + if (i->partially_in_al_next_enr) { 493 + D_ASSERT(device, first < i->partially_in_al_next_enr); 494 + D_ASSERT(device, last >= i->partially_in_al_next_enr); 495 + first = i->partially_in_al_next_enr; 516 496 } 497 + 498 + D_ASSERT(device, first <= last); 517 499 518 500 /* Is resync active in this area? */ 519 501 for (enr = first; enr <= last; enr++) { ··· 511 529 } 512 530 } 513 531 514 - /* Checkout the refcounts. 515 - * Given that we checked for available elements and update slots above, 516 - * this has to be successful. */ 532 + /* Try to checkout the refcounts. */ 517 533 for (enr = first; enr <= last; enr++) { 518 534 struct lc_element *al_ext; 519 535 al_ext = lc_get_cumulative(device->act_log, enr); 520 - if (!al_ext) 521 - drbd_info(device, "LOGIC BUG for enr=%u\n", enr); 536 + 537 + if (!al_ext) { 538 + /* Did not work. We may have exhausted the possible 539 + * changes per transaction. Or raced with someone 540 + * "locking" it against changes. 541 + * Remember where to continue from. 542 + */ 543 + if (enr > first) 544 + i->partially_in_al_next_enr = enr; 545 + return -ENOBUFS; 546 + } 522 547 } 523 548 return 0; 524 549 } ··· 545 556 546 557 for (enr = first; enr <= last; enr++) { 547 558 extent = lc_find(device->act_log, enr); 548 - if (!extent) { 559 + /* Yes, this masks a bug elsewhere. However, during normal 560 + * operation this is harmless, so no need to crash the kernel 561 + * by the BUG_ON(refcount == 0) in lc_put(). 562 + */ 563 + if (!extent || extent->refcnt == 0) { 549 564 drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr); 550 565 continue; 551 566 }
+4 -1
drivers/block/drbd/drbd_interval.h
··· 8 8 struct drbd_interval { 9 9 struct rb_node rb; 10 10 sector_t sector; /* start sector of the interval */ 11 - unsigned int size; /* size in bytes */ 12 11 sector_t end; /* highest interval end in subtree */ 12 + unsigned int size; /* size in bytes */ 13 13 unsigned int local:1 /* local or remote request? */; 14 14 unsigned int waiting:1; /* someone is waiting for completion */ 15 15 unsigned int completed:1; /* this has been completed already; 16 16 * ignore for conflict detection */ 17 + 18 + /* to resume a partially successful drbd_al_begin_io_nonblock(); */ 19 + unsigned int partially_in_al_next_enr; 17 20 }; 18 21 19 22 static inline void drbd_clear_interval(struct drbd_interval *i)
+9 -5
drivers/block/drbd/drbd_main.c
··· 32 32 #include <linux/memcontrol.h> 33 33 #include <linux/mm_inline.h> 34 34 #include <linux/slab.h> 35 + #include <linux/string.h> 35 36 #include <linux/random.h> 36 37 #include <linux/reboot.h> 37 38 #include <linux/notifier.h> ··· 733 732 } 734 733 735 734 if (apv >= 88) 736 - strcpy(p->verify_alg, nc->verify_alg); 735 + strscpy(p->verify_alg, nc->verify_alg); 737 736 if (apv >= 89) 738 - strcpy(p->csums_alg, nc->csums_alg); 737 + strscpy(p->csums_alg, nc->csums_alg); 739 738 rcu_read_unlock(); 740 739 741 740 return drbd_send_command(peer_device, sock, cmd, size, NULL, 0); ··· 746 745 struct drbd_socket *sock; 747 746 struct p_protocol *p; 748 747 struct net_conf *nc; 748 + size_t integrity_alg_len; 749 749 int size, cf; 750 750 751 751 sock = &connection->data; ··· 764 762 } 765 763 766 764 size = sizeof(*p); 767 - if (connection->agreed_pro_version >= 87) 768 - size += strlen(nc->integrity_alg) + 1; 765 + if (connection->agreed_pro_version >= 87) { 766 + integrity_alg_len = strlen(nc->integrity_alg) + 1; 767 + size += integrity_alg_len; 768 + } 769 769 770 770 p->protocol = cpu_to_be32(nc->wire_protocol); 771 771 p->after_sb_0p = cpu_to_be32(nc->after_sb_0p); ··· 782 778 p->conn_flags = cpu_to_be32(cf); 783 779 784 780 if (connection->agreed_pro_version >= 87) 785 - strcpy(p->integrity_alg, nc->integrity_alg); 781 + strscpy(p->integrity_alg, nc->integrity_alg, integrity_alg_len); 786 782 rcu_read_unlock(); 787 783 788 784 return __conn_send_command(connection, sock, cmd, size, NULL, 0);
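The algorithm-name fields being filled here live in wire-format packet structures, so the copies move from strcpy(), which trusts the source length, to strscpy(), which bounds the write to the destination and guarantees NUL termination (the two-argument kernel form takes the size from the destination array type; the integrity_alg case passes the computed length explicitly). A userspace stand-in showing the truncation behaviour:

#include <stdio.h>
#include <string.h>

/* Userspace stand-in for the kernel's strscpy(): bounded copy that
 * always NUL-terminates and reports truncation. */
static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';
		}
		return -1;	/* the kernel returns -E2BIG */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char alg[8];

	/* strcpy() would write 13 bytes into an 8-byte field. */
	if (my_strscpy(alg, "crc32c-intel", sizeof(alg)) < 0)
		printf("truncated to \"%s\"\n", alg);
	return 0;
}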
+2 -2
drivers/block/drbd/drbd_receiver.c
··· 3801 3801 *new_net_conf = *old_net_conf; 3802 3802 3803 3803 if (verify_tfm) { 3804 - strcpy(new_net_conf->verify_alg, p->verify_alg); 3804 + strscpy(new_net_conf->verify_alg, p->verify_alg); 3805 3805 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; 3806 3806 crypto_free_shash(peer_device->connection->verify_tfm); 3807 3807 peer_device->connection->verify_tfm = verify_tfm; 3808 3808 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg); 3809 3809 } 3810 3810 if (csums_tfm) { 3811 - strcpy(new_net_conf->csums_alg, p->csums_alg); 3811 + strscpy(new_net_conf->csums_alg, p->csums_alg); 3812 3812 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; 3813 3813 crypto_free_shash(peer_device->connection->csums_tfm); 3814 3814 peer_device->connection->csums_tfm = csums_tfm;
+2 -1
drivers/block/drbd/drbd_req.c
··· 621 621 break; 622 622 623 623 case READ_COMPLETED_WITH_ERROR: 624 - drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size); 624 + drbd_set_out_of_sync(first_peer_device(device), 625 + req->i.sector, req->i.size); 625 626 drbd_report_io_error(device, req); 626 627 __drbd_chk_io_error(device, DRBD_READ_ERROR); 627 628 fallthrough;
+24 -7
drivers/block/zloop.c
··· 542 542 zloop_put_cmd(cmd); 543 543 } 544 544 545 + /* 546 + * Sync the entire FS containing the zone files instead of walking all files. 547 + */ 548 + static int zloop_flush(struct zloop_device *zlo) 549 + { 550 + struct super_block *sb = file_inode(zlo->data_dir)->i_sb; 551 + int ret; 552 + 553 + down_read(&sb->s_umount); 554 + ret = sync_filesystem(sb); 555 + up_read(&sb->s_umount); 556 + 557 + return ret; 558 + } 559 + 545 560 static void zloop_handle_cmd(struct zloop_cmd *cmd) 546 561 { 547 562 struct request *rq = blk_mq_rq_from_pdu(cmd); ··· 577 562 zloop_rw(cmd); 578 563 return; 579 564 case REQ_OP_FLUSH: 580 - /* 581 - * Sync the entire FS containing the zone files instead of 582 - * walking all files 583 - */ 584 - cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb); 565 + cmd->ret = zloop_flush(zlo); 585 566 break; 586 567 case REQ_OP_ZONE_RESET: 587 568 cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq)); ··· 992 981 struct queue_limits lim = { 993 982 .max_hw_sectors = SZ_1M >> SECTOR_SHIFT, 994 983 .chunk_sectors = opts->zone_size, 995 - .features = BLK_FEAT_ZONED, 984 + .features = BLK_FEAT_ZONED | BLK_FEAT_WRITE_CACHE, 985 + 996 986 }; 997 987 unsigned int nr_zones, i, j; 998 988 struct zloop_device *zlo; ··· 1174 1162 int ret; 1175 1163 1176 1164 if (!(opts->mask & ZLOOP_OPT_ID)) { 1177 - pr_err("No ID specified\n"); 1165 + pr_err("No ID specified for remove\n"); 1166 + return -EINVAL; 1167 + } 1168 + 1169 + if (opts->mask & ~ZLOOP_OPT_ID) { 1170 + pr_err("Invalid option specified for remove\n"); 1178 1171 return -EINVAL; 1179 1172 } 1180 1173
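zloop backs each zone with a file, so REQ_OP_FLUSH is served by syncing the whole backing filesystem once rather than fsync()ing every zone file; the new s_umount read lock keeps the superblock from being torn down mid-sync, and advertising BLK_FEAT_WRITE_CACHE is what makes the block layer send flush requests at all. The userspace equivalent of the whole-filesystem sync is syncfs(2):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *dir = argc > 1 ? argv[1] : ".";
	int fd = open(dir, O_RDONLY | O_DIRECTORY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* One call flushes every dirty file on dir's filesystem. */
	if (syncfs(fd) < 0)
		perror("syncfs");
	close(fd);
	return 0;
}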
+14 -10
drivers/bluetooth/hci_qca.c
··· 2046 2046 } 2047 2047 2048 2048 out: 2049 - if (ret && retries < MAX_INIT_RETRIES) { 2050 - bt_dev_warn(hdev, "Retry BT power ON:%d", retries); 2049 + if (ret) { 2051 2050 qca_power_shutdown(hu); 2052 - if (hu->serdev) { 2053 - serdev_device_close(hu->serdev); 2054 - ret = serdev_device_open(hu->serdev); 2055 - if (ret) { 2056 - bt_dev_err(hdev, "failed to open port"); 2057 - return ret; 2051 + 2052 + if (retries < MAX_INIT_RETRIES) { 2053 + bt_dev_warn(hdev, "Retry BT power ON:%d", retries); 2054 + if (hu->serdev) { 2055 + serdev_device_close(hu->serdev); 2056 + ret = serdev_device_open(hu->serdev); 2057 + if (ret) { 2058 + bt_dev_err(hdev, "failed to open port"); 2059 + return ret; 2060 + } 2058 2061 } 2062 + retries++; 2063 + goto retry; 2059 2064 } 2060 - retries++; 2061 - goto retry; 2065 + return ret; 2062 2066 } 2063 2067 2064 2068 /* Setup bdaddr */
+5
drivers/char/ipmi/ipmi_ipmb.c
··· 202 202 break; 203 203 204 204 case I2C_SLAVE_READ_REQUESTED: 205 + *val = 0xff; 206 + ipmi_ipmb_check_msg_done(iidev); 207 + break; 208 + 205 209 case I2C_SLAVE_STOP: 206 210 ipmi_ipmb_check_msg_done(iidev); 207 211 break; 208 212 209 213 case I2C_SLAVE_READ_PROCESSED: 214 + *val = 0xff; 210 215 break; 211 216 } 212 217
+96 -47
drivers/char/ipmi/ipmi_msghandler.c
··· 602 602 static int __scan_channels(struct ipmi_smi *intf, 603 603 struct ipmi_device_id *id, bool rescan); 604 604 605 + static void ipmi_lock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion, 606 + unsigned long *flags) 607 + { 608 + if (run_to_completion) 609 + return; 610 + spin_lock_irqsave(&intf->xmit_msgs_lock, *flags); 611 + } 612 + 613 + static void ipmi_unlock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion, 614 + unsigned long *flags) 615 + { 616 + if (run_to_completion) 617 + return; 618 + spin_unlock_irqrestore(&intf->xmit_msgs_lock, *flags); 619 + } 620 + 605 621 static void free_ipmi_user(struct kref *ref) 606 622 { 607 623 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); ··· 1885 1869 return smi_msg; 1886 1870 } 1887 1871 1888 - static void smi_send(struct ipmi_smi *intf, 1872 + static int smi_send(struct ipmi_smi *intf, 1889 1873 const struct ipmi_smi_handlers *handlers, 1890 1874 struct ipmi_smi_msg *smi_msg, int priority) 1891 1875 { 1892 1876 int run_to_completion = READ_ONCE(intf->run_to_completion); 1893 1877 unsigned long flags = 0; 1878 + int rv = 0; 1894 1879 1895 - if (!run_to_completion) 1896 - spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 1880 + ipmi_lock_xmit_msgs(intf, run_to_completion, &flags); 1897 1881 smi_msg = smi_add_send_msg(intf, smi_msg, priority); 1898 - if (!run_to_completion) 1899 - spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 1882 + ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags); 1900 1883 1901 - if (smi_msg) 1902 - handlers->sender(intf->send_info, smi_msg); 1884 + if (smi_msg) { 1885 + rv = handlers->sender(intf->send_info, smi_msg); 1886 + if (rv) { 1887 + ipmi_lock_xmit_msgs(intf, run_to_completion, &flags); 1888 + intf->curr_msg = NULL; 1889 + ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags); 1890 + /* 1891 + * Something may have been added to the transmit 1892 + * queue, so schedule a check for that. 1893 + */ 1894 + queue_work(system_wq, &intf->smi_work); 1895 + } 1896 + } 1897 + return rv; 1903 1898 } 1904 1899 1905 1900 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg) ··· 2323 2296 struct ipmi_recv_msg *recv_msg; 2324 2297 int run_to_completion = READ_ONCE(intf->run_to_completion); 2325 2298 int rv = 0; 2299 + bool in_seq_table = false; 2326 2300 2327 2301 if (supplied_recv) { 2328 2302 recv_msg = supplied_recv; ··· 2377 2349 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2378 2350 source_address, source_lun, 2379 2351 retries, retry_time_ms); 2352 + in_seq_table = true; 2380 2353 } else if (is_ipmb_direct_addr(addr)) { 2381 2354 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2382 2355 recv_msg, source_lun); 2383 2356 } else if (is_lan_addr(addr)) { 2384 2357 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2385 2358 source_lun, retries, retry_time_ms); 2359 + in_seq_table = true; 2386 2360 } else { 2387 - /* Unknown address type. */ 2361 + /* Unknown address type. */ 2388 2362 ipmi_inc_stat(intf, sent_invalid_commands); 2389 2363 rv = -EINVAL; 2390 2364 } 2391 2365 2392 - if (rv) { 2366 + if (!rv) { 2367 + dev_dbg(intf->si_dev, "Send: %*ph\n", 2368 + smi_msg->data_size, smi_msg->data); 2369 + 2370 + rv = smi_send(intf, intf->handlers, smi_msg, priority); 2371 + if (rv != IPMI_CC_NO_ERROR) 2372 + /* smi_send() returns an IPMI err, return a Linux one. */ 2373 + rv = -EIO; 2374 + if (rv && in_seq_table) { 2375 + /* 2376 + * If it's in the sequence table, it will be 2377 + * retried later, so ignore errors. 
2378 + */ 2379 + rv = 0; 2380 + /* But we need to fix the timeout. */ 2381 + intf_start_seq_timer(intf, smi_msg->msgid); 2382 + ipmi_free_smi_msg(smi_msg); 2383 + smi_msg = NULL; 2384 + } 2385 + } 2393 2386 out_err: 2387 + if (!run_to_completion) 2388 + mutex_unlock(&intf->users_mutex); 2389 + 2390 + if (rv) { 2394 2391 if (!supplied_smi) 2395 2392 ipmi_free_smi_msg(smi_msg); 2396 2393 if (!supplied_recv) 2397 2394 ipmi_free_recv_msg(recv_msg); 2398 - } else { 2399 - dev_dbg(intf->si_dev, "Send: %*ph\n", 2400 - smi_msg->data_size, smi_msg->data); 2401 - 2402 - smi_send(intf, intf->handlers, smi_msg, priority); 2403 2395 } 2404 - if (!run_to_completion) 2405 - mutex_unlock(&intf->users_mutex); 2406 - 2407 2396 return rv; 2408 2397 } 2409 2398 ··· 3994 3949 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 3995 3950 msg->data_size, msg->data); 3996 3951 3997 - smi_send(intf, intf->handlers, msg, 0); 3998 - /* 3999 - * We used the message, so return the value that 4000 - * causes it to not be freed or queued. 4001 - */ 4002 - rv = -1; 3952 + if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR) 3953 + /* 3954 + * We used the message, so return the value that 3955 + * causes it to not be freed or queued. 3956 + */ 3957 + rv = -1; 4003 3958 } else if (!IS_ERR(recv_msg)) { 4004 3959 /* Extract the source address from the data. */ 4005 3960 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; ··· 4073 4028 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 4074 4029 msg->data_size = 5; 4075 4030 4076 - smi_send(intf, intf->handlers, msg, 0); 4077 - /* 4078 - * We used the message, so return the value that 4079 - * causes it to not be freed or queued. 4080 - */ 4081 - rv = -1; 4031 + if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR) 4032 + /* 4033 + * We used the message, so return the value that 4034 + * causes it to not be freed or queued. 4035 + */ 4036 + rv = -1; 4082 4037 } else if (!IS_ERR(recv_msg)) { 4083 4038 /* Extract the source address from the data. */ 4084 4039 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; ··· 4218 4173 struct ipmi_smi_msg *msg) 4219 4174 { 4220 4175 struct cmd_rcvr *rcvr; 4221 - int rv = 0; 4176 + int rv = 0; /* Free by default */ 4222 4177 unsigned char netfn; 4223 4178 unsigned char cmd; 4224 4179 unsigned char chan; ··· 4271 4226 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 4272 4227 msg->data_size, msg->data); 4273 4228 4274 - smi_send(intf, intf->handlers, msg, 0); 4275 - /* 4276 - * We used the message, so return the value that 4277 - * causes it to not be freed or queued. 4278 - */ 4279 - rv = -1; 4229 + if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR) 4230 + /* 4231 + * We used the message, so return the value that 4232 + * causes it to not be freed or queued. 4233 + */ 4234 + rv = -1; 4280 4235 } else if (!IS_ERR(recv_msg)) { 4281 4236 /* Extract the source address from the data. */ 4282 4237 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; ··· 4869 4824 * message delivery. 
4870 4825 */ 4871 4826 restart: 4872 - if (!run_to_completion) 4873 - spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4827 + ipmi_lock_xmit_msgs(intf, run_to_completion, &flags); 4874 4828 if (intf->curr_msg == NULL && !intf->in_shutdown) { 4875 4829 struct list_head *entry = NULL; 4876 4830 ··· 4885 4841 intf->curr_msg = newmsg; 4886 4842 } 4887 4843 } 4888 - if (!run_to_completion) 4889 - spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4844 + ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags); 4890 4845 4891 4846 if (newmsg) { 4892 4847 cc = intf->handlers->sender(intf->send_info, newmsg); ··· 4893 4850 if (newmsg->recv_msg) 4894 4851 deliver_err_response(intf, 4895 4852 newmsg->recv_msg, cc); 4896 - else 4897 - ipmi_free_smi_msg(newmsg); 4853 + ipmi_lock_xmit_msgs(intf, run_to_completion, &flags); 4854 + intf->curr_msg = NULL; 4855 + ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags); 4856 + ipmi_free_smi_msg(newmsg); 4857 + newmsg = NULL; 4898 4858 goto restart; 4899 4859 } 4900 4860 } ··· 4965 4919 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4966 4920 flags); 4967 4921 4968 - if (!run_to_completion) 4969 - spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4922 + ipmi_lock_xmit_msgs(intf, run_to_completion, &flags); 4970 4923 /* 4971 4924 * We can get an asynchronous event or receive message in addition 4972 4925 * to commands we send. 4973 4926 */ 4974 4927 if (msg == intf->curr_msg) 4975 4928 intf->curr_msg = NULL; 4976 - if (!run_to_completion) 4977 - spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4929 + ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags); 4978 4930 4979 4931 if (run_to_completion) 4980 4932 smi_work(&intf->smi_work); ··· 5085 5041 ipmi_inc_stat(intf, 5086 5042 retransmitted_ipmb_commands); 5087 5043 5088 - smi_send(intf, intf->handlers, smi_msg, 0); 5044 + /* If this fails we'll retry later or timeout. */ 5045 + if (smi_send(intf, intf->handlers, smi_msg, 0) != IPMI_CC_NO_ERROR) { 5046 + /* But fix the timeout. */ 5047 + intf_start_seq_timer(intf, smi_msg->msgid); 5048 + ipmi_free_smi_msg(smi_msg); 5049 + } 5089 5050 } else 5090 5051 ipmi_free_smi_msg(smi_msg); 5091 5052
+24 -13
drivers/char/ipmi/ipmi_si_intf.c
··· 809 809 */ 810 810 return_hosed_msg(smi_info, IPMI_BUS_ERR); 811 811 } 812 + if (smi_info->waiting_msg != NULL) { 813 + /* Also handle if there was a message waiting. */ 814 + smi_info->curr_msg = smi_info->waiting_msg; 815 + smi_info->waiting_msg = NULL; 816 + return_hosed_msg(smi_info, IPMI_BUS_ERR); 817 + } 812 818 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED); 813 819 goto out; 814 820 } ··· 924 918 { 925 919 struct smi_info *smi_info = send_info; 926 920 unsigned long flags; 921 + int rv = IPMI_CC_NO_ERROR; 927 922 928 923 debug_timestamp(smi_info, "Enqueue"); 929 924 925 + /* 926 + * Check here for run to completion mode. A check under lock is 927 + * later. 928 + */ 930 929 if (smi_info->si_state == SI_HOSED) 931 930 return IPMI_BUS_ERR; 932 931 ··· 945 934 } 946 935 947 936 spin_lock_irqsave(&smi_info->si_lock, flags); 948 - /* 949 - * The following two lines don't need to be under the lock for 950 - * the lock's sake, but they do need SMP memory barriers to 951 - * avoid getting things out of order. We are already claiming 952 - * the lock, anyway, so just do it under the lock to avoid the 953 - * ordering problem. 954 - */ 955 - BUG_ON(smi_info->waiting_msg); 956 - smi_info->waiting_msg = msg; 957 - check_start_timer_thread(smi_info); 937 + if (smi_info->si_state == SI_HOSED) { 938 + rv = IPMI_BUS_ERR; 939 + } else { 940 + BUG_ON(smi_info->waiting_msg); 941 + smi_info->waiting_msg = msg; 942 + check_start_timer_thread(smi_info); 943 + } 958 944 spin_unlock_irqrestore(&smi_info->si_lock, flags); 959 - return IPMI_CC_NO_ERROR; 945 + return rv; 960 946 } 961 947 962 948 static void set_run_to_completion(void *send_info, bool i_run_to_completion) ··· 1121 1113 * SI_USEC_PER_JIFFY); 1122 1114 smi_result = smi_event_handler(smi_info, time_diff); 1123 1115 1124 - if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { 1116 + if (smi_info->si_state == SI_HOSED) { 1117 + timeout = jiffies + SI_TIMEOUT_HOSED; 1118 + } else if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { 1125 1119 /* Running with interrupts, only do long timeouts. */ 1126 1120 timeout = jiffies + SI_TIMEOUT_JIFFIES; 1127 1121 smi_inc_stat(smi_info, long_timeouts); ··· 2236 2226 unsigned long jiffies_now; 2237 2227 long time_diff; 2238 2228 2239 - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { 2229 + while (smi_info->si_state != SI_HOSED && 2230 + (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL))) { 2240 2231 jiffies_now = jiffies; 2241 2232 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) 2242 2233 * SI_USEC_PER_JIFFY);
+1 -1
drivers/char/ipmi/ipmi_si_ls2k.c
··· 168 168 ipmi_si_remove_by_dev(&pdev->dev); 169 169 } 170 170 171 - struct platform_driver ipmi_ls2k_platform_driver = { 171 + static struct platform_driver ipmi_ls2k_platform_driver = { 172 172 .driver = { 173 173 .name = "ls2k-ipmi-si", 174 174 },
+1 -11
drivers/char/random.c
··· 96 96 /* Control how we warn userspace. */ 97 97 static struct ratelimit_state urandom_warning = 98 98 RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE); 99 - static int ratelimit_disable __read_mostly = 100 - IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); 99 + static int ratelimit_disable __read_mostly = 0; 101 100 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); 102 101 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); 103 102 ··· 166 167 spin_unlock_irqrestore(&random_ready_notifier.lock, flags); 167 168 return ret; 168 169 } 169 - 170 - #define warn_unseeded_randomness() \ 171 - if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \ 172 - printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \ 173 - __func__, (void *)_RET_IP_, crng_init) 174 - 175 170 176 171 /********************************************************************* 177 172 * ··· 427 434 */ 428 435 void get_random_bytes(void *buf, size_t len) 429 436 { 430 - warn_unseeded_randomness(); 431 437 _get_random_bytes(buf, len); 432 438 } 433 439 EXPORT_SYMBOL(get_random_bytes); ··· 514 522 unsigned long flags; \ 515 523 struct batch_ ##type *batch; \ 516 524 unsigned long next_gen; \ 517 - \ 518 - warn_unseeded_randomness(); \ 519 525 \ 520 526 if (!crng_ready()) { \ 521 527 _get_random_bytes(&ret, sizeof(ret)); \
+23 -1
drivers/clk/imx/clk-imx8qxp.c
··· 346 346 }, 347 347 .probe = imx8qxp_clk_probe, 348 348 }; 349 - module_platform_driver(imx8qxp_clk_driver); 349 + 350 + static int __init imx8qxp_clk_init(void) 351 + { 352 + int ret; 353 + 354 + ret = platform_driver_register(&imx8qxp_clk_driver); 355 + if (ret) 356 + return ret; 357 + 358 + ret = imx_clk_scu_module_init(); 359 + if (ret) 360 + platform_driver_unregister(&imx8qxp_clk_driver); 361 + 362 + return ret; 363 + } 364 + module_init(imx8qxp_clk_init); 365 + 366 + static void __exit imx8qxp_clk_exit(void) 367 + { 368 + imx_clk_scu_module_exit(); 369 + platform_driver_unregister(&imx8qxp_clk_driver); 370 + } 371 + module_exit(imx8qxp_clk_exit); 350 372 351 373 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>"); 352 374 MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
+11 -1
drivers/clk/imx/clk-scu.c
··· 191 191 return p != NULL; 192 192 } 193 193 194 + int __init imx_clk_scu_module_init(void) 195 + { 196 + return platform_driver_register(&imx_clk_scu_driver); 197 + } 198 + 199 + void __exit imx_clk_scu_module_exit(void) 200 + { 201 + return platform_driver_unregister(&imx_clk_scu_driver); 202 + } 203 + 194 204 int imx_clk_scu_init(struct device_node *np, 195 205 const struct imx_clk_scu_rsrc_table *data) 196 206 { ··· 225 215 rsrc_table = data; 226 216 } 227 217 228 - return platform_driver_register(&imx_clk_scu_driver); 218 + return 0; 229 219 } 230 220 231 221 /*
+2
drivers/clk/imx/clk-scu.h
··· 25 25 extern const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8qxp; 26 26 extern const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8qm; 27 27 28 + int __init imx_clk_scu_module_init(void); 29 + void __exit imx_clk_scu_module_exit(void); 28 30 int imx_clk_scu_init(struct device_node *np, 29 31 const struct imx_clk_scu_rsrc_table *data); 30 32 struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
+8 -6
drivers/cpufreq/intel_pstate.c
··· 1476 1476 refresh_frequency_limits(policy); 1477 1477 } 1478 1478 1479 - static bool intel_pstate_update_max_freq(struct cpudata *cpudata) 1479 + static bool intel_pstate_update_max_freq(int cpu) 1480 1480 { 1481 - struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu); 1481 + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); 1482 1482 if (!policy) 1483 1483 return false; 1484 1484 1485 - __intel_pstate_update_max_freq(policy, cpudata); 1485 + __intel_pstate_update_max_freq(policy, all_cpu_data[cpu]); 1486 1486 1487 1487 return true; 1488 1488 } ··· 1501 1501 int cpu; 1502 1502 1503 1503 for_each_possible_cpu(cpu) 1504 - intel_pstate_update_max_freq(all_cpu_data[cpu]); 1504 + intel_pstate_update_max_freq(cpu); 1505 1505 1506 1506 mutex_lock(&hybrid_capacity_lock); 1507 1507 ··· 1647 1647 static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type) 1648 1648 { 1649 1649 struct cpudata *cpudata = all_cpu_data[cpu]; 1650 - unsigned int freq = cpudata->pstate.turbo_freq; 1651 1650 struct freq_qos_request *req; 1651 + unsigned int freq; 1652 1652 1653 1653 struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); 1654 1654 if (!policy) ··· 1660 1660 1661 1661 if (hwp_active) 1662 1662 intel_pstate_get_hwp_cap(cpudata); 1663 + 1664 + freq = cpudata->pstate.turbo_freq; 1663 1665 1664 1666 if (type == FREQ_QOS_MIN) { 1665 1667 freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100); ··· 1910 1908 struct cpudata *cpudata = 1911 1909 container_of(to_delayed_work(work), struct cpudata, hwp_notify_work); 1912 1910 1913 - if (intel_pstate_update_max_freq(cpudata)) { 1911 + if (intel_pstate_update_max_freq(cpudata->cpu)) { 1914 1912 /* 1915 1913 * The driver will not be unregistered while this function is 1916 1914 * running, so update the capacity without acquiring the driver
+18
drivers/cxl/core/core.h
··· 152 152 int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, 153 153 struct access_coordinate *c); 154 154 155 + static inline struct device *port_to_host(struct cxl_port *port) 156 + { 157 + struct cxl_port *parent = is_cxl_root(port) ? NULL : 158 + to_cxl_port(port->dev.parent); 159 + 160 + /* 161 + * The host of CXL root port and the first level of ports is 162 + * the platform firmware device, the host of all other ports 163 + * is their parent port. 164 + */ 165 + if (!parent) 166 + return port->uport_dev; 167 + else if (is_cxl_root(parent)) 168 + return parent->uport_dev; 169 + else 170 + return &parent->dev; 171 + } 172 + 155 173 static inline struct device *dport_to_host(struct cxl_dport *dport) 156 174 { 157 175 struct cxl_port *port = dport->port;
+1 -1
drivers/cxl/core/hdm.c
··· 904 904 if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) 905 905 return; 906 906 907 - if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags)) 907 + if (cxld->flags & CXL_DECODER_F_LOCK) 908 908 return; 909 909 910 910 if (port->commit_end == id)
+9 -2
drivers/cxl/core/mbox.c
··· 311 311 * cxl_payload_from_user_allowed() - Check contents of in_payload. 312 312 * @opcode: The mailbox command opcode. 313 313 * @payload_in: Pointer to the input payload passed in from user space. 314 + * @in_size: Size of @payload_in in bytes. 314 315 * 315 316 * Return: 316 317 * * true - payload_in passes check for @opcode. ··· 326 325 * 327 326 * The specific checks are determined by the opcode. 328 327 */ 329 - static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in) 328 + static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in, 329 + size_t in_size) 330 330 { 331 331 switch (opcode) { 332 332 case CXL_MBOX_OP_SET_PARTITION_INFO: { 333 333 struct cxl_mbox_set_partition_info *pi = payload_in; 334 334 335 + if (in_size < sizeof(*pi)) 336 + return false; 335 337 if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG) 336 338 return false; 337 339 break; ··· 342 338 case CXL_MBOX_OP_CLEAR_LOG: { 343 339 const uuid_t *uuid = (uuid_t *)payload_in; 344 340 341 + if (in_size < sizeof(uuid_t)) 342 + return false; 345 343 /* 346 344 * Restrict the ‘Clear log’ action to only apply to 347 345 * Vendor debug logs. ··· 371 365 if (IS_ERR(mbox_cmd->payload_in)) 372 366 return PTR_ERR(mbox_cmd->payload_in); 373 367 374 - if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) { 368 + if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in, 369 + in_size)) { 375 370 dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n", 376 371 cxl_mem_opcode_to_name(opcode)); 377 372 kvfree(mbox_cmd->payload_in);
+9 -4
drivers/cxl/core/memdev.c
··· 1089 1089 DEFINE_FREE(put_cxlmd, struct cxl_memdev *, 1090 1090 if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev)) 1091 1091 1092 - static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd) 1092 + static bool cxl_memdev_attach_failed(struct cxl_memdev *cxlmd) 1093 1093 { 1094 - int rc; 1095 - 1096 1094 /* 1097 1095 * If @attach is provided fail if the driver is not attached upon 1098 1096 * return. Note that failure here could be the result of a race to ··· 1098 1100 * succeeded and then cxl_mem unbound before the lock is acquired. 1099 1101 */ 1100 1102 guard(device)(&cxlmd->dev); 1101 - if (cxlmd->attach && !cxlmd->dev.driver) { 1103 + return (cxlmd->attach && !cxlmd->dev.driver); 1104 + } 1105 + 1106 + static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd) 1107 + { 1108 + int rc; 1109 + 1110 + if (cxl_memdev_attach_failed(cxlmd)) { 1102 1111 cxl_memdev_unregister(cxlmd); 1103 1112 return ERR_PTR(-ENXIO); 1104 1113 }
+32 -10
drivers/cxl/core/pmem.c
··· 115 115 device_unregister(&cxl_nvb->dev); 116 116 } 117 117 118 - /** 119 - * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology 120 - * @host: platform firmware root device 121 - * @port: CXL port at the root of a CXL topology 122 - * 123 - * Return: bridge device that can host cxl_nvdimm objects 124 - */ 125 - struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, 126 - struct cxl_port *port) 118 + static bool cxl_nvdimm_bridge_failed_attach(struct cxl_nvdimm_bridge *cxl_nvb) 119 + { 120 + struct device *dev = &cxl_nvb->dev; 121 + 122 + guard(device)(dev); 123 + /* If the device has no driver, then it failed to attach. */ 124 + return dev->driver == NULL; 125 + } 126 + 127 + struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host, 128 + struct cxl_port *port) 127 129 { 128 130 struct cxl_nvdimm_bridge *cxl_nvb; 129 131 struct device *dev; ··· 147 145 if (rc) 148 146 goto err; 149 147 148 + if (cxl_nvdimm_bridge_failed_attach(cxl_nvb)) { 149 + unregister_nvb(cxl_nvb); 150 + return ERR_PTR(-ENODEV); 151 + } 152 + 150 153 rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb); 151 154 if (rc) 152 155 return ERR_PTR(rc); ··· 162 155 put_device(dev); 163 156 return ERR_PTR(rc); 164 157 } 165 - EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL"); 158 + EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_nvdimm_bridge, "cxl_pmem"); 166 159 167 160 static void cxl_nvdimm_release(struct device *dev) 168 161 { ··· 261 254 cxl_nvb = cxl_find_nvdimm_bridge(port); 262 255 if (!cxl_nvb) 263 256 return -ENODEV; 257 + 258 + /* 259 + * Take the uport_dev lock to guard against race of nvdimm_bus object. 260 + * cxl_acpi_probe() registers the nvdimm_bus and is done under the 261 + * root port uport_dev lock. 262 + * 263 + * Take the cxl_nvb device lock to ensure that cxl_nvb driver is in a 264 + * consistent state. And the driver registers nvdimm_bus. 265 + */ 266 + guard(device)(cxl_nvb->port->uport_dev); 267 + guard(device)(&cxl_nvb->dev); 268 + if (!cxl_nvb->nvdimm_bus) { 269 + rc = -ENODEV; 270 + goto err_alloc; 271 + } 264 272 265 273 cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd); 266 274 if (IS_ERR(cxl_nvd)) {
+18 -34
drivers/cxl/core/port.c
··· 615 615 static void unregister_port(void *_port) 616 616 { 617 617 struct cxl_port *port = _port; 618 - struct cxl_port *parent = parent_port_of(port); 619 - struct device *lock_dev; 620 618 621 - /* 622 - * CXL root port's and the first level of ports are unregistered 623 - * under the platform firmware device lock, all other ports are 624 - * unregistered while holding their parent port lock. 625 - */ 626 - if (!parent) 627 - lock_dev = port->uport_dev; 628 - else if (is_cxl_root(parent)) 629 - lock_dev = parent->uport_dev; 630 - else 631 - lock_dev = &parent->dev; 632 - 633 - device_lock_assert(lock_dev); 619 + device_lock_assert(port_to_host(port)); 634 620 port->dead = true; 635 621 device_unregister(&port->dev); 636 622 } ··· 1413 1427 return NULL; 1414 1428 } 1415 1429 1416 - static struct device *endpoint_host(struct cxl_port *endpoint) 1417 - { 1418 - struct cxl_port *port = to_cxl_port(endpoint->dev.parent); 1419 - 1420 - if (is_cxl_root(port)) 1421 - return port->uport_dev; 1422 - return &port->dev; 1423 - } 1424 - 1425 1430 static void delete_endpoint(void *data) 1426 1431 { 1427 1432 struct cxl_memdev *cxlmd = data; 1428 1433 struct cxl_port *endpoint = cxlmd->endpoint; 1429 - struct device *host = endpoint_host(endpoint); 1434 + struct device *host = port_to_host(endpoint); 1430 1435 1431 1436 scoped_guard(device, host) { 1432 1437 if (host->driver && !endpoint->dead) { ··· 1433 1456 1434 1457 int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint) 1435 1458 { 1436 - struct device *host = endpoint_host(endpoint); 1459 + struct device *host = port_to_host(endpoint); 1437 1460 struct device *dev = &cxlmd->dev; 1438 1461 1439 1462 get_device(host); ··· 1767 1790 { 1768 1791 struct cxl_dport *dport; 1769 1792 1770 - device_lock_assert(&port->dev); 1793 + /* 1794 + * The port is already visible in CXL hierarchy, but it may still 1795 + * be in the process of binding to the CXL port driver at this point. 1796 + * 1797 + * port creation and driver binding are protected by the port's host 1798 + * lock, so acquire the host lock here to ensure the port has completed 1799 + * driver binding before proceeding with dport addition. 1800 + */ 1801 + guard(device)(port_to_host(port)); 1802 + guard(device)(&port->dev); 1771 1803 dport = cxl_find_dport_by_dev(port, dport_dev); 1772 1804 if (!dport) { 1773 1805 dport = probe_dport(port, dport_dev); ··· 1843 1857 * RP port enumerated by cxl_acpi without dport will 1844 1858 * have the dport added here. 1845 1859 */ 1846 - scoped_guard(device, &port->dev) { 1847 - dport = find_or_add_dport(port, dport_dev); 1848 - if (IS_ERR(dport)) { 1849 - if (PTR_ERR(dport) == -EAGAIN) 1850 - goto retry; 1851 - return PTR_ERR(dport); 1852 - } 1860 + dport = find_or_add_dport(port, dport_dev); 1861 + if (IS_ERR(dport)) { 1862 + if (PTR_ERR(dport) == -EAGAIN) 1863 + goto retry; 1864 + return PTR_ERR(dport); 1853 1865 } 1854 1866 1855 1867 rc = cxl_add_ep(dport, &cxlmd->dev);
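find_or_add_dport() now takes the port's host lock (port_to_host(), added in core.h above) before the port lock, so a dport cannot be added while the port is still binding to its driver. The guard(device)(...) statements are the kernel's scope-based lock guards, built on the compiler's cleanup attribute; a userspace sketch of the same mechanism with pthread mutexes (names are hypothetical):

#include <pthread.h>
#include <stdio.h>

static void unlock_cb(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define CAT_(a, b) a##b
#define CAT(a, b) CAT_(a, b)
#define guard_lock(m) \
	pthread_mutex_t *CAT(guard_, __LINE__) \
	__attribute__((cleanup(unlock_cb))) = (pthread_mutex_lock(m), (m))

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_dport(void)
{
	guard_lock(&host_lock);	/* host first: covers driver binding */
	guard_lock(&port_lock);
	puts("dport added under both locks");
}	/* unlocks run here, in reverse order of declaration */

int main(void)
{
	add_dport();
	return 0;
}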
+2 -2
drivers/cxl/core/region.c
··· 1100 1100 static void cxl_region_setup_flags(struct cxl_region *cxlr, 1101 1101 struct cxl_decoder *cxld) 1102 1102 { 1103 - if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags)) { 1103 + if (cxld->flags & CXL_DECODER_F_LOCK) { 1104 1104 set_bit(CXL_REGION_F_LOCK, &cxlr->flags); 1105 1105 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); 1106 1106 } 1107 1107 1108 - if (test_bit(CXL_DECODER_F_NORMALIZED_ADDRESSING, &cxld->flags)) 1108 + if (cxld->flags & CXL_DECODER_F_NORMALIZED_ADDRESSING) 1109 1109 set_bit(CXL_REGION_F_NORMALIZED_ADDRESSING, &cxlr->flags); 1110 1110 } 1111 1111
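The region.c change is subtle: test_bit() takes a bit number, so if the decoder flags are mask-style values built with BIT(), passing one where a number is expected silently tests the wrong bit. A runnable userspace sketch of the failure mode (the flag value is hypothetical):

#include <stdio.h>

#define BIT(n)	(1UL << (n))
#define F_LOCK	BIT(4)	/* hypothetical mask-style flag, numeric value 16 */

/* simplified, non-atomic model of test_bit(): nr is a bit NUMBER */
static int test_bit_model(unsigned long nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

int main(void)
{
	unsigned long flags = F_LOCK;

	/* wrong: interprets the mask value 16 as bit number 16 */
	printf("test_bit(F_LOCK, ...): %d\n", test_bit_model(F_LOCK, &flags));
	/* right for mask-style flags: plain bitwise AND */
	printf("flags & F_LOCK:        %d\n", !!(flags & F_LOCK));
	return 0;
}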
+7
drivers/cxl/cxl.h
··· 574 574 575 575 #define CXL_DEV_ID_LEN 19 576 576 577 + enum { 578 + CXL_NVD_F_INVALIDATED = 0, 579 + }; 580 + 577 581 struct cxl_nvdimm { 578 582 struct device dev; 579 583 struct cxl_memdev *cxlmd; 580 584 u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */ 581 585 u64 dirty_shutdowns; 586 + unsigned long flags; 582 587 }; 583 588 584 589 struct cxl_pmem_region_mapping { ··· 925 920 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev); 926 921 struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, 927 922 struct cxl_port *port); 923 + struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host, 924 + struct cxl_port *port); 928 925 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); 929 926 bool is_cxl_nvdimm(struct device *dev); 930 927 int devm_cxl_add_nvdimm(struct device *host, struct cxl_port *port,
+21 -1
drivers/cxl/pmem.c
··· 13 13 14 14 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); 15 15 16 + /** 17 + * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology 18 + * @host: platform firmware root device 19 + * @port: CXL port at the root of a CXL topology 20 + * 21 + * Return: bridge device that can host cxl_nvdimm objects 22 + */ 23 + struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, 24 + struct cxl_port *port) 25 + { 26 + return __devm_cxl_add_nvdimm_bridge(host, port); 27 + } 28 + EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL"); 29 + 16 30 static void clear_exclusive(void *mds) 17 31 { 18 32 clear_exclusive_cxl_commands(mds, exclusive_cmds); ··· 142 128 unsigned long flags = 0, cmd_mask = 0; 143 129 struct nvdimm *nvdimm; 144 130 int rc; 131 + 132 + if (test_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags)) 133 + return -EBUSY; 145 134 146 135 set_exclusive_cxl_commands(mds, exclusive_cmds); 147 136 rc = devm_add_action_or_reset(dev, clear_exclusive, mds); ··· 326 309 scoped_guard(device, dev) { 327 310 if (dev->driver) { 328 311 cxl_nvd = to_cxl_nvdimm(dev); 329 - if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) 312 + if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) { 330 313 release = true; 314 + set_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags); 315 + } 331 316 } 332 317 } 333 318 if (release) ··· 372 353 .probe = cxl_nvdimm_bridge_probe, 373 354 .id = CXL_DEVICE_NVDIMM_BRIDGE, 374 355 .drv = { 356 + .probe_type = PROBE_FORCE_SYNCHRONOUS, 375 357 .suppress_bind_attrs = true, 376 358 }, 377 359 };
+2 -5
drivers/dpll/zl3073x/core.c
··· 981 981 } 982 982 983 983 /* Add devres action to release DPLL related resources */ 984 - rc = devm_add_action_or_reset(zldev->dev, zl3073x_dev_dpll_fini, zldev); 985 - if (rc) 986 - goto error; 987 - 988 - return 0; 984 + return devm_add_action_or_reset(zldev->dev, zl3073x_dev_dpll_fini, zldev); 989 985 990 986 error: 991 987 zl3073x_dev_dpll_fini(zldev); ··· 1022 1026 "Unknown or non-match chip ID: 0x%0x\n", 1023 1027 id); 1024 1028 } 1029 + zldev->chip_id = id; 1025 1030 1026 1031 /* Read revision, firmware version and custom config version */ 1027 1032 rc = zl3073x_read_u16(zldev, ZL_REG_REVISION, &revision);
+28
drivers/dpll/zl3073x/core.h
··· 35 35 * @dev: pointer to device 36 36 * @regmap: regmap to access device registers 37 37 * @multiop_lock: to serialize multiple register operations 38 + * @chip_id: chip ID read from hardware 38 39 * @ref: array of input references' invariants 39 40 * @out: array of outs' invariants 40 41 * @synth: array of synths' invariants ··· 49 48 struct device *dev; 50 49 struct regmap *regmap; 51 50 struct mutex multiop_lock; 51 + u16 chip_id; 52 52 53 53 /* Invariants */ 54 54 struct zl3073x_ref ref[ZL3073X_NUM_REFS]; ··· 145 143 *****************/ 146 144 147 145 int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel); 146 + 147 + /** 148 + * zl3073x_dev_is_ref_phase_comp_32bit - check ref phase comp register size 149 + * @zldev: pointer to zl3073x device 150 + * 151 + * Some chip IDs have a 32-bit wide ref_phase_offset_comp register instead 152 + * of the default 48-bit. 153 + * 154 + * Return: true if the register is 32-bit, false if 48-bit 155 + */ 156 + static inline bool 157 + zl3073x_dev_is_ref_phase_comp_32bit(struct zl3073x_dev *zldev) 158 + { 159 + switch (zldev->chip_id) { 160 + case 0x0E30: 161 + case 0x0E93: 162 + case 0x0E94: 163 + case 0x0E95: 164 + case 0x0E96: 165 + case 0x0E97: 166 + case 0x1F60: 167 + return true; 168 + default: 169 + return false; 170 + } 171 + } 148 172 149 173 static inline bool 150 174 zl3073x_is_n_pin(u8 id)
+5 -2
drivers/dpll/zl3073x/dpll.c
··· 475 475 ref_id = zl3073x_input_pin_ref_get(pin->id); 476 476 ref = zl3073x_ref_state_get(zldev, ref_id); 477 477 478 - /* Perform sign extension for 48bit signed value */ 479 - phase_comp = sign_extend64(ref->phase_comp, 47); 478 + /* Perform sign extension based on register width */ 479 + if (zl3073x_dev_is_ref_phase_comp_32bit(zldev)) 480 + phase_comp = sign_extend64(ref->phase_comp, 31); 481 + else 482 + phase_comp = sign_extend64(ref->phase_comp, 47); 480 483 481 484 /* Reverse two's complement negation applied during set and convert 482 485 * to 32bit signed int
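The dpll.c hunk hinges on handing sign_extend64() the right sign-bit index. A runnable userspace re-implementation (same arithmetic as the kernel helper) shows how one raw register value decodes differently at 32-bit and 48-bit width:

#include <stdio.h>
#include <stdint.h>

/* 'index' is the position of the sign bit: 31 for a 32-bit register,
 * 47 for a 48-bit one */
static int64_t sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	uint64_t raw = 0xFFFF0000;	/* negative as 32-bit two's complement */

	printf("32-bit decode: %lld\n", (long long)sign_extend64(raw, 31)); /* -65536 */
	printf("48-bit decode: %lld\n", (long long)sign_extend64(raw, 47)); /* 4294901760 */
	return 0;
}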
+20 -5
drivers/dpll/zl3073x/ref.c
··· 121 121 return rc; 122 122 123 123 /* Read phase compensation register */ 124 - rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, 125 - &ref->phase_comp); 124 + if (zl3073x_dev_is_ref_phase_comp_32bit(zldev)) { 125 + u32 val; 126 + 127 + rc = zl3073x_read_u32(zldev, ZL_REG_REF_PHASE_OFFSET_COMP_32, 128 + &val); 129 + ref->phase_comp = val; 130 + } else { 131 + rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, 132 + &ref->phase_comp); 133 + } 126 134 if (rc) 127 135 return rc; 128 136 ··· 187 179 if (!rc && dref->sync_ctrl != ref->sync_ctrl) 188 180 rc = zl3073x_write_u8(zldev, ZL_REG_REF_SYNC_CTRL, 189 181 ref->sync_ctrl); 190 - if (!rc && dref->phase_comp != ref->phase_comp) 191 - rc = zl3073x_write_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, 192 - ref->phase_comp); 182 + if (!rc && dref->phase_comp != ref->phase_comp) { 183 + if (zl3073x_dev_is_ref_phase_comp_32bit(zldev)) 184 + rc = zl3073x_write_u32(zldev, 185 + ZL_REG_REF_PHASE_OFFSET_COMP_32, 186 + ref->phase_comp); 187 + else 188 + rc = zl3073x_write_u48(zldev, 189 + ZL_REG_REF_PHASE_OFFSET_COMP, 190 + ref->phase_comp); 191 + } 193 192 if (rc) 194 193 return rc; 195 194
+1
drivers/dpll/zl3073x/regs.h
··· 194 194 #define ZL_REF_CONFIG_DIFF_EN BIT(2) 195 195 196 196 #define ZL_REG_REF_PHASE_OFFSET_COMP ZL_REG(10, 0x28, 6) 197 + #define ZL_REG_REF_PHASE_OFFSET_COMP_32 ZL_REG(10, 0x28, 4) 197 198 198 199 #define ZL_REG_REF_SYNC_CTRL ZL_REG(10, 0x2e, 1) 199 200 #define ZL_REF_SYNC_CTRL_MODE GENMASK(2, 0)
+1 -1
drivers/firewire/ohci.c
··· 848 848 { 849 849 struct device *dev = ohci->card.device; 850 850 unsigned int i; 851 - struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES]; 851 + struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES] = { NULL }; 852 852 dma_addr_t dma_addrs[AR_BUFFERS]; 853 853 void *vaddr; 854 854 struct descriptor *d;
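The ohci.c fix relies on a C language guarantee rather than a kernel API: when an initializer covers only part of an array, the remaining elements are zero-filled (C11 6.7.9), which presumably lets the error path distinguish allocated pages from untouched slots. A runnable demo:

#include <stdio.h>

int main(void)
{
	/* only element 0 is written explicitly; the rest are zero-filled
	 * as if the array had static storage duration */
	void *pages[4] = { NULL };

	for (int i = 0; i < 4; i++)
		printf("pages[%d] = %p\n", i, pages[i]);
	return 0;
}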
+3 -3
drivers/gpio/gpiolib-shared.c
··· 748 748 static void gpio_shared_free_exclusive(void) 749 749 { 750 750 struct gpio_shared_entry *entry, *epos; 751 + struct gpio_shared_ref *ref, *rpos; 751 752 752 753 list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) { 753 754 if (gpio_shared_entry_is_really_shared(entry)) 754 755 continue; 755 756 756 - gpio_shared_drop_ref(list_first_entry(&entry->refs, 757 - struct gpio_shared_ref, 758 - list)); 757 + list_for_each_entry_safe(ref, rpos, &entry->refs, list) 758 + gpio_shared_drop_ref(ref); 759 759 gpio_shared_drop_entry(entry); 760 760 } 761 761 }
+6 -2
drivers/gpio/gpiolib.c
··· 3267 3267 3268 3268 /* Make sure this is called after checking for gc->get(). */ 3269 3269 ret = gc->get(gc, offset); 3270 - if (ret > 1) 3271 - ret = -EBADE; 3270 + if (ret > 1) { 3271 + gpiochip_warn(gc, 3272 + "invalid return value from gc->get(): %d, consider fixing the driver\n", 3273 + ret); 3274 + ret = !!ret; 3275 + } 3272 3276 3273 3277 return ret; 3274 3278 }
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
··· 641 641 aca_bank_error_remove(aerr, bank_error); 642 642 643 643 out_unlock: 644 + mutex_unlock(&aerr->lock); 644 645 mutex_destroy(&aerr->lock); 645 646 } 646 647
+10 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 7059 7059 dev_info(adev->dev, "PCI error: slot reset callback!!\n"); 7060 7060 7061 7061 memset(&reset_context, 0, sizeof(reset_context)); 7062 + INIT_LIST_HEAD(&device_list); 7063 + hive = amdgpu_get_xgmi_hive(adev); 7064 + if (hive) { 7065 + mutex_lock(&hive->hive_lock); 7066 + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) 7067 + list_add_tail(&tmp_adev->reset_list, &device_list); 7068 + } else { 7069 + list_add_tail(&adev->reset_list, &device_list); 7070 + } 7062 7071 7063 7072 if (adev->pcie_reset_ctx.swus) 7064 7073 link_dev = adev->pcie_reset_ctx.swus; ··· 7108 7099 reset_context.reset_req_dev = adev; 7109 7100 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 7110 7101 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); 7111 - INIT_LIST_HEAD(&device_list); 7112 7102 7113 - hive = amdgpu_get_xgmi_hive(adev); 7114 7103 if (hive) { 7115 - mutex_lock(&hive->hive_lock); 7116 7104 reset_context.hive = hive; 7117 - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { 7105 + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) 7118 7106 tmp_adev->pcie_reset_ctx.in_link_reset = true; 7119 - list_add_tail(&tmp_adev->reset_list, &device_list); 7120 - } 7121 7107 } else { 7122 7108 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); 7123 - list_add_tail(&adev->reset_list, &device_list); 7124 7109 } 7125 7110 7126 7111 r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
+7 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
··· 332 332 if (!context || !context->initialized) { 333 333 dev_err(adev->dev, "TA is not initialized\n"); 334 334 ret = -EINVAL; 335 - goto err_free_shared_buf; 335 + goto free_shared_buf; 336 336 } 337 337 338 338 if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_invoke) { 339 339 dev_err(adev->dev, "Unsupported function to invoke TA\n"); 340 340 ret = -EOPNOTSUPP; 341 - goto err_free_shared_buf; 341 + goto free_shared_buf; 342 342 } 343 343 344 344 context->session_id = ta_id; ··· 346 346 mutex_lock(&psp->ras_context.mutex); 347 347 ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len); 348 348 if (ret) 349 - goto err_free_shared_buf; 349 + goto unlock; 350 350 351 351 ret = psp_fn_ta_invoke(psp, cmd_id); 352 352 if (ret || context->resp_status) { ··· 354 354 ret, context->resp_status); 355 355 if (!ret) { 356 356 ret = -EINVAL; 357 - goto err_free_shared_buf; 357 + goto unlock; 358 358 } 359 359 } 360 360 361 361 if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len)) 362 362 ret = -EFAULT; 363 363 364 - err_free_shared_buf: 364 + unlock: 365 365 mutex_unlock(&psp->ras_context.mutex); 366 + 367 + free_shared_buf: 366 368 kfree(shared_buf); 367 369 368 370 return ret;
+18 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
··· 35 35 static const struct dma_fence_ops amdgpu_userq_fence_ops; 36 36 static struct kmem_cache *amdgpu_userq_fence_slab; 37 37 38 + #define AMDGPU_USERQ_MAX_HANDLES (1U << 16) 39 + 38 40 int amdgpu_userq_fence_slab_init(void) 39 41 { 40 42 amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence", ··· 480 478 if (!amdgpu_userq_enabled(dev)) 481 479 return -ENOTSUPP; 482 480 481 + if (args->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES || 482 + args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES || 483 + args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES) 484 + return -EINVAL; 485 + 483 486 num_syncobj_handles = args->num_syncobj_handles; 484 487 syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles), 485 488 size_mul(sizeof(u32), num_syncobj_handles)); ··· 671 664 if (!amdgpu_userq_enabled(dev)) 672 665 return -ENOTSUPP; 673 666 667 + if (wait_info->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES || 668 + wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES || 669 + wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES) 670 + return -EINVAL; 671 + 674 672 num_read_bo_handles = wait_info->num_bo_read_handles; 675 673 bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles), 676 674 size_mul(sizeof(u32), num_read_bo_handles)); ··· 845 833 846 834 dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv, 847 835 DMA_RESV_USAGE_READ, fence) { 848 - if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { 836 + if (num_fences >= wait_info->num_fences) { 849 837 r = -EINVAL; 850 838 goto free_fences; 851 839 } ··· 862 850 863 851 dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv, 864 852 DMA_RESV_USAGE_WRITE, fence) { 865 - if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { 853 + if (num_fences >= wait_info->num_fences) { 866 854 r = -EINVAL; 867 855 goto free_fences; 868 856 } ··· 886 874 goto free_fences; 887 875 888 876 dma_fence_unwrap_for_each(f, &iter, fence) { 889 - if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { 877 + if (num_fences >= wait_info->num_fences) { 890 878 r = -EINVAL; 879 + dma_fence_put(fence); 891 880 goto free_fences; 892 881 } 893 882 ··· 911 898 if (r) 912 899 goto free_fences; 913 900 914 - if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { 901 + if (num_fences >= wait_info->num_fences) { 915 902 r = -EINVAL; 903 + dma_fence_put(fence); 916 904 goto free_fences; 917 905 } 918 906
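The new AMDGPU_USERQ_MAX_HANDLES bound complements, rather than replaces, the size_mul() already used for the memdup_user() sizes: a saturating multiply only prevents a wrapped-around (undersized) allocation, not an absurdly large one. A userspace model of the saturation semantics of the kernel's <linux/overflow.h> helper:

#include <stdio.h>
#include <stdint.h>

/* like the kernel's size_mul(): saturate to SIZE_MAX on overflow so a
 * later bounded allocation fails cleanly instead of being undersized */
static size_t size_mul_model(size_t a, size_t b)
{
	size_t r;

	if (__builtin_mul_overflow(a, b, &r))
		return SIZE_MAX;
	return r;
}

int main(void)
{
	printf("%zu\n", size_mul_model(sizeof(uint32_t), 4));		/* 16 */
	printf("%zx\n", size_mul_model(sizeof(uint32_t), SIZE_MAX));	/* saturated */
	return 0;
}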
-5
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 720 720 mes_set_hw_res_pkt.enable_reg_active_poll = 1; 721 721 mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; 722 722 mes_set_hw_res_pkt.oversubscription_timer = 50; 723 - if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f) 724 - mes_set_hw_res_pkt.enable_lr_compute_wa = 1; 725 - else 726 - dev_info_once(mes->adev->dev, 727 - "MES FW version must be >= 0x7f to enable LR compute workaround.\n"); 728 723 729 724 if (amdgpu_mes_log_enable) { 730 725 mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
-5
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
··· 779 779 mes_set_hw_res_pkt.use_different_vmid_compute = 1; 780 780 mes_set_hw_res_pkt.enable_reg_active_poll = 1; 781 781 mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; 782 - if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82) 783 - mes_set_hw_res_pkt.enable_lr_compute_wa = 1; 784 - else 785 - dev_info_once(adev->dev, 786 - "MES FW version must be >= 0x82 to enable LR compute workaround.\n"); 787 782 788 783 /* 789 784 * Keep oversubscribe timer for sdma . When we have unmapped doorbell
+4
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
··· 174 174 fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); 175 175 fw_shared->sq.is_enabled = 1; 176 176 177 + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG); 178 + fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? 179 + AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; 180 + 177 181 if (amdgpu_vcnfw_log) 178 182 amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); 179 183
+2 -2
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 170 170 if (sink == NULL) 171 171 goto fail; 172 172 173 - stream = kzalloc_obj(struct dc_stream_state); 173 + stream = kzalloc_obj(struct dc_stream_state, GFP_ATOMIC); 174 174 if (stream == NULL) 175 175 goto fail; 176 176 177 - stream->update_scratch = kzalloc((int32_t) dc_update_scratch_space_size(), GFP_KERNEL); 177 + stream->update_scratch = kzalloc((int32_t) dc_update_scratch_space_size(), GFP_ATOMIC); 178 178 if (stream->update_scratch == NULL) 179 179 goto fail; 180 180
+14 -9
drivers/gpu/drm/bridge/samsung-dsim.c
··· 1881 1881 return 0; 1882 1882 } 1883 1883 1884 + static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi) 1885 + { 1886 + if (dsi->te_gpio) { 1887 + free_irq(gpiod_to_irq(dsi->te_gpio), dsi); 1888 + gpiod_put(dsi->te_gpio); 1889 + } 1890 + } 1891 + 1884 1892 static int samsung_dsim_host_attach(struct mipi_dsi_host *host, 1885 1893 struct mipi_dsi_device *device) 1886 1894 { ··· 1969 1961 if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) { 1970 1962 ret = samsung_dsim_register_te_irq(dsi, &device->dev); 1971 1963 if (ret) 1972 - return ret; 1964 + goto err_remove_bridge; 1973 1965 } 1974 1966 1975 1967 // The next bridge can be used by host_ops->attach ··· 1990 1982 err_release_next_bridge: 1991 1983 drm_bridge_put(dsi->bridge.next_bridge); 1992 1984 dsi->bridge.next_bridge = NULL; 1993 - return ret; 1994 - } 1995 1985 1996 - static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi) 1997 - { 1998 - if (dsi->te_gpio) { 1999 - free_irq(gpiod_to_irq(dsi->te_gpio), dsi); 2000 - gpiod_put(dsi->te_gpio); 2001 - } 1986 + if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) 1987 + samsung_dsim_unregister_te_irq(dsi); 1988 + err_remove_bridge: 1989 + drm_bridge_remove(&dsi->bridge); 1990 + return ret; 2002 1991 } 2003 1992 2004 1993 static int samsung_dsim_host_detach(struct mipi_dsi_host *host,
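The samsung-dsim fix restores the classic acquire-in-order / release-in-reverse error ladder: each failure label undoes exactly the steps that succeeded before it. A minimal runnable model of the idiom (the resources here are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int attach(void)
{
	void *a, *b, *c;

	a = malloc(16);			/* step 1 */
	if (!a)
		goto err;
	b = malloc(16);			/* step 2 */
	if (!b)
		goto err_free_a;
	c = malloc(16);			/* step 3 */
	if (!c)
		goto err_free_b;

	printf("attached\n");
	free(c);			/* normal teardown, also reversed */
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	return attach();
}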
+3 -1
drivers/gpu/drm/bridge/synopsys/dw-dp.c
··· 2049 2049 bridge->type = DRM_MODE_CONNECTOR_DisplayPort; 2050 2050 bridge->ycbcr_420_allowed = true; 2051 2051 2052 - devm_drm_bridge_add(dev, bridge); 2052 + ret = devm_drm_bridge_add(dev, bridge); 2053 + if (ret) 2054 + return ERR_PTR(ret); 2053 2055 2054 2056 dp->aux.dev = dev; 2055 2057 dp->aux.drm_dev = encoder->dev;
+4 -2
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 1415 1415 { 1416 1416 struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); 1417 1417 struct device_node *np = pdata->dev->of_node; 1418 + const struct i2c_client *client = to_i2c_client(pdata->dev); 1418 1419 int ret; 1419 1420 1420 1421 pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0); ··· 1434 1433 ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP; 1435 1434 1436 1435 if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) { 1437 - pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT | 1438 - DRM_BRIDGE_OP_HPD; 1436 + pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; 1437 + if (client->irq) 1438 + pdata->bridge.ops |= DRM_BRIDGE_OP_HPD; 1439 1439 /* 1440 1440 * If comms were already enabled they would have been enabled 1441 1441 * with the wrong value of HPD_DISABLE. Update it now. Comms
+2 -1
drivers/gpu/drm/drm_client_modeset.c
··· 930 930 mutex_unlock(&client->modeset_mutex); 931 931 out: 932 932 kfree(crtcs); 933 - modes_destroy(dev, modes, connector_count); 933 + if (modes) 934 + modes_destroy(dev, modes, connector_count); 934 935 kfree(modes); 935 936 kfree(offsets); 936 937 kfree(enabled);
+5 -5
drivers/gpu/drm/drm_gpusvm.c
··· 1338 1338 EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid); 1339 1339 1340 1340 /** 1341 - * drm_gpusvm_range_pages_valid_unlocked() - GPU SVM range pages valid unlocked 1341 + * drm_gpusvm_pages_valid_unlocked() - GPU SVM pages valid unlocked 1342 1342 * @gpusvm: Pointer to the GPU SVM structure 1343 - * @range: Pointer to the GPU SVM range structure 1343 + * @svm_pages: Pointer to the GPU SVM pages structure 1344 1344 * 1345 - * This function determines if a GPU SVM range pages are valid. Expected be 1346 - * called without holding gpusvm->notifier_lock. 1345 + * This function determines if a GPU SVM pages are valid. Expected be called 1346 + * without holding gpusvm->notifier_lock. 1347 1347 * 1348 - * Return: True if GPU SVM range has valid pages, False otherwise 1348 + * Return: True if GPU SVM pages are valid, False otherwise 1349 1349 */ 1350 1350 static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm, 1351 1351 struct drm_gpusvm_pages *svm_pages)
+1 -6
drivers/gpu/drm/i915/display/intel_alpm.c
··· 562 562 mutex_lock(&intel_dp->alpm.lock); 563 563 564 564 intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder), 565 - ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE | 566 - ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); 567 - 568 - intel_de_rmw(display, 569 - PORT_ALPM_CTL(cpu_transcoder), 570 - PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); 565 + ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE, 0); 571 566 572 567 drm_dbg_kms(display->drm, "Disabling ALPM\n"); 573 568 mutex_unlock(&intel_dp->alpm.lock);
+3 -1
drivers/gpu/drm/imx/ipuv3/parallel-display.c
··· 256 256 257 257 platform_set_drvdata(pdev, imxpd); 258 258 259 - devm_drm_bridge_add(dev, &imxpd->bridge); 259 + ret = devm_drm_bridge_add(dev, &imxpd->bridge); 260 + if (ret) 261 + return ret; 260 262 261 263 return component_add(dev, &imx_pd_ops); 262 264 }
+2 -2
drivers/gpu/drm/logicvc/logicvc_drm.c
··· 92 92 struct device *dev = drm_dev->dev; 93 93 struct device_node *of_node = dev->of_node; 94 94 struct logicvc_drm_config *config = &logicvc->config; 95 - struct device_node *layers_node; 96 95 int ret; 97 96 98 97 logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING, ··· 127 128 if (ret) 128 129 return ret; 129 130 130 - layers_node = of_get_child_by_name(of_node, "layers"); 131 + struct device_node *layers_node __free(device_node) = 132 + of_get_child_by_name(of_node, "layers"); 131 133 if (!layers_node) { 132 134 drm_err(drm_dev, "Missing non-optional layers node\n"); 133 135 return -EINVAL;
+6 -6
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
··· 737 737 if (!obj) 738 738 goto done; 739 739 740 - if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 741 - WARN_ON(obj->buffer.length != 4)) 740 + if (obj->type != ACPI_TYPE_BUFFER || 741 + obj->buffer.length != 4) 742 742 goto done; 743 743 744 744 caps->status = 0; ··· 773 773 if (!obj) 774 774 goto done; 775 775 776 - if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 777 - WARN_ON(obj->buffer.length != 4)) 776 + if (obj->type != ACPI_TYPE_BUFFER || 777 + obj->buffer.length != 4) 778 778 goto done; 779 779 780 780 jt->status = 0; ··· 861 861 862 862 _DOD = output.pointer; 863 863 864 - if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || 865 - WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) 864 + if (_DOD->type != ACPI_TYPE_PACKAGE || 865 + _DOD->package.count > ARRAY_SIZE(dod->acpiIdList)) 866 866 return; 867 867 868 868 for (int i = 0; i < _DOD->package.count; i++) {
+2 -2
drivers/gpu/drm/tiny/sharp-memory.c
··· 541 541 542 542 smd = devm_drm_dev_alloc(dev, &sharp_memory_drm_driver, 543 543 struct sharp_memory_device, drm); 544 - if (!smd) 545 - return -ENOMEM; 544 + if (IS_ERR(smd)) 545 + return PTR_ERR(smd); 546 546 547 547 spi_set_drvdata(spi, smd); 548 548
+4
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 105 105 * @handle: DMA address handle for the command buffer space if @using_mob is 106 106 * false. Immutable. 107 107 * @size: The size of the command buffer space. Immutable. 108 + * @id: Monotonically increasing ID of the last cmdbuf submitted. 108 109 * @num_contexts: Number of contexts actually enabled. 109 110 */ 110 111 struct vmw_cmdbuf_man { ··· 133 132 bool has_pool; 134 133 dma_addr_t handle; 135 134 size_t size; 135 + u64 id; 136 136 u32 num_contexts; 137 137 }; 138 138 ··· 304 302 { 305 303 struct vmw_cmdbuf_man *man = header->man; 306 304 u32 val; 305 + 306 + header->cb_header->id = man->id++; 307 307 308 308 val = upper_32_bits(header->handle); 309 309 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 1143 1143 ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo); 1144 1144 if (ret != 0) { 1145 1145 drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n"); 1146 - return PTR_ERR(vmw_bo); 1146 + return ret; 1147 1147 } 1148 1148 vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); 1149 1149 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); ··· 1199 1199 ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo); 1200 1200 if (ret != 0) { 1201 1201 drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n"); 1202 - return PTR_ERR(vmw_bo); 1202 + return ret; 1203 1203 } 1204 1204 vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, 1205 1205 VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+8 -1
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
··· 260 260 return ret; 261 261 } 262 262 263 + static void vmw_bo_dirty_free(struct kref *kref) 264 + { 265 + struct vmw_bo_dirty *dirty = container_of(kref, struct vmw_bo_dirty, ref_count); 266 + 267 + kvfree(dirty); 268 + } 269 + 263 270 /** 264 271 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object 265 272 * @vbo: The buffer object ··· 281 274 { 282 275 struct vmw_bo_dirty *dirty = vbo->dirty; 283 276 284 - if (dirty && kref_put(&dirty->ref_count, (void *)kvfree)) 277 + if (dirty && kref_put(&dirty->ref_count, vmw_bo_dirty_free)) 285 278 vbo->dirty = NULL; 286 279 } 287 280
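The vmwgfx change replaces a function-pointer cast — (void *)kvfree used directly as a kref release callback — with a properly typed function, which matters under kernel control-flow-integrity checking and whenever the kref is not the object's first member. A sketch of the canonical pattern, assuming <linux/kref.h> (struct and helper names are hypothetical):

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref_count;
	/* ... payload ... */
};

/* the callback gets the embedded kref and recovers the outer object */
static void foo_release(struct kref *kref)
{
	struct foo *foo = container_of(kref, struct foo, ref_count);

	kfree(foo);
}

static void foo_put(struct foo *foo)
{
	kref_put(&foo->ref_count, foo_release);
}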
+6
drivers/gpu/drm/xe/regs/xe_engine_regs.h
··· 96 96 #define ENABLE_SEMAPHORE_POLL_BIT REG_BIT(13) 97 97 98 98 #define RING_CMD_CCTL(base) XE_REG((base) + 0xc4, XE_REG_OPTION_MASKED) 99 + 100 + #define CS_MMIO_GROUP_INSTANCE_SELECT(base) XE_REG((base) + 0xcc) 101 + #define SELECTIVE_READ_ADDRESSING REG_BIT(30) 102 + #define SELECTIVE_READ_GROUP REG_GENMASK(29, 23) 103 + #define SELECTIVE_READ_INSTANCE REG_GENMASK(22, 16) 104 + 99 105 /* 100 106 * CMD_CCTL read/write fields take a MOCS value and _not_ a table index. 101 107 * The lsb of each can be considered a separate enabling bit for encryption.
+54 -12
drivers/gpu/drm/xe/xe_gt.c
··· 210 210 return ret; 211 211 } 212 212 213 + /* Dwords required to emit a RMW of a register */ 214 + #define EMIT_RMW_DW 20 215 + 213 216 static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) 214 217 { 215 - struct xe_reg_sr *sr = &q->hwe->reg_lrc; 218 + struct xe_hw_engine *hwe = q->hwe; 219 + struct xe_reg_sr *sr = &hwe->reg_lrc; 216 220 struct xe_reg_sr_entry *entry; 217 - int count_rmw = 0, count = 0, ret; 221 + int count_rmw = 0, count_rmw_mcr = 0, count = 0, ret; 218 222 unsigned long idx; 219 223 struct xe_bb *bb; 220 224 size_t bb_len = 0; ··· 228 224 xa_for_each(&sr->xa, idx, entry) { 229 225 if (entry->reg.masked || entry->clr_bits == ~0) 230 226 ++count; 227 + else if (entry->reg.mcr) 228 + ++count_rmw_mcr; 231 229 else 232 230 ++count_rmw; 233 231 } ··· 237 231 if (count) 238 232 bb_len += count * 2 + 1; 239 233 240 - if (count_rmw) 241 - bb_len += count_rmw * 20 + 7; 234 + /* 235 + * RMW of MCR registers is the same as a normal RMW, except an 236 + * additional LRI (3 dwords) is required per register to steer the read 237 + * to a non-terminated instance. 238 + * 239 + * We could probably shorten the batch slightly by eliding the 240 + * steering for consecutive MCR registers that have the same 241 + * group/instance target, but it's not worth the extra complexity to do 242 + * so. 243 + */ 244 + bb_len += count_rmw * EMIT_RMW_DW; 245 + bb_len += count_rmw_mcr * (EMIT_RMW_DW + 3); 242 246 243 - if (q->hwe->class == XE_ENGINE_CLASS_RENDER) 247 + /* 248 + * After doing all RMW, we need 7 trailing dwords to clean up, 249 + * plus an additional 3 dwords to reset steering if any of the 250 + * registers were MCR. 251 + */ 252 + if (count_rmw || count_rmw_mcr) 253 + bb_len += 7 + (count_rmw_mcr ? 3 : 0); 254 + 255 + if (hwe->class == XE_ENGINE_CLASS_RENDER) 244 256 /* 245 257 * Big enough to emit all of the context's 3DSTATE via 246 258 * xe_lrc_emit_hwe_state_instructions() 247 259 */ 248 - bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32); 260 + bb_len += xe_gt_lrc_size(gt, hwe->class) / sizeof(u32); 249 261 250 - xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len); 262 + xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", hwe->name, bb_len); 251 263 252 264 bb = xe_bb_new(gt, bb_len, false); 253 265 if (IS_ERR(bb)) ··· 300 276 } 301 277 } 302 278 303 - if (count_rmw) { 304 - /* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */ 305 - 279 + if (count_rmw || count_rmw_mcr) { 306 280 xa_for_each(&sr->xa, idx, entry) { 307 281 if (entry->reg.masked || entry->clr_bits == ~0) 308 282 continue; 283 + 284 + if (entry->reg.mcr) { 285 + struct xe_reg_mcr reg = { .__reg.raw = entry->reg.raw }; 286 + u8 group, instance; 287 + 288 + xe_gt_mcr_get_nonterminated_steering(gt, reg, &group, &instance); 289 + *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1); 290 + *cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(hwe->mmio_base).addr; 291 + *cs++ = SELECTIVE_READ_ADDRESSING | 292 + REG_FIELD_PREP(SELECTIVE_READ_GROUP, group) | 293 + REG_FIELD_PREP(SELECTIVE_READ_INSTANCE, instance); 294 + } 309 295 310 296 *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO; 311 297 *cs++ = entry->reg.addr; ··· 342 308 *cs++ = CS_GPR_REG(0, 0).addr; 343 309 *cs++ = entry->reg.addr; 344 310 345 - xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n", 346 - entry->reg.addr, entry->clr_bits, entry->set_bits); 311 + xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x%s\n", 312 + entry->reg.addr, entry->clr_bits, entry->set_bits, 313 + entry->reg.mcr ?
" (MCR)" : ""); 347 314 } 348 315 349 316 /* reset used GPR */ ··· 356 321 *cs++ = 0; 357 322 *cs++ = CS_GPR_REG(0, 2).addr; 358 323 *cs++ = 0; 324 + 325 + /* reset steering */ 326 + if (count_rmw_mcr) { 327 + *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1); 328 + *cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(q->hwe->mmio_base).addr; 329 + *cs++ = 0; 330 + } 359 331 } 360 332 361 333 cs = xe_lrc_emit_hwe_state_instructions(q, cs);
+21 -9
drivers/gpu/drm/xe/xe_sync.c
··· 146 146 147 147 if (!signal) { 148 148 sync->fence = drm_syncobj_fence_get(sync->syncobj); 149 - if (XE_IOCTL_DBG(xe, !sync->fence)) 150 - return -EINVAL; 149 + if (XE_IOCTL_DBG(xe, !sync->fence)) { 150 + err = -EINVAL; 151 + goto free_sync; 152 + } 151 153 } 152 154 break; 153 155 ··· 169 167 170 168 if (signal) { 171 169 sync->chain_fence = dma_fence_chain_alloc(); 172 - if (!sync->chain_fence) 173 - return -ENOMEM; 170 + if (!sync->chain_fence) { 171 + err = -ENOMEM; 172 + goto free_sync; 173 + } 174 174 } else { 175 175 sync->fence = drm_syncobj_fence_get(sync->syncobj); 176 - if (XE_IOCTL_DBG(xe, !sync->fence)) 177 - return -EINVAL; 176 + if (XE_IOCTL_DBG(xe, !sync->fence)) { 177 + err = -EINVAL; 178 + goto free_sync; 179 + } 178 180 179 181 err = dma_fence_chain_find_seqno(&sync->fence, 180 182 sync_in.timeline_value); 181 183 if (err) 182 - return err; 184 + goto free_sync; 183 185 } 184 186 break; 185 187 ··· 206 200 if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence))) 207 201 return PTR_ERR(sync->ufence); 208 202 sync->ufence_chain_fence = dma_fence_chain_alloc(); 209 - if (!sync->ufence_chain_fence) 210 - return -ENOMEM; 203 + if (!sync->ufence_chain_fence) { 204 + err = -ENOMEM; 205 + goto free_sync; 206 + } 211 207 sync->ufence_syncobj = ufence_syncobj; 212 208 } 213 209 ··· 224 216 sync->timeline_value = sync_in.timeline_value; 225 217 226 218 return 0; 219 + 220 + free_sync: 221 + xe_sync_entry_cleanup(sync); 222 + return err; 227 223 } 228 224 ALLOW_ERROR_INJECTION(xe_sync_entry_parse, ERRNO); 229 225
+1
drivers/infiniband/Kconfig
··· 6 6 depends on INET 7 7 depends on m || IPV6 != m 8 8 depends on !ALPHA 9 + select DMA_SHARED_BUFFER 9 10 select IRQ_POLL 10 11 select DIMLIB 11 12 help
+13
drivers/infiniband/core/cache.c
··· 926 926 if (err) 927 927 return err; 928 928 929 + /* 930 + * Mark the device as ready for GID cache updates. This allows netdev 931 + * event handlers to update the GID cache even before the device is 932 + * fully registered. 933 + */ 934 + ib_device_enable_gid_updates(ib_dev); 935 + 929 936 rdma_roce_rescan_device(ib_dev); 930 937 931 938 return err; ··· 1644 1637 1645 1638 void ib_cache_cleanup_one(struct ib_device *device) 1646 1639 { 1640 + /* 1641 + * Clear the GID updates mark first to prevent event handlers from 1642 + * accessing the device while it's being torn down. 1643 + */ 1644 + ib_device_disable_gid_updates(device); 1645 + 1647 1646 /* The cleanup function waits for all in-progress workqueue 1648 1647 * elements and cleans up the GID cache. This function should be 1649 1648 * called after the device was removed from the devices list and
+5 -1
drivers/infiniband/core/cma.c
··· 2729 2729 *to_destroy = NULL; 2730 2730 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2731 2731 return 0; 2732 + if (id_priv->restricted_node_type != RDMA_NODE_UNSPECIFIED && 2733 + id_priv->restricted_node_type != cma_dev->device->node_type) 2734 + return 0; 2732 2735 2733 2736 dev_id_priv = 2734 2737 __rdma_create_id(net, cma_listen_handler, id_priv, ··· 2739 2736 if (IS_ERR(dev_id_priv)) 2740 2737 return PTR_ERR(dev_id_priv); 2741 2738 2739 + dev_id_priv->restricted_node_type = id_priv->restricted_node_type; 2742 2740 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2743 2741 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2744 2742 rdma_addr_size(cma_src_addr(id_priv))); ··· 4198 4194 } 4199 4195 4200 4196 mutex_lock(&lock); 4201 - if (id_priv->cma_dev) 4197 + if (READ_ONCE(id_priv->state) != RDMA_CM_IDLE) 4202 4198 ret = -EALREADY; 4203 4199 else 4204 4200 id_priv->restricted_node_type = node_type;
+3
drivers/infiniband/core/core_priv.h
··· 100 100 roce_netdev_callback cb, 101 101 void *cookie); 102 102 103 + void ib_device_enable_gid_updates(struct ib_device *device); 104 + void ib_device_disable_gid_updates(struct ib_device *device); 105 + 103 106 typedef int (*nldev_callback)(struct ib_device *device, 104 107 struct sk_buff *skb, 105 108 struct netlink_callback *cb,
+33 -1
drivers/infiniband/core/device.c
··· 93 93 static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC); 94 94 static DECLARE_RWSEM(devices_rwsem); 95 95 #define DEVICE_REGISTERED XA_MARK_1 96 + #define DEVICE_GID_UPDATES XA_MARK_2 96 97 97 98 static u32 highest_client_id; 98 99 #define CLIENT_REGISTERED XA_MARK_1 ··· 2413 2412 unsigned long index; 2414 2413 2415 2414 down_read(&devices_rwsem); 2416 - xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) 2415 + xa_for_each_marked(&devices, index, dev, DEVICE_GID_UPDATES) 2417 2416 ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie); 2418 2417 up_read(&devices_rwsem); 2418 + } 2419 + 2420 + /** 2421 + * ib_device_enable_gid_updates - Mark device as ready for GID cache updates 2422 + * @device: Device to mark 2423 + * 2424 + * Called after GID table is allocated and initialized. After this mark is set, 2425 + * netdevice event handlers can update the device's GID cache. This allows 2426 + * events that arrive during device registration to be processed, avoiding 2427 + * stale GID entries when netdev properties change during the device 2428 + * registration process. 2429 + */ 2430 + void ib_device_enable_gid_updates(struct ib_device *device) 2431 + { 2432 + down_write(&devices_rwsem); 2433 + xa_set_mark(&devices, device->index, DEVICE_GID_UPDATES); 2434 + up_write(&devices_rwsem); 2435 + } 2436 + 2437 + /** 2438 + * ib_device_disable_gid_updates - Clear the GID updates mark 2439 + * @device: Device to unmark 2440 + * 2441 + * Called before GID table cleanup to prevent event handlers from accessing 2442 + * the device while it's being torn down. 2443 + */ 2444 + void ib_device_disable_gid_updates(struct ib_device *device) 2445 + { 2446 + down_write(&devices_rwsem); 2447 + xa_clear_mark(&devices, device->index, DEVICE_GID_UPDATES); 2448 + up_write(&devices_rwsem); 2419 2449 } 2420 2450 2421 2451 /*
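The new DEVICE_GID_UPDATES flag is an XArray mark: a per-entry tag bit that can be set, cleared, and iterated independently of the stored pointer itself. A sketch of the mechanics, assuming the <linux/xarray.h> API (the helper names here are hypothetical):

#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(devs, XA_FLAGS_ALLOC);
#define DEV_READY XA_MARK_2

static void dev_set_ready(unsigned long index)
{
	xa_set_mark(&devs, index, DEV_READY);	/* tag without touching entry */
}

static void for_each_ready_dev(void (*fn)(void *dev))
{
	unsigned long index;
	void *dev;

	xa_for_each_marked(&devs, index, dev, DEV_READY)
		fn(dev);	/* visits only entries carrying the mark */
}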
+1 -3
drivers/infiniband/core/umem_dmabuf.c
··· 218 218 219 219 err = ib_umem_dmabuf_map_pages(umem_dmabuf); 220 220 if (err) 221 - goto err_unpin; 221 + goto err_release; 222 222 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); 223 223 224 224 return umem_dmabuf; 225 225 226 - err_unpin: 227 - dma_buf_unpin(umem_dmabuf->attach); 228 226 err_release: 229 227 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); 230 228 ib_umem_release(&umem_dmabuf->umem);
+2
drivers/infiniband/core/uverbs_std_types_dmabuf.c
··· 10 10 #include "rdma_core.h" 11 11 #include "uverbs.h" 12 12 13 + MODULE_IMPORT_NS("DMA_BUF"); 14 + 13 15 static int uverbs_dmabuf_attach(struct dma_buf *dmabuf, 14 16 struct dma_buf_attachment *attachment) 15 17 {
+19 -37
drivers/infiniband/hw/bng_re/bng_dev.c
··· 54 54 { 55 55 struct bng_re_chip_ctx *chip_ctx; 56 56 57 - if (!rdev->chip_ctx) 58 - return; 59 - 60 57 kfree(rdev->dev_attr); 61 58 rdev->dev_attr = NULL; 62 59 ··· 121 124 struct bnge_fw_msg fw_msg = {}; 122 125 int rc = -EINVAL; 123 126 124 - if (!rdev) 125 - return rc; 126 - 127 - if (!aux_dev) 128 - return rc; 129 - 130 127 bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE); 131 128 req.ring_type = type; 132 129 req.ring_id = cpu_to_le16(fw_ring_id); ··· 141 150 struct hwrm_ring_alloc_input req = {}; 142 151 struct hwrm_ring_alloc_output resp; 143 152 struct bnge_fw_msg fw_msg = {}; 144 - int rc = -EINVAL; 145 - 146 - if (!aux_dev) 147 - return rc; 153 + int rc; 148 154 149 155 bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC); 150 156 req.enables = 0; ··· 172 184 struct hwrm_stat_ctx_free_input req = {}; 173 185 struct hwrm_stat_ctx_free_output resp = {}; 174 186 struct bnge_fw_msg fw_msg = {}; 175 - int rc = -EINVAL; 176 - 177 - if (!aux_dev) 178 - return rc; 187 + int rc; 179 188 180 189 bng_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE); 181 190 req.stat_ctx_id = cpu_to_le32(rdev->stats_ctx.fw_id); ··· 193 208 struct hwrm_stat_ctx_alloc_output resp = {}; 194 209 struct hwrm_stat_ctx_alloc_input req = {}; 195 210 struct bnge_fw_msg fw_msg = {}; 196 - int rc = -EINVAL; 211 + int rc; 197 212 198 213 stats->fw_id = BNGE_INVALID_STATS_CTX_ID; 199 - 200 - if (!aux_dev) 201 - return rc; 202 214 203 215 bng_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC); 204 216 req.update_period_ms = cpu_to_le32(1000); ··· 285 303 if (rc) { 286 304 ibdev_err(&rdev->ibdev, 287 305 "Failed to register with netdev: %#x\n", rc); 288 - return -EINVAL; 306 + goto reg_netdev_fail; 289 307 } 290 308 291 309 set_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); ··· 294 312 ibdev_err(&rdev->ibdev, 295 313 "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n", 296 314 rdev->aux_dev->auxr_info->msix_requested); 297 - bnge_unregister_dev(rdev->aux_dev); 298 - clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 299 - return -EINVAL; 315 + rc = -EINVAL; 316 + goto msix_ctx_fail; 300 317 } 301 318 ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n", 302 319 rdev->aux_dev->auxr_info->msix_requested); 303 320 304 321 rc = bng_re_setup_chip_ctx(rdev); 305 322 if (rc) { 306 - bnge_unregister_dev(rdev->aux_dev); 307 - clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 308 323 ibdev_err(&rdev->ibdev, "Failed to get chip context\n"); 309 - return -EINVAL; 324 + goto msix_ctx_fail; 310 325 } 311 326 312 327 bng_re_query_hwrm_version(rdev); ··· 312 333 if (rc) { 313 334 ibdev_err(&rdev->ibdev, 314 335 "Failed to allocate RCFW Channel: %#x\n", rc); 315 - goto fail; 336 + goto alloc_fw_chl_fail; 316 337 } 317 338 318 339 /* Allocate nq record memory */ 319 340 rdev->nqr = kzalloc_obj(*rdev->nqr); 320 341 if (!rdev->nqr) { 321 - bng_re_destroy_chip_ctx(rdev); 322 - bnge_unregister_dev(rdev->aux_dev); 323 - clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 324 - return -ENOMEM; 342 + rc = -ENOMEM; 343 + goto nq_alloc_fail; 325 344 } 326 345 327 346 rdev->nqr->num_msix = rdev->aux_dev->auxr_info->msix_requested; ··· 388 411 free_ring: 389 412 bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type); 390 413 free_rcfw: 414 + kfree(rdev->nqr); 415 + nq_alloc_fail: 391 416 bng_re_free_rcfw_channel(&rdev->rcfw); 417 + alloc_fw_chl_fail: 418 + bng_re_destroy_chip_ctx(rdev); 419 + msix_ctx_fail: 420 + bnge_unregister_dev(rdev->aux_dev); 421 + clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 422 + reg_netdev_fail: 394 423 return rc; 395 424 } ··· 469 486 470 487 rdev = dev_info->rdev; 471 488 472 - if (rdev) 473 - bng_re_remove_device(rdev, adev); 489 + bng_re_remove_device(rdev, adev); 474 490 kfree(dev_info); 475 491 }
clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 422 + reg_netdev_fail: 394 423 return rc; 395 424 } 396 425 ··· 469 486 470 487 rdev = dev_info->rdev; 471 488 472 - if (rdev) 473 - bng_re_remove_device(rdev, adev); 489 + bng_re_remove_device(rdev, adev); 474 490 kfree(dev_info); 475 491 } 476 492
+1 -1
drivers/infiniband/hw/efa/efa_verbs.c
··· 1661 1661 struct efa_mr *mr; 1662 1662 1663 1663 if (udata && udata->inlen && 1664 - !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) { 1664 + !ib_is_udata_cleared(udata, 0, udata->inlen)) { 1665 1665 ibdev_dbg(&dev->ibdev, 1666 1666 "Incompatible ABI params, udata not cleared\n"); 1667 1667 return ERR_PTR(-EINVAL);
+1 -1
drivers/infiniband/hw/ionic/ionic_controlpath.c
··· 1218 1218 rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx); 1219 1219 struct ionic_vcq *vcq = to_ionic_vcq(ibcq); 1220 1220 struct ionic_tbl_buf buf = {}; 1221 - struct ionic_cq_resp resp; 1221 + struct ionic_cq_resp resp = {}; 1222 1222 struct ionic_cq_req req; 1223 1223 int udma_idx = 0, rc; 1224 1224
+2
drivers/infiniband/hw/ionic/ionic_ibdev.c
··· 81 81 return -EINVAL; 82 82 83 83 ndev = ib_device_get_netdev(ibdev, port); 84 + if (!ndev) 85 + return -ENODEV; 84 86 85 87 if (netif_running(ndev) && netif_carrier_ok(ndev)) { 86 88 attr->state = IB_PORT_ACTIVE;
+1 -1
drivers/infiniband/hw/irdma/verbs.c
··· 5212 5212 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd) 5213 5213 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); 5214 5214 struct irdma_device *iwdev = to_iwdev(ibah->pd->device); 5215 - struct irdma_create_ah_resp uresp; 5215 + struct irdma_create_ah_resp uresp = {}; 5216 5216 struct irdma_ah *parent_ah; 5217 5217 int err; 5218 5218
+3 -2
drivers/infiniband/hw/mthca/mthca_provider.c
··· 428 428 429 429 if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { 430 430 mthca_free_srq(to_mdev(ibsrq->device), srq); 431 + mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar, 432 + context->db_tab, ucmd.db_index); 431 433 return -EFAULT; 432 434 } 433 435 ··· 438 436 439 437 static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) 440 438 { 439 + mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); 441 440 if (udata) { 442 441 struct mthca_ucontext *context = 443 442 rdma_udata_to_drv_context( ··· 449 446 mthca_unmap_user_db(to_mdev(srq->device), &context->uar, 450 447 context->db_tab, to_msrq(srq)->db_index); 451 448 } 452 - 453 - mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); 454 449 return 0; 455 450 } 456 451
+4
drivers/irqchip/irq-gic-v3-its.c
··· 3474 3474 int lpi_base; 3475 3475 int nr_lpis; 3476 3476 int nr_ites; 3477 + int id_bits; 3477 3478 int sz; 3478 3479 3479 3480 if (!its_alloc_device_table(its, dev_id)) ··· 3486 3485 /* 3487 3486 * Even if the device wants a single LPI, the ITT must be 3488 3487 * sized as a power of two (and you need at least one bit...). 3488 + * Also honor the ITS's own EID limit. 3489 3489 */ 3490 + id_bits = FIELD_GET(GITS_TYPER_IDBITS, its->typer) + 1; 3491 + nvecs = min_t(unsigned int, nvecs, BIT(id_bits)); 3490 3492 nr_ites = max(2, nvecs); 3491 3493 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); 3492 3494 sz = max(sz, ITS_ITT_ALIGN);
+1 -1
drivers/irqchip/irq-gic-v5-irs.c
··· 699 699 */ 700 700 if (list_empty(&irs_nodes)) { 701 701 idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR0); 702 - gicv5_global_data.virt_capable = !FIELD_GET(GICV5_IRS_IDR0_VIRT, idr); 702 + gicv5_global_data.virt_capable = !!FIELD_GET(GICV5_IRS_IDR0_VIRT, idr); 703 703 704 704 idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1); 705 705 irs_setup_pri_bits(idr);
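Both GIC fixes come down to bitfield hygiene: the ITS patch clamps the vector count to the EventID width extracted with FIELD_GET(), and the v5 patch adds the second '!' so a nonzero field reads as capability present rather than absent. A runnable userspace model (the register layouts are illustrative, and GENMASK assumes a 64-bit long):

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))
#define FIELD_GET(m, r)	(((r) & (m)) >> __builtin_ctzl(m))

#define IDR0_VIRT	BIT(4)
#define TYPER_IDBITS	GENMASK(12, 8)

int main(void)
{
	unsigned long idr = BIT(4);		/* virtualization supported */
	unsigned long typer = 15UL << 8;	/* IDBITS field = 15 */

	/* a single '!' would print 0 here and invert the meaning */
	printf("virt_capable = %d\n", (int)!!FIELD_GET(IDR0_VIRT, idr));
	/* EventID is IDBITS + 1 bits wide, so at most BIT(idbits + 1) IDs */
	printf("max nvecs    = %lu\n", BIT(FIELD_GET(TYPER_IDBITS, typer) + 1));
	return 0;
}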
+34 -19
drivers/irqchip/irq-ls-extirq.c
··· 125 125 static int 126 126 ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node) 127 127 { 128 - struct of_imap_parser imap_parser; 129 - struct of_imap_item imap_item; 128 + const __be32 *map; 129 + u32 mapsize; 130 130 int ret; 131 131 132 - ret = of_imap_parser_init(&imap_parser, node, &imap_item); 133 - if (ret) 134 - return ret; 132 + map = of_get_property(node, "interrupt-map", &mapsize); 133 + if (!map) 134 + return -ENOENT; 135 + if (mapsize % sizeof(*map)) 136 + return -EINVAL; 137 + mapsize /= sizeof(*map); 135 138 136 - for_each_of_imap_item(&imap_parser, &imap_item) { 139 + while (mapsize) { 137 140 struct device_node *ipar; 138 - u32 hwirq; 139 - int i; 141 + u32 hwirq, intsize, j; 140 142 141 - hwirq = imap_item.child_imap[0]; 142 - if (hwirq >= MAXIRQ) { 143 - of_node_put(imap_item.parent_args.np); 143 + if (mapsize < 3) 144 144 return -EINVAL; 145 - } 145 + hwirq = be32_to_cpup(map); 146 + if (hwirq >= MAXIRQ) 147 + return -EINVAL; 146 148 priv->nirq = max(priv->nirq, hwirq + 1); 147 149 148 - ipar = of_node_get(imap_item.parent_args.np); 149 - priv->map[hwirq].fwnode = of_fwnode_handle(ipar); 150 + ipar = of_find_node_by_phandle(be32_to_cpup(map + 2)); 151 + map += 3; 152 + mapsize -= 3; 153 + if (!ipar) 154 + return -EINVAL; 155 + priv->map[hwirq].fwnode = &ipar->fwnode; 156 + ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize); 157 + if (ret) 158 + return ret; 150 159 151 - priv->map[hwirq].param_count = imap_item.parent_args.args_count; 152 - for (i = 0; i < priv->map[hwirq].param_count; i++) 153 - priv->map[hwirq].param[i] = imap_item.parent_args.args[i]; 160 + if (intsize > mapsize) 161 + return -EINVAL; 162 + 163 + priv->map[hwirq].param_count = intsize; 164 + for (j = 0; j < intsize; ++j) 165 + priv->map[hwirq].param[j] = be32_to_cpup(map++); 166 + mapsize -= intsize; 154 167 } 155 168 return 0; 156 169 } ··· 190 177 return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n"); 191 178 192 179 priv->intpcr = devm_of_iomap(dev, node, 0, NULL); 193 - if (!priv->intpcr) 194 - return dev_err_probe(dev, -ENOMEM, "Cannot ioremap OF node %pOF\n", node); 180 + if (IS_ERR(priv->intpcr)) { 181 + return dev_err_probe(dev, PTR_ERR(priv->intpcr), 182 + "Cannot ioremap OF node %pOF\n", node); 183 + } 195 184 196 185 ret = ls_extirq_parse_map(priv, node); 197 186 if (ret)
+1 -1
drivers/irqchip/irq-mmp.c
··· 136 136 } 137 137 } 138 138 139 - struct irq_chip icu_irq_chip = { 139 + static const struct irq_chip icu_irq_chip = { 140 140 .name = "icu_irq", 141 141 .irq_mask = icu_mask_irq, 142 142 .irq_mask_ack = icu_mask_ack_irq,
+6 -1
drivers/irqchip/irq-sifive-plic.c
··· 172 172 static void plic_irq_eoi(struct irq_data *d) 173 173 { 174 174 struct plic_handler *handler = this_cpu_ptr(&plic_handlers); 175 + u32 __iomem *reg; 176 + bool enabled; 175 177 176 - if (unlikely(irqd_irq_disabled(d))) { 178 + reg = handler->enable_base + (d->hwirq / 32) * sizeof(u32); 179 + enabled = readl(reg) & BIT(d->hwirq % 32); 180 + 181 + if (unlikely(!enabled)) { 177 182 plic_toggle(handler, d->hwirq, 1); 178 183 writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); 179 184 plic_toggle(handler, d->hwirq, 0);
+3 -1
drivers/media/dvb-core/dmxdev.c
··· 168 168 mutex_unlock(&dmxdev->mutex); 169 169 return -ENOMEM; 170 170 } 171 - dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); 171 + dmxdev->dvr_buffer.data = mem; 172 + dmxdev->dvr_buffer.size = DVR_BUFFER_SIZE; 173 + dvb_ringbuffer_reset(&dmxdev->dvr_buffer); 172 174 if (dmxdev->may_do_mmap) 173 175 dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", 174 176 &dmxdev->mutex,
+37 -1
drivers/mmc/host/dw_mmc-rockchip.c
··· 36 36 int default_sample_phase; 37 37 int num_phases; 38 38 bool internal_phase; 39 + int sample_phase; 40 + int drv_phase; 39 41 }; 40 42 41 43 /* ··· 575 573 dw_mci_pltfm_remove(pdev); 576 574 } 577 575 576 + static int dw_mci_rockchip_runtime_suspend(struct device *dev) 577 + { 578 + struct platform_device *pdev = to_platform_device(dev); 579 + struct dw_mci *host = platform_get_drvdata(pdev); 580 + struct dw_mci_rockchip_priv_data *priv = host->priv; 581 + 582 + if (priv->internal_phase) { 583 + priv->sample_phase = rockchip_mmc_get_phase(host, true); 584 + priv->drv_phase = rockchip_mmc_get_phase(host, false); 585 + } 586 + 587 + return dw_mci_runtime_suspend(dev); 588 + } 589 + 590 + static int dw_mci_rockchip_runtime_resume(struct device *dev) 591 + { 592 + struct platform_device *pdev = to_platform_device(dev); 593 + struct dw_mci *host = platform_get_drvdata(pdev); 594 + struct dw_mci_rockchip_priv_data *priv = host->priv; 595 + int ret; 596 + 597 + ret = dw_mci_runtime_resume(dev); 598 + if (ret) 599 + return ret; 600 + 601 + if (priv->internal_phase) { 602 + rockchip_mmc_set_phase(host, true, priv->sample_phase); 603 + rockchip_mmc_set_phase(host, false, priv->drv_phase); 604 + mci_writel(host, MISC_CON, MEM_CLK_AUTOGATE_ENABLE); 605 + } 606 + 607 + return ret; 608 + } 609 + 578 610 static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = { 579 611 SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 580 - RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_runtime_resume, NULL) 612 + RUNTIME_PM_OPS(dw_mci_rockchip_runtime_suspend, dw_mci_rockchip_runtime_resume, NULL) 581 613 }; 582 614 583 615 static struct platform_driver dw_mci_rockchip_pltfm_driver = {
+1
drivers/mmc/host/mmci_qcom_dml.c
··· 109 109 &dma_spec)) 110 110 return -ENODEV; 111 111 112 + of_node_put(dma_spec.np); 112 113 if (dma_spec.args_count) 113 114 return dma_spec.args[0]; 114 115
+1 -1
drivers/mmc/host/sdhci-brcmstb.c
··· 116 116 writel(sr->boot_main_ctl, priv->boot_regs + SDIO_BOOT_MAIN_CTL); 117 117 118 118 if (ver == SDIO_CFG_CORE_V1) { 119 - writel(sr->sd_pin_sel, cr + SDIO_CFG_SD_PIN_SEL); 119 + writel(sr->sd_pin_sel, cr + SDIO_CFG_V1_SD_PIN_SEL); 120 120 return; 121 121 } 122 122
+8 -1
drivers/net/dsa/sja1105/sja1105_main.c
··· 2278 2278 * change it through the dynamic interface later. 2279 2279 */ 2280 2280 dsa_switch_for_each_available_port(dp, ds) { 2281 + /* May be called during unbind when we unoffload a VLAN-aware 2282 + * bridge from port 1 while port 0 was already torn down 2283 + */ 2284 + if (!dp->pl) 2285 + continue; 2286 + 2281 2287 phylink_replay_link_begin(dp->pl); 2282 2288 mac[dp->index].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; 2283 2289 } ··· 2340 2334 } 2341 2335 2342 2336 dsa_switch_for_each_available_port(dp, ds) 2343 - phylink_replay_link_end(dp->pl); 2337 + if (dp->pl) 2338 + phylink_replay_link_end(dp->pl); 2344 2339 2345 2340 rc = sja1105_reload_cbs(priv); 2346 2341 if (rc < 0)
+7 -6
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 6232 6232 int rc; 6233 6233 6234 6234 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); 6235 + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 6236 + return 0; 6237 + 6235 6238 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 6236 6239 if (rc) 6237 6240 return rc; ··· 10882 10879 struct bnxt_ntuple_filter *ntp_fltr; 10883 10880 int i; 10884 10881 10885 - if (netif_running(bp->dev)) { 10886 - bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); 10887 - for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { 10888 - if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) 10889 - bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); 10890 - } 10882 + bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); 10883 + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { 10884 + if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) 10885 + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); 10891 10886 } 10892 10887 if (!all) 10893 10888 return;
+7
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
··· 3034 3034 goto err_close; 3035 3035 } 3036 3036 3037 + if (ethsw->sw_attr.num_ifs >= DPSW_MAX_IF) { 3038 + dev_err(dev, "DPSW num_ifs %u exceeds max %u\n", 3039 + ethsw->sw_attr.num_ifs, DPSW_MAX_IF); 3040 + err = -EINVAL; 3041 + goto err_close; 3042 + } 3043 + 3037 3044 err = dpsw_get_api_version(ethsw->mc_io, 0, 3038 3045 &ethsw->major, 3039 3046 &ethsw->minor);
+24 -30
drivers/net/ethernet/google/gve/gve_tx_dqo.c
··· 167 167 } 168 168 } 169 169 170 + static void gve_unmap_packet(struct device *dev, 171 + struct gve_tx_pending_packet_dqo *pkt) 172 + { 173 + int i; 174 + 175 + if (!pkt->num_bufs) 176 + return; 177 + 178 + /* SKB linear portion is guaranteed to be mapped */ 179 + dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]), 180 + dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE); 181 + for (i = 1; i < pkt->num_bufs; i++) { 182 + netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]), 183 + dma_unmap_len(pkt, len[i]), 184 + DMA_TO_DEVICE, 0); 185 + } 186 + pkt->num_bufs = 0; 187 + } 188 + 170 189 /* gve_tx_free_desc - Cleans up all pending tx requests and buffers. 171 190 */ 172 191 static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) ··· 195 176 for (i = 0; i < tx->dqo.num_pending_packets; i++) { 196 177 struct gve_tx_pending_packet_dqo *cur_state = 197 178 &tx->dqo.pending_packets[i]; 198 - int j; 199 179 200 - for (j = 0; j < cur_state->num_bufs; j++) { 201 - if (j == 0) { 202 - dma_unmap_single(tx->dev, 203 - dma_unmap_addr(cur_state, dma[j]), 204 - dma_unmap_len(cur_state, len[j]), 205 - DMA_TO_DEVICE); 206 - } else { 207 - dma_unmap_page(tx->dev, 208 - dma_unmap_addr(cur_state, dma[j]), 209 - dma_unmap_len(cur_state, len[j]), 210 - DMA_TO_DEVICE); 211 - } 212 - } 180 + if (tx->dqo.qpl) 181 + gve_free_tx_qpl_bufs(tx, cur_state); 182 + else 183 + gve_unmap_packet(tx->dev, cur_state); 184 + 213 185 if (cur_state->skb) { 214 186 dev_consume_skb_any(cur_state->skb); 215 187 cur_state->skb = NULL; ··· 1162 1152 } else { 1163 1153 tx->dqo.pending_packets[next_index].prev = prev_index; 1164 1154 } 1165 - } 1166 - 1167 - static void gve_unmap_packet(struct device *dev, 1168 - struct gve_tx_pending_packet_dqo *pkt) 1169 - { 1170 - int i; 1171 - 1172 - /* SKB linear portion is guaranteed to be mapped */ 1173 - dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]), 1174 - dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE); 1175 - for (i = 1; i < pkt->num_bufs; i++) { 1176 - netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]), 1177 - dma_unmap_len(pkt, len[i]), 1178 - DMA_TO_DEVICE, 0); 1179 - } 1180 - pkt->num_bufs = 0; 1181 1155 } 1182 1156 1183 1157 /* Completion types and expected behavior:
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
··· 259 259 static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, 260 260 struct mlx5_accel_esp_xfrm_attrs *attrs) 261 261 { 262 - struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); 263 262 struct mlx5e_ipsec_addr *addrs = &attrs->addrs; 264 263 struct net_device *netdev = sa_entry->dev; 265 264 struct xfrm_state *x = sa_entry->x; ··· 275 276 attrs->type != XFRM_DEV_OFFLOAD_PACKET) 276 277 return; 277 278 278 - mlx5_query_mac_address(mdev, addr); 279 + ether_addr_copy(addr, netdev->dev_addr); 279 280 switch (attrs->dir) { 280 281 case XFRM_DEV_OFFLOAD_IN: 281 282 src = attrs->dmac;
+2
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 4068 4068 4069 4069 if (mlx5_mode == MLX5_ESWITCH_LEGACY) 4070 4070 esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY; 4071 + if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) 4072 + esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_SWITCH_LEGACY; 4071 4073 mlx5_eswitch_disable_locked(esw); 4072 4074 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) { 4073 4075 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+6 -2
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
··· 1869 1869 mutex_lock(&ldev->lock); 1870 1870 1871 1871 ldev->mode_changes_in_progress++; 1872 - if (__mlx5_lag_is_active(ldev)) 1873 - mlx5_disable_lag(ldev); 1872 + if (__mlx5_lag_is_active(ldev)) { 1873 + if (ldev->mode == MLX5_LAG_MODE_MPESW) 1874 + mlx5_lag_disable_mpesw(ldev); 1875 + else 1876 + mlx5_disable_lag(ldev); 1877 + } 1874 1878 1875 1879 mutex_unlock(&ldev->lock); 1876 1880 mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
··· 65 65 return err; 66 66 } 67 67 68 - static int enable_mpesw(struct mlx5_lag *ldev) 68 + static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev) 69 69 { 70 70 struct mlx5_core_dev *dev0; 71 71 int err; ··· 126 126 return err; 127 127 } 128 128 129 - static void disable_mpesw(struct mlx5_lag *ldev) 129 + void mlx5_lag_disable_mpesw(struct mlx5_lag *ldev) 130 130 { 131 131 if (ldev->mode == MLX5_LAG_MODE_MPESW) { 132 132 mlx5_mpesw_metadata_cleanup(ldev); ··· 152 152 } 153 153 154 154 if (mpesww->op == MLX5_MPESW_OP_ENABLE) 155 - mpesww->result = enable_mpesw(ldev); 155 + mpesww->result = mlx5_lag_enable_mpesw(ldev); 156 156 else if (mpesww->op == MLX5_MPESW_OP_DISABLE) 157 - disable_mpesw(ldev); 157 + mlx5_lag_disable_mpesw(ldev); 158 158 unlock: 159 159 mutex_unlock(&ldev->lock); 160 160 mlx5_devcom_comp_unlock(devcom);
+5
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
··· 31 31 bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev); 32 32 void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev); 33 33 int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev); 34 + #ifdef CONFIG_MLX5_ESWITCH 35 + void mlx5_lag_disable_mpesw(struct mlx5_lag *ldev); 36 + #else 37 + static inline void mlx5_lag_disable_mpesw(struct mlx5_lag *ldev) {} 38 + #endif /* CONFIG_MLX5_ESWITCH */ 34 39 35 40 #ifdef CONFIG_MLX5_ESWITCH 36 41 void mlx5_mpesw_speed_update_work(struct work_struct *work);
+2
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
··· 193 193 err = pci_enable_sriov(pdev, num_vfs); 194 194 if (err) { 195 195 mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err); 196 + devl_lock(devlink); 196 197 mlx5_device_disable_sriov(dev, num_vfs, true, true); 198 + devl_unlock(devlink); 197 199 } 198 200 return err; 199 201 }
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
··· 1051 1051 struct mlx5dr_table *tbl; 1052 1052 int ret; 1053 1053 1054 - mutex_lock(&dmn->dump_info.dbg_mutex); 1055 1054 mlx5dr_domain_lock(dmn); 1055 + mutex_lock(&dmn->dump_info.dbg_mutex); 1056 1056 1057 1057 ret = dr_dump_domain(file, dmn); 1058 1058 if (ret < 0) ··· 1065 1065 } 1066 1066 1067 1067 unlock_mutex: 1068 - mlx5dr_domain_unlock(dmn); 1069 1068 mutex_unlock(&dmn->dump_info.dbg_mutex); 1069 + mlx5dr_domain_unlock(dmn); 1070 1070 return ret; 1071 1071 } 1072 1072
+4 -1
drivers/net/ethernet/microsoft/mana/gdma_main.c
··· 1946 1946 1947 1947 mana_gd_remove_irqs(pdev); 1948 1948 1949 - destroy_workqueue(gc->service_wq); 1949 + if (gc->service_wq) { 1950 + destroy_workqueue(gc->service_wq); 1951 + gc->service_wq = NULL; 1952 + } 1950 1953 dev_dbg(&pdev->dev, "mana gdma cleanup successful\n"); 1951 1954 } 1952 1955
+3 -1
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 3757 3757 } 3758 3758 3759 3759 WRITE_ONCE(gd->rdma_teardown, true); 3760 - flush_workqueue(gc->service_wq); 3760 + 3761 + if (gc->service_wq) 3762 + flush_workqueue(gc->service_wq); 3761 3763 3762 3764 if (gd->adev) 3763 3765 remove_adev(gd);
+1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 853 853 netdev_info(priv->dev, 854 854 "IEEE 1588-2008 Advanced Timestamp supported\n"); 855 855 856 + memset(&priv->tstamp_config, 0, sizeof(priv->tstamp_config)); 856 857 priv->hwts_tx_en = 0; 857 858 priv->hwts_rx_en = 0; 858 859
+1 -4
drivers/net/ethernet/xscale/ixp4xx_eth.c
··· 403 403 int ret; 404 404 int ch; 405 405 406 - if (!cpu_is_ixp46x()) 407 - return -EOPNOTSUPP; 408 - 409 406 if (!netif_running(netdev)) 410 407 return -EINVAL; 411 408 412 409 ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); 413 410 if (ret) 414 - return ret; 411 + return -EOPNOTSUPP; 415 412 416 413 ch = PORT2CHANNEL(port); 417 414 regs = port->timesync_regs;
+3
drivers/net/ethernet/xscale/ptp_ixp46x.c
··· 232 232 233 233 int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index) 234 234 { 235 + if (!cpu_is_ixp46x()) 236 + return -ENODEV; 237 + 235 238 *regs = ixp_clock.regs; 236 239 *phc_index = ptp_clock_index(ixp_clock.ptp_clock); 237 240
+2 -1
drivers/net/netconsole.c
··· 1679 1679 if (release_len) { 1680 1680 release = init_utsname()->release; 1681 1681 1682 - scnprintf(nt->buf, MAX_PRINT_CHUNK, "%s,%s", release, msg); 1682 + scnprintf(nt->buf, MAX_PRINT_CHUNK, "%s,%.*s", release, 1683 + msg_len, msg); 1683 1684 msg_len += release_len; 1684 1685 } else { 1685 1686 memcpy(nt->buf, msg, msg_len);
+38 -19
drivers/net/ovpn/tcp.c
··· 70 70 peer->tcp.sk_cb.sk_data_ready(sk); 71 71 } 72 72 73 + static struct sk_buff *ovpn_tcp_skb_packet(const struct ovpn_peer *peer, 74 + struct sk_buff *orig_skb, 75 + const int pkt_len, const int pkt_off) 76 + { 77 + struct sk_buff *ovpn_skb; 78 + int err; 79 + 80 + /* create a new skb with only the content of the current packet */ 81 + ovpn_skb = netdev_alloc_skb(peer->ovpn->dev, pkt_len); 82 + if (unlikely(!ovpn_skb)) 83 + goto err; 84 + 85 + skb_copy_header(ovpn_skb, orig_skb); 86 + err = skb_copy_bits(orig_skb, pkt_off, skb_put(ovpn_skb, pkt_len), 87 + pkt_len); 88 + if (unlikely(err)) { 89 + net_warn_ratelimited("%s: skb_copy_bits failed for peer %u\n", 90 + netdev_name(peer->ovpn->dev), peer->id); 91 + kfree_skb(ovpn_skb); 92 + goto err; 93 + } 94 + 95 + consume_skb(orig_skb); 96 + return ovpn_skb; 97 + err: 98 + kfree_skb(orig_skb); 99 + return NULL; 100 + } 101 + 73 102 static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb) 74 103 { 75 104 struct ovpn_peer *peer = container_of(strp, struct ovpn_peer, tcp.strp); 76 105 struct strp_msg *msg = strp_msg(skb); 77 - size_t pkt_len = msg->full_len - 2; 78 - size_t off = msg->offset + 2; 106 + int pkt_len = msg->full_len - 2; 79 107 u8 opcode; 80 108 81 - /* ensure skb->data points to the beginning of the openvpn packet */ 82 - if (!pskb_pull(skb, off)) { 83 - net_warn_ratelimited("%s: packet too small for peer %u\n", 84 - netdev_name(peer->ovpn->dev), peer->id); 85 - goto err; 86 - } 87 - 88 - /* strparser does not trim the skb for us, therefore we do it now */ 89 - if (pskb_trim(skb, pkt_len) != 0) { 90 - net_warn_ratelimited("%s: trimming skb failed for peer %u\n", 91 - netdev_name(peer->ovpn->dev), peer->id); 92 - goto err; 93 - } 94 - 95 - /* we need the first 4 bytes of data to be accessible 109 + /* we need at least 4 bytes of data in the packet 96 110 * to extract the opcode and the key ID later on 97 111 */ 98 - if (!pskb_may_pull(skb, OVPN_OPCODE_SIZE)) { 112 + if (unlikely(pkt_len < OVPN_OPCODE_SIZE)) { 99 113 net_warn_ratelimited("%s: packet too small to fetch opcode for peer %u\n", 100 114 netdev_name(peer->ovpn->dev), peer->id); 101 115 goto err; 102 116 } 117 + 118 + /* extract the packet into a new skb */ 119 + skb = ovpn_tcp_skb_packet(peer, skb, pkt_len, msg->offset + 2); 120 + if (unlikely(!skb)) 121 + goto err; 103 122 104 123 /* DATA_V2 packets are handled in kernel, the rest goes to user space */ 105 124 opcode = ovpn_opcode_from_skb(skb, 0); ··· 132 113 /* The packet size header must be there when sending the packet 133 114 * to userspace, therefore we put it back 134 115 */ 135 - skb_push(skb, 2); 116 + *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(pkt_len); 136 117 ovpn_tcp_to_userspace(peer, strp->sk, skb); 137 118 return; 138 119 }
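The ovpn change above stops mutating the strparser's skb in place: each record on the TCP stream is framed by a 2-byte network-order length prefix (hence pkt_len = full_len - 2), the payload is copied into a fresh skb, and on the user-space path the prefix is re-synthesized with __skb_push() + htons() instead of being recovered from the original buffer. A standalone C sketch of that framing convention (the helper names are hypothetical, not from the driver):

    #include <arpa/inet.h>   /* htons()/ntohs() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helpers mirroring the 2-byte big-endian length
     * prefix that frames each packet on the TCP stream. */
    static size_t frame_encode(uint8_t *buf, const uint8_t *pkt, uint16_t len)
    {
        uint16_t be = htons(len);

        memcpy(buf, &be, sizeof(be));   /* length prefix */
        memcpy(buf + 2, pkt, len);      /* payload */
        return (size_t)2 + len;
    }

    static uint16_t frame_payload_len(const uint8_t *buf)
    {
        uint16_t be;

        memcpy(&be, buf, sizeof(be));
        return ntohs(be);               /* pkt_len == full_len - 2 */
    }

    int main(void)
    {
        uint8_t wire[64];
        const uint8_t payload[] = { 0x48, 0x00, 0x00, 0x01 };
        size_t n = frame_encode(wire, payload, sizeof(payload));

        printf("frame: %zu bytes, payload: %u bytes\n",
               n, frame_payload_len(wire));
        return 0;
    }

Decoding mirrors the driver: a consumer reads the two prefix bytes, then exactly that many payload bytes.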
+17 -8
drivers/net/phy/phy_device.c
··· 1866 1866 goto error; 1867 1867 1868 1868 phy_resume(phydev); 1869 - if (!phydev->is_on_sfp_module) 1870 - phy_led_triggers_register(phydev); 1871 1869 1872 1870 /** 1873 1871 * If the external phy used by current mac interface is managed by ··· 1979 1981 1980 1982 phydev->phy_link_change = NULL; 1981 1983 phydev->phylink = NULL; 1982 - 1983 - if (!phydev->is_on_sfp_module) 1984 - phy_led_triggers_unregister(phydev); 1985 1984 1986 1985 if (phydev->mdio.dev.driver) 1987 1986 module_put(phydev->mdio.dev.driver->owner); ··· 3773 3778 /* Set the state to READY by default */ 3774 3779 phydev->state = PHY_READY; 3775 3780 3781 + /* Register the PHY LED triggers */ 3782 + if (!phydev->is_on_sfp_module) 3783 + phy_led_triggers_register(phydev); 3784 + 3776 3785 /* Get the LEDs from the device tree, and instantiate standard 3777 3786 * LEDs for them. 3778 3787 */ 3779 - if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev)) 3788 + if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev)) { 3780 3789 err = of_phy_leds(phydev); 3790 + if (err) 3791 + goto out; 3792 + } 3793 + 3794 + return 0; 3781 3795 3782 3796 out: 3797 + if (!phydev->is_on_sfp_module) 3798 + phy_led_triggers_unregister(phydev); 3799 + 3783 3800 /* Re-assert the reset signal on error */ 3784 - if (err) 3785 - phy_device_reset(phydev, 1); 3801 + phy_device_reset(phydev, 1); 3786 3802 3787 3803 return err; 3788 3804 } ··· 3806 3800 3807 3801 if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev)) 3808 3802 phy_leds_unregister(phydev); 3803 + 3804 + if (!phydev->is_on_sfp_module) 3805 + phy_led_triggers_unregister(phydev); 3809 3806 3810 3807 phydev->state = PHY_DOWN; 3811 3808
+1 -1
drivers/net/phy/qcom/qca807x.c
··· 375 375 reg = QCA807X_MMD7_LED_FORCE_CTRL(offset); 376 376 val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg); 377 377 378 - return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val); 378 + return !!FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val); 379 379 } 380 380 381 381 static int qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+21 -5
drivers/net/team/team_core.c
··· 1290 1290 1291 1291 static void __team_port_change_port_removed(struct team_port *port); 1292 1292 1293 - static int team_port_del(struct team *team, struct net_device *port_dev) 1293 + static int team_port_del(struct team *team, struct net_device *port_dev, bool unregister) 1294 1294 { 1295 1295 struct net_device *dev = team->dev; 1296 1296 struct team_port *port; ··· 1328 1328 __team_port_change_port_removed(port); 1329 1329 1330 1330 team_port_set_orig_dev_addr(port); 1331 - dev_set_mtu(port_dev, port->orig.mtu); 1331 + if (unregister) { 1332 + netdev_lock_ops(port_dev); 1333 + __netif_set_mtu(port_dev, port->orig.mtu); 1334 + netdev_unlock_ops(port_dev); 1335 + } else { 1336 + dev_set_mtu(port_dev, port->orig.mtu); 1337 + } 1332 1338 kfree_rcu(port, rcu); 1333 1339 netdev_info(dev, "Port device %s removed\n", portname); 1334 1340 netdev_compute_master_upper_features(team->dev, true); ··· 1638 1632 ASSERT_RTNL(); 1639 1633 1640 1634 list_for_each_entry_safe(port, tmp, &team->port_list, list) 1641 - team_port_del(team, port->dev); 1635 + team_port_del(team, port->dev, false); 1642 1636 1643 1637 __team_change_mode(team, NULL); /* cleanup */ 1644 1638 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); ··· 1937 1931 1938 1932 ASSERT_RTNL(); 1939 1933 1940 - return team_port_del(team, port_dev); 1934 + return team_port_del(team, port_dev, false); 1935 + } 1936 + 1937 + static int team_del_slave_on_unregister(struct net_device *dev, struct net_device *port_dev) 1938 + { 1939 + struct team *team = netdev_priv(dev); 1940 + 1941 + ASSERT_RTNL(); 1942 + 1943 + return team_port_del(team, port_dev, true); 1941 1944 } 1942 1945 1943 1946 static netdev_features_t team_fix_features(struct net_device *dev, ··· 2939 2924 !!netif_oper_up(port->dev)); 2940 2925 break; 2941 2926 case NETDEV_UNREGISTER: 2942 - team_del_slave(port->team->dev, dev); 2927 + team_del_slave_on_unregister(port->team->dev, dev); 2943 2928 break; 2944 2929 case NETDEV_FEAT_CHANGE: 2945 2930 if (!port->team->notifier_ctx) { ··· 3012 2997 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); 3013 2998 MODULE_DESCRIPTION("Ethernet team device driver"); 3014 2999 MODULE_ALIAS_RTNL_LINK(DRV_NAME); 3000 + MODULE_IMPORT_NS("NETDEV_INTERNAL");
+7
drivers/net/usb/kalmia.c
··· 132 132 { 133 133 int status; 134 134 u8 ethernet_addr[ETH_ALEN]; 135 + static const u8 ep_addr[] = { 136 + 1 | USB_DIR_IN, 137 + 2 | USB_DIR_OUT, 138 + 0}; 135 139 136 140 /* Don't bind to AT command interface */ 137 141 if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) 138 142 return -EINVAL; 143 + 144 + if (!usb_check_bulk_endpoints(intf, ep_addr)) 145 + return -ENODEV; 139 146 140 147 dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK); 141 148 dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
+13 -2
drivers/net/usb/kaweth.c
··· 765 765 766 766 netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap); 767 767 768 - netif_stop_queue(net); 769 768 770 769 if (net->flags & IFF_PROMISC) { 771 770 packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS; ··· 774 775 } 775 776 776 777 kaweth->packet_filter_bitmap = packet_filter_bitmap; 777 - netif_wake_queue(net); 778 778 } 779 779 780 780 /**************************************************************** ··· 883 885 const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 884 886 int result = 0; 885 887 int rv = -EIO; 888 + static const u8 bulk_ep_addr[] = { 889 + 1 | USB_DIR_IN, 890 + 2 | USB_DIR_OUT, 891 + 0}; 892 + static const u8 int_ep_addr[] = { 893 + 3 | USB_DIR_IN, 894 + 0}; 886 895 887 896 dev_dbg(dev, 888 897 "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", ··· 902 897 dev_dbg(dev, "Descriptor length: %x type: %x\n", 903 898 (int)udev->descriptor.bLength, 904 899 (int)udev->descriptor.bDescriptorType); 900 + 901 + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || 902 + !usb_check_int_endpoints(intf, int_ep_addr)) { 903 + dev_err(dev, "couldn't find required endpoints\n"); 904 + return -ENODEV; 905 + } 905 906 906 907 netdev = alloc_etherdev(sizeof(*kaweth)); 907 908 if (!netdev)
-2
drivers/net/usb/lan78xx.c
··· 2094 2094 dev->mdiobus->phy_mask = ~(1 << 1); 2095 2095 break; 2096 2096 case ID_REV_CHIP_ID_7801_: 2097 - /* scan thru PHYAD[2..0] */ 2098 - dev->mdiobus->phy_mask = ~(0xFF); 2099 2097 break; 2100 2098 } 2101 2099
+42 -6
drivers/net/usb/pegasus.c
··· 28 28 BMSR_100FULL | BMSR_ANEGCAPABLE) 29 29 #define CARRIER_CHECK_DELAY (2 * HZ) 30 30 31 + /* 32 + * USB endpoints. 33 + */ 34 + 35 + enum pegasus_usb_ep { 36 + PEGASUS_USB_EP_CONTROL = 0, 37 + PEGASUS_USB_EP_BULK_IN = 1, 38 + PEGASUS_USB_EP_BULK_OUT = 2, 39 + PEGASUS_USB_EP_INT_IN = 3, 40 + }; 41 + 31 42 static bool loopback; 32 43 static bool mii_mode; 33 44 static char *devid; ··· 553 542 goto tl_sched; 554 543 goon: 555 544 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 556 - usb_rcvbulkpipe(pegasus->usb, 1), 545 + usb_rcvbulkpipe(pegasus->usb, PEGASUS_USB_EP_BULK_IN), 557 546 pegasus->rx_skb->data, PEGASUS_MTU, 558 547 read_bulk_callback, pegasus); 559 548 rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); ··· 593 582 return; 594 583 } 595 584 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 596 - usb_rcvbulkpipe(pegasus->usb, 1), 585 + usb_rcvbulkpipe(pegasus->usb, PEGASUS_USB_EP_BULK_IN), 597 586 pegasus->rx_skb->data, PEGASUS_MTU, 598 587 read_bulk_callback, pegasus); 599 588 try_again: ··· 721 710 ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); 722 711 skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len); 723 712 usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, 724 - usb_sndbulkpipe(pegasus->usb, 2), 713 + usb_sndbulkpipe(pegasus->usb, PEGASUS_USB_EP_BULK_OUT), 725 714 pegasus->tx_buff, count, 726 715 write_bulk_callback, pegasus); 727 716 if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) { ··· 812 801 813 802 static int alloc_urbs(pegasus_t *pegasus) 814 803 { 804 + static const u8 bulk_ep_addr[] = { 805 + 1 | USB_DIR_IN, 806 + 2 | USB_DIR_OUT, 807 + 0}; 808 + static const u8 int_ep_addr[] = { 809 + 3 | USB_DIR_IN, 810 + 0}; 815 811 int res = -ENOMEM; 812 + 813 + if (!usb_check_bulk_endpoints(pegasus->intf, bulk_ep_addr) || 814 + !usb_check_int_endpoints(pegasus->intf, int_ep_addr)) 815 + return -ENODEV; 816 816 817 817 pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 818 818 if (!pegasus->rx_urb) { ··· 859 837 set_registers(pegasus, EthID, 6, net->dev_addr); 860 838 861 839 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 862 - usb_rcvbulkpipe(pegasus->usb, 1), 840 + usb_rcvbulkpipe(pegasus->usb, PEGASUS_USB_EP_BULK_IN), 863 841 pegasus->rx_skb->data, PEGASUS_MTU, 864 842 read_bulk_callback, pegasus); 865 843 if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) { ··· 870 848 } 871 849 872 850 usb_fill_int_urb(pegasus->intr_urb, pegasus->usb, 873 - usb_rcvintpipe(pegasus->usb, 3), 851 + usb_rcvintpipe(pegasus->usb, PEGASUS_USB_EP_INT_IN), 874 852 pegasus->intr_buff, sizeof(pegasus->intr_buff), 875 853 intr_callback, pegasus, pegasus->intr_interval); 876 854 if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) { ··· 1155 1133 pegasus_t *pegasus; 1156 1134 int dev_index = id - pegasus_ids; 1157 1135 int res = -ENOMEM; 1136 + static const u8 bulk_ep_addr[] = { 1137 + PEGASUS_USB_EP_BULK_IN | USB_DIR_IN, 1138 + PEGASUS_USB_EP_BULK_OUT | USB_DIR_OUT, 1139 + 0}; 1140 + static const u8 int_ep_addr[] = { 1141 + PEGASUS_USB_EP_INT_IN | USB_DIR_IN, 1142 + 0}; 1158 1143 1159 1144 if (pegasus_blacklisted(dev)) 1160 1145 return -ENODEV; 1146 + 1147 + /* Verify that all required endpoints are present */ 1148 + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || 1149 + !usb_check_int_endpoints(intf, int_ep_addr)) { 1150 + dev_err(&intf->dev, "Missing or invalid endpoints\n"); 1151 + return -ENODEV; 1152 + } 1161 1153 1162 1154 net = alloc_etherdev(sizeof(struct pegasus)); 1163 1155 if (!net) ··· 1179 1143 1180 1144 pegasus = netdev_priv(net); 1181 
1145 pegasus->dev_index = dev_index; 1146 + pegasus->intf = intf; 1182 1147 1183 1148 res = alloc_urbs(pegasus); 1184 1149 if (res < 0) { ··· 1191 1154 1192 1155 INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier); 1193 1156 1194 - pegasus->intf = intf; 1195 1157 pegasus->usb = dev; 1196 1158 pegasus->net = net; 1197 1159
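All three USB fixes above (kalmia, kaweth, pegasus) validate the interface layout at probe time with usb_check_bulk_endpoints()/usb_check_int_endpoints(), which take a zero-terminated array of endpoint addresses: the endpoint number in the low bits, ORed with the direction in bit 7. A user-space sketch of how those address bytes are built and decoded (the direction constants mirror include/uapi/linux/usb/ch9.h; the table contents are only an example):

    #include <stdint.h>
    #include <stdio.h>

    /* Direction bits as in the USB spec (include/uapi/linux/usb/ch9.h). */
    #define USB_DIR_OUT 0x00
    #define USB_DIR_IN  0x80

    int main(void)
    {
        /* Zero-terminated endpoint-address list, the format the
         * usb_check_*_endpoints() helpers expect: endpoint number in
         * the low nibble, direction in bit 7. */
        static const uint8_t bulk_ep_addr[] = {
            1 | USB_DIR_IN,    /* 0x81: bulk IN */
            2 | USB_DIR_OUT,   /* 0x02: bulk OUT */
            0,                 /* terminator */
        };
        const uint8_t *p;

        for (p = bulk_ep_addr; *p; p++)
            printf("ep 0x%02x: number %u, direction %s\n",
                   *p, *p & 0x0f, (*p & USB_DIR_IN) ? "IN" : "OUT");
        return 0;
    }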
+2
drivers/net/wan/farsync.c
··· 2550 2550 2551 2551 fst_disable_intr(card); 2552 2552 free_irq(card->irq, card); 2553 + tasklet_kill(&fst_tx_task); 2554 + tasklet_kill(&fst_int_task); 2553 2555 2554 2556 iounmap(card->ctlmem); 2555 2557 iounmap(card->mem);
+3 -4
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
··· 951 951 goto out; 952 952 953 953 /* try to attach to the target device */ 954 - sdiodev->bus = brcmf_sdio_probe(sdiodev); 955 - if (IS_ERR(sdiodev->bus)) { 956 - ret = PTR_ERR(sdiodev->bus); 954 + ret = brcmf_sdio_probe(sdiodev); 955 + if (ret) 957 956 goto out; 958 - } 957 + 959 958 brcmf_sdiod_host_fixup(sdiodev->func2->card->host); 960 959 out: 961 960 if (ret)
+4 -3
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
··· 4445 4445 return fwreq; 4446 4446 } 4447 4447 4448 - struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) 4448 + int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) 4449 4449 { 4450 4450 int ret; 4451 4451 struct brcmf_sdio *bus; ··· 4551 4551 goto fail; 4552 4552 } 4553 4553 4554 - return bus; 4554 + return 0; 4555 4555 4556 4556 fail: 4557 4557 brcmf_sdio_remove(bus); 4558 - return ERR_PTR(ret); 4558 + sdiodev->bus = NULL; 4559 + return ret; 4559 4560 } 4560 4561 4561 4562 /* Detach and free everything */
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
··· 358 358 int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev); 359 359 int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev); 360 360 361 - struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev); 361 + int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev); 362 362 void brcmf_sdio_remove(struct brcmf_sdio *bus); 363 363 void brcmf_sdio_isr(struct brcmf_sdio *bus, bool in_isr); 364 364
+2 -2
drivers/net/wireless/marvell/libertas/main.c
··· 799 799 { 800 800 lbs_free_cmd_buffer(priv); 801 801 kfifo_free(&priv->event_fifo); 802 - timer_delete(&priv->command_timer); 803 - timer_delete(&priv->tx_lockup_timer); 802 + timer_delete_sync(&priv->command_timer); 803 + timer_delete_sync(&priv->tx_lockup_timer); 804 804 } 805 805 806 806 static const struct net_device_ops lbs_netdev_ops = {
+1 -1
drivers/net/wireless/marvell/mwifiex/cfg80211.c
··· 3148 3148 SET_NETDEV_DEV(dev, adapter->dev); 3149 3149 3150 3150 ret = dev_alloc_name(dev, name); 3151 - if (ret) 3151 + if (ret < 0) 3152 3152 goto err_alloc_name; 3153 3153 3154 3154 priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC-%s",
+1
drivers/nfc/pn533/usb.c
··· 628 628 usb_free_urb(phy->out_urb); 629 629 usb_free_urb(phy->ack_urb); 630 630 kfree(phy->ack_buffer); 631 + usb_put_dev(phy->udev); 631 632 632 633 nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n"); 633 634 }
+16 -9
drivers/pci/controller/dwc/pcie-designware-ep.c
··· 905 905 * supported, so we avoid reprogramming the region on every MSI, 906 906 * specifically unmapping immediately after writel(). 907 907 */ 908 + if (ep->msi_iatu_mapped && (ep->msi_msg_addr != msg_addr || 909 + ep->msi_map_size != map_size)) { 910 + /* 911 + * The host changed the MSI target address or the required 912 + * mapping size changed. Reprogramming the iATU when there are 913 + * operations in flight is unsafe on this controller. However, 914 + * there is no unified way to check if we have operations in 915 + * flight, thus we don't know if we should WARN() or not. 916 + */ 917 + dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); 918 + ep->msi_iatu_mapped = false; 919 + } 920 + 908 921 if (!ep->msi_iatu_mapped) { 909 922 ret = dw_pcie_ep_map_addr(epc, func_no, 0, 910 923 ep->msi_mem_phys, msg_addr, ··· 928 915 ep->msi_iatu_mapped = true; 929 916 ep->msi_msg_addr = msg_addr; 930 917 ep->msi_map_size = map_size; 931 - } else if (WARN_ON_ONCE(ep->msi_msg_addr != msg_addr || 932 - ep->msi_map_size != map_size)) { 933 - /* 934 - * The host changed the MSI target address or the required 935 - * mapping size changed. Reprogramming the iATU at runtime is 936 - * unsafe on this controller, so bail out instead of trying to 937 - * update the existing region. 938 - */ 939 - return -EINVAL; 940 918 } 941 919 942 920 writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset); ··· 1013 1009 return ret; 1014 1010 1015 1011 writel(msg_data, ep->msi_mem + offset); 1012 + 1013 + /* flush posted write before unmap */ 1014 + readl(ep->msi_mem + offset); 1016 1015 1017 1016 dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); 1018 1017
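The second designware-ep hunk inserts a readl() between writel() and dw_pcie_ep_unmap_addr(): MMIO writes may be posted, so reading back from the same window is the conventional way to ensure the MSI write has left the host bridge before the iATU region is torn down. A toy sketch of the ordering pattern; real MMIO needs an ioremap()ed window and the kernel accessors, so plain memory here only stands in for the device window:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for an MMIO window: on real hardware a write may be
     * buffered (posted) by bridges, and a read from the same device
     * cannot complete until earlier writes have landed. Ordinary
     * memory has no such buffering, so this only shows the shape. */
    static volatile uint32_t fake_msi_window;

    static void mmio_write32(volatile uint32_t *addr, uint32_t val)
    {
        *addr = val;
    }

    static uint32_t mmio_read32(volatile uint32_t *addr)
    {
        return *addr;
    }

    int main(void)
    {
        mmio_write32(&fake_msi_window, 0x1234);  /* possibly posted */
        (void)mmio_read32(&fake_msi_window);     /* read-back flush */
        /* only now is it safe to tear the mapping down */
        printf("flushed 0x%08x before unmap\n",
               (unsigned int)fake_msi_window);
        return 0;
    }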
+3 -1
drivers/pmdomain/imx/gpcv2.c
··· 1416 1416 1417 1417 static int imx_pgc_domain_resume(struct device *dev) 1418 1418 { 1419 - return pm_runtime_put(dev); 1419 + pm_runtime_put(dev); 1420 + 1421 + return 0; 1420 1422 } 1421 1423 #endif 1422 1424
+1 -1
drivers/regulator/Kconfig
··· 508 508 This driver supports the FP9931/JD9930 voltage regulator chip 509 509 which is used to provide power to Electronic Paper Displays 510 510 so it is found in E-Book readers. 511 - If HWWON is enabled, it also provides temperature measurement. 511 + If HWMON is enabled, it also provides temperature measurement. 512 512 513 513 config REGULATOR_LM363X 514 514 tristate "TI LM363X voltage regulators"
+1 -2
drivers/regulator/bq257xx-regulator.c
··· 115 115 return; 116 116 117 117 subchild = of_get_child_by_name(child, pdata->desc.of_match); 118 + of_node_put(child); 118 119 if (!subchild) 119 120 return; 120 - 121 - of_node_put(child); 122 121 123 122 pdata->otg_en_gpio = devm_fwnode_gpiod_get_index(&pdev->dev, 124 123 of_fwnode_handle(subchild),
+3 -4
drivers/regulator/fp9931.c
··· 144 144 return ret; 145 145 146 146 ret = regmap_read(data->regmap, FP9931_REG_TMST_VALUE, &val); 147 - if (ret) 148 - return ret; 147 + if (!ret) 148 + *temp = (s8)val * 1000; 149 149 150 150 pm_runtime_put_autosuspend(data->dev); 151 - *temp = (s8)val * 1000; 152 151 153 - return 0; 152 + return ret; 154 153 } 155 154 156 155 static umode_t fp9931_hwmon_is_visible(const void *data,
+3
drivers/regulator/tps65185.c
··· 332 332 int i; 333 333 334 334 data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); 335 + if (!data) 336 + return -ENOMEM; 337 + 335 338 data->regmap = devm_regmap_init_i2c(client, &regmap_config); 336 339 if (IS_ERR(data->regmap)) 337 340 return dev_err_probe(&client->dev, PTR_ERR(data->regmap),
+2
drivers/scsi/lpfc/lpfc_init.c
··· 12025 12025 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12026 12026 if (phba->sli4_hba.dpp_regs_memmap_p) 12027 12027 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 12028 + if (phba->sli4_hba.dpp_regs_memmap_wc_p) 12029 + iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p); 12028 12030 break; 12029 12031 case LPFC_SLI_INTF_IF_TYPE_1: 12030 12032 break;
+30 -6
drivers/scsi/lpfc/lpfc_sli.c
··· 15977 15977 return NULL; 15978 15978 } 15979 15979 15980 + static __maybe_unused void __iomem * 15981 + lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset) 15982 + { 15983 + 15984 + /* DPP region is supposed to cover 64-bit BAR2 */ 15985 + if (dpp_barset != WQ_PCI_BAR_4_AND_5) { 15986 + lpfc_log_msg(phba, KERN_WARNING, LOG_INIT, 15987 + "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n", 15988 + dpp_barset); 15989 + return NULL; 15990 + } 15991 + 15992 + if (!phba->sli4_hba.dpp_regs_memmap_wc_p) { 15993 + void __iomem *dpp_map; 15994 + 15995 + dpp_map = ioremap_wc(phba->pci_bar2_map, 15996 + pci_resource_len(phba->pcidev, 15997 + PCI_64BIT_BAR4)); 15998 + 15999 + if (dpp_map) 16000 + phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map; 16001 + } 16002 + 16003 + return phba->sli4_hba.dpp_regs_memmap_wc_p; 16004 + } 16005 + 15980 16006 /** 15981 16007 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs 15982 16008 * @phba: HBA structure that EQs are on. ··· 16966 16940 uint8_t dpp_barset; 16967 16941 uint32_t dpp_offset; 16968 16942 uint8_t wq_create_version; 16969 - #ifdef CONFIG_X86 16970 - unsigned long pg_addr; 16971 - #endif 16972 16943 16973 16944 /* sanity check on queue memory */ 16974 16945 if (!wq || !cq) ··· 17151 17128 17152 17129 #ifdef CONFIG_X86 17153 17130 /* Enable combined writes for DPP aperture */ 17154 - pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 17155 - rc = set_memory_wc(pg_addr, 1); 17156 - if (rc) { 17131 + bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset); 17132 + if (!bar_memmap_p) { 17157 17133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17158 17134 "3272 Cannot setup Combined " 17159 17135 "Write on WQ[%d] - disable DPP\n", 17160 17136 wq->queue_id); 17161 17137 phba->cfg_enable_dpp = 0; 17138 + } else { 17139 + wq->dpp_regaddr = bar_memmap_p + dpp_offset; 17162 17140 } 17163 17141 #else 17164 17142 phba->cfg_enable_dpp = 0;
+3
drivers/scsi/lpfc/lpfc_sli4.h
··· 785 785 void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for 786 786 * dpp registers 787 787 */ 788 + void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for 789 + * dpp registers with write combining 790 + */ 788 791 union { 789 792 struct { 790 793 /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+18 -14
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 4807 4807 } 4808 4808 4809 4809 for (i = 0; i < mrioc->num_queues; i++) { 4810 - mrioc->op_reply_qinfo[i].qid = 0; 4811 - mrioc->op_reply_qinfo[i].ci = 0; 4812 - mrioc->op_reply_qinfo[i].num_replies = 0; 4813 - mrioc->op_reply_qinfo[i].ephase = 0; 4814 - atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); 4815 - atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0); 4816 - mpi3mr_memset_op_reply_q_buffers(mrioc, i); 4810 + if (mrioc->op_reply_qinfo) { 4811 + mrioc->op_reply_qinfo[i].qid = 0; 4812 + mrioc->op_reply_qinfo[i].ci = 0; 4813 + mrioc->op_reply_qinfo[i].num_replies = 0; 4814 + mrioc->op_reply_qinfo[i].ephase = 0; 4815 + atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); 4816 + atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0); 4817 + mpi3mr_memset_op_reply_q_buffers(mrioc, i); 4818 + } 4817 4819 4818 - mrioc->req_qinfo[i].ci = 0; 4819 - mrioc->req_qinfo[i].pi = 0; 4820 - mrioc->req_qinfo[i].num_requests = 0; 4821 - mrioc->req_qinfo[i].qid = 0; 4822 - mrioc->req_qinfo[i].reply_qid = 0; 4823 - spin_lock_init(&mrioc->req_qinfo[i].q_lock); 4824 - mpi3mr_memset_op_req_q_buffers(mrioc, i); 4820 + if (mrioc->req_qinfo) { 4821 + mrioc->req_qinfo[i].ci = 0; 4822 + mrioc->req_qinfo[i].pi = 0; 4823 + mrioc->req_qinfo[i].num_requests = 0; 4824 + mrioc->req_qinfo[i].qid = 0; 4825 + mrioc->req_qinfo[i].reply_qid = 0; 4826 + spin_lock_init(&mrioc->req_qinfo[i].q_lock); 4827 + mpi3mr_memset_op_req_q_buffers(mrioc, i); 4828 + } 4825 4829 } 4826 4830 4827 4831 atomic_set(&mrioc->pend_large_data_sz, 0);
+3 -2
drivers/scsi/pm8001/pm8001_sas.c
··· 525 525 } else { 526 526 task->task_done(task); 527 527 } 528 - rc = -ENODEV; 529 - goto err_out; 528 + spin_unlock_irqrestore(&pm8001_ha->lock, flags); 529 + pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n"); 530 + return 0; 530 531 } 531 532 532 533 ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+2 -3
drivers/scsi/ses.c
··· 528 528 }; 529 529 530 530 static int ses_enclosure_find_by_addr(struct enclosure_device *edev, 531 - void *data) 531 + struct efd *efd) 532 532 { 533 - struct efd *efd = data; 534 533 int i; 535 534 struct ses_component *scomp; 536 535 ··· 682 683 if (efd.addr) { 683 684 efd.dev = &sdev->sdev_gendev; 684 685 685 - enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); 686 + ses_enclosure_find_by_addr(edev, &efd); 686 687 } 687 688 } 688 689
-9
drivers/scsi/snic/vnic_dev.c
··· 42 42 struct vnic_devcmd_notify *notify; 43 43 struct vnic_devcmd_notify notify_copy; 44 44 dma_addr_t notify_pa; 45 - u32 *linkstatus; 46 - dma_addr_t linkstatus_pa; 47 45 struct vnic_stats *stats; 48 46 dma_addr_t stats_pa; 49 47 struct vnic_devcmd_fw_info *fw_info; ··· 648 650 649 651 int svnic_dev_link_status(struct vnic_dev *vdev) 650 652 { 651 - if (vdev->linkstatus) 652 - return *vdev->linkstatus; 653 653 654 654 if (!vnic_dev_notify_ready(vdev)) 655 655 return 0; ··· 682 686 sizeof(struct vnic_devcmd_notify), 683 687 vdev->notify, 684 688 vdev->notify_pa); 685 - if (vdev->linkstatus) 686 - dma_free_coherent(&vdev->pdev->dev, 687 - sizeof(u32), 688 - vdev->linkstatus, 689 - vdev->linkstatus_pa); 690 689 if (vdev->stats) 691 690 dma_free_coherent(&vdev->pdev->dev, 692 691 sizeof(struct vnic_stats),
+3 -2
drivers/scsi/storvsc_drv.c
··· 1856 1856 cmd_request->payload_sz = payload_sz; 1857 1857 1858 1858 /* Invokes the vsc to start an IO */ 1859 - ret = storvsc_do_io(dev, cmd_request, get_cpu()); 1860 - put_cpu(); 1859 + migrate_disable(); 1860 + ret = storvsc_do_io(dev, cmd_request, smp_processor_id()); 1861 + migrate_enable(); 1861 1862 1862 1863 if (ret) 1863 1864 scsi_dma_unmap(scmnd);
+3
drivers/spi/spi-stm32.c
··· 1625 1625 return -EINVAL; 1626 1626 } 1627 1627 1628 + *rx_mdma_desc = _mdma_desc; 1629 + *rx_dma_desc = _dma_desc; 1630 + 1628 1631 return 0; 1629 1632 } 1630 1633
+34 -13
drivers/ufs/core/ufshcd.c
··· 24 24 #include <linux/pm_opp.h> 25 25 #include <linux/regulator/consumer.h> 26 26 #include <linux/sched/clock.h> 27 + #include <linux/sizes.h> 27 28 #include <linux/iopoll.h> 28 29 #include <scsi/scsi_cmnd.h> 29 30 #include <scsi/scsi_dbg.h> ··· 518 517 519 518 if (hba->mcq_enabled) { 520 519 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq); 521 - 522 - hwq_id = hwq->id; 520 + if (hwq) 521 + hwq_id = hwq->id; 523 522 } else { 524 523 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 525 524 } ··· 4390 4389 spin_unlock_irqrestore(hba->host->host_lock, flags); 4391 4390 mutex_unlock(&hba->uic_cmd_mutex); 4392 4391 4393 - /* 4394 - * If the h8 exit fails during the runtime resume process, it becomes 4395 - * stuck and cannot be recovered through the error handler. To fix 4396 - * this, use link recovery instead of the error handler. 4397 - */ 4398 - if (ret && hba->pm_op_in_progress) 4399 - ret = ufshcd_link_recovery(hba); 4400 - 4401 4392 return ret; 4402 4393 } 4403 4394 ··· 5242 5249 hba->dev_info.rpmb_region_size[1] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION1_SIZE]; 5243 5250 hba->dev_info.rpmb_region_size[2] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION2_SIZE]; 5244 5251 hba->dev_info.rpmb_region_size[3] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION3_SIZE]; 5252 + 5253 + if (hba->dev_info.wspecversion <= 0x0220) { 5254 + /* 5255 + * These older spec chips have only one RPMB region, 5256 + * sized between 128 kB minimum and 16 MB maximum. 5257 + * No per region size fields are provided (respective 5258 + * REGIONX_SIZE fields always contain zeros), so get 5259 + * it from the logical block count and size fields for 5260 + * compatibility 5261 + * 5262 + * (See JESD220C-2_2 Section 14.1.4.6 5263 + * RPMB Unit Descriptor,* offset 13h, 4 bytes) 5264 + */ 5265 + hba->dev_info.rpmb_region_size[0] = 5266 + (get_unaligned_be64(desc_buf 5267 + + RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT) 5268 + << desc_buf[RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE]) 5269 + / SZ_128K; 5270 + } 5245 5271 } 5246 5272 5247 5273 ··· 5975 5963 5976 5964 hba->auto_bkops_enabled = false; 5977 5965 trace_ufshcd_auto_bkops_state(hba, "Disabled"); 5966 + hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; 5978 5967 hba->is_urgent_bkops_lvl_checked = false; 5979 5968 out: 5980 5969 return err; ··· 6079 6066 * impacted or critical. Handle these device by determining their urgent 6080 6067 * bkops status at runtime. 6081 6068 */ 6082 - if (curr_status < BKOPS_STATUS_PERF_IMPACT) { 6069 + if ((curr_status > BKOPS_STATUS_NO_OP) && (curr_status < BKOPS_STATUS_PERF_IMPACT)) { 6083 6070 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", 6084 6071 __func__, curr_status); 6085 6072 /* update the current status as the urgent bkops level */ ··· 7110 7097 7111 7098 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs); 7112 7099 if (ret) 7113 - outstanding_cqs = (1U << hba->nr_hw_queues) - 1; 7100 + outstanding_cqs = (1ULL << hba->nr_hw_queues) - 1; 7114 7101 7115 7102 /* Exclude the poll queues */ 7116 7103 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; ··· 10192 10179 } else { 10193 10180 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 10194 10181 __func__, ret); 10195 - goto vendor_suspend; 10182 + /* 10183 + * If the h8 exit fails during the runtime resume 10184 + * process, it becomes stuck and cannot be recovered 10185 + * through the error handler. To fix this, use link 10186 + * recovery instead of the error handler. 
10187 + */ 10188 + ret = ufshcd_link_recovery(hba); 10189 + if (ret) 10190 + goto vendor_suspend; 10196 10191 } 10197 10192 } else if (ufshcd_is_link_off(hba)) { 10198 10193 /*
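For the older-spec RPMB fallback above, the region size ends up expressed in 128 KiB units: total bytes = logical block count << logical block size exponent, divided by SZ_128K. A worked example with made-up descriptor values (the real numbers come from the RPMB unit descriptor fields named in the hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_128K (128 * 1024)   /* as in <linux/sizes.h> */

    int main(void)
    {
        /* Made-up descriptor values: 1024 logical blocks, block size
         * exponent 12, i.e. 4096-byte blocks. */
        uint64_t logical_blk_count = 1024;
        unsigned int logical_blk_size = 12;   /* block is 2^N bytes */

        uint64_t bytes = logical_blk_count << logical_blk_size;
        uint64_t units = bytes / SZ_128K;

        /* 1024 * 4096 B = 4 MiB = 32 units of 128 KiB */
        printf("RPMB region: %llu bytes = %llu x 128 KiB\n",
               (unsigned long long)bytes, (unsigned long long)units);
        return 0;
    }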
+2 -1
fs/binfmt_elf.c
··· 47 47 #include <linux/dax.h> 48 48 #include <linux/uaccess.h> 49 49 #include <uapi/linux/rseq.h> 50 + #include <linux/rseq.h> 50 51 #include <asm/param.h> 51 52 #include <asm/page.h> 52 53 ··· 287 286 } 288 287 #ifdef CONFIG_RSEQ 289 288 NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end)); 290 - NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq)); 289 + NEW_AUX_ENT(AT_RSEQ_ALIGN, rseq_alloc_align()); 291 290 #endif 292 291 #undef NEW_AUX_ENT 293 292 /* AT_NULL is zero; clear the rest too */
+6 -1
fs/erofs/inode.c
··· 222 222 223 223 static int erofs_fill_inode(struct inode *inode) 224 224 { 225 + const struct address_space_operations *aops; 225 226 int err; 226 227 227 228 trace_erofs_fill_inode(inode); ··· 255 254 } 256 255 257 256 mapping_set_large_folios(inode->i_mapping); 258 - return erofs_inode_set_aops(inode, inode, false); 257 + aops = erofs_get_aops(inode, false); 258 + if (IS_ERR(aops)) 259 + return PTR_ERR(aops); 260 + inode->i_mapping->a_ops = aops; 261 + return 0; 259 262 } 260 263 261 264 /*
+7 -9
fs/erofs/internal.h
··· 471 471 return NULL; 472 472 } 473 473 474 - static inline int erofs_inode_set_aops(struct inode *inode, 475 - struct inode *realinode, bool no_fscache) 474 + static inline const struct address_space_operations * 475 + erofs_get_aops(struct inode *realinode, bool no_fscache) 476 476 { 477 477 if (erofs_inode_is_data_compressed(EROFS_I(realinode)->datalayout)) { 478 478 if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP)) 479 - return -EOPNOTSUPP; 479 + return ERR_PTR(-EOPNOTSUPP); 480 480 DO_ONCE_LITE_IF(realinode->i_blkbits != PAGE_SHIFT, 481 481 erofs_info, realinode->i_sb, 482 482 "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!"); 483 - inode->i_mapping->a_ops = &z_erofs_aops; 484 - return 0; 483 + return &z_erofs_aops; 485 484 } 486 - inode->i_mapping->a_ops = &erofs_aops; 487 485 if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && !no_fscache && 488 486 erofs_is_fscache_mode(realinode->i_sb)) 489 - inode->i_mapping->a_ops = &erofs_fscache_access_aops; 487 + return &erofs_fscache_access_aops; 490 488 if (IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && 491 489 erofs_is_fileio_mode(EROFS_SB(realinode->i_sb))) 492 - inode->i_mapping->a_ops = &erofs_fileio_aops; 493 - return 0; 490 + return &erofs_fileio_aops; 491 + return &erofs_aops; 494 492 } 495 493 496 494 int erofs_register_sysfs(struct super_block *sb);
+9 -5
fs/erofs/ishare.c
··· 40 40 { 41 41 struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); 42 42 struct erofs_inode *vi = EROFS_I(inode); 43 + const struct address_space_operations *aops; 43 44 struct erofs_inode_fingerprint fp; 44 45 struct inode *sharedinode; 45 46 unsigned long hash; 46 47 48 + aops = erofs_get_aops(inode, true); 49 + if (IS_ERR(aops)) 50 + return false; 47 51 if (erofs_xattr_fill_inode_fingerprint(&fp, inode, sbi->domain_id)) 48 52 return false; 49 53 hash = xxh32(fp.opaque, fp.size, 0); ··· 60 56 } 61 57 62 58 if (inode_state_read_once(sharedinode) & I_NEW) { 63 - if (erofs_inode_set_aops(sharedinode, inode, true)) { 64 - iget_failed(sharedinode); 65 - kfree(fp.opaque); 66 - return false; 67 - } 59 + sharedinode->i_mapping->a_ops = aops; 68 60 sharedinode->i_size = vi->vfs_inode.i_size; 69 61 unlock_new_inode(sharedinode); 70 62 } else { 71 63 kfree(fp.opaque); 64 + if (aops != sharedinode->i_mapping->a_ops) { 65 + iput(sharedinode); 66 + return false; 67 + } 72 68 if (sharedinode->i_size != vi->vfs_inode.i_size) { 73 69 _erofs_printk(inode->i_sb, KERN_WARNING 74 70 "size(%lld:%lld) not matches for the same fingerprint\n",
+35 -48
fs/erofs/super.c
··· 424 424 425 425 static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode) 426 426 { 427 - #ifdef CONFIG_FS_DAX 428 - struct erofs_sb_info *sbi = fc->s_fs_info; 427 + if (IS_ENABLED(CONFIG_FS_DAX)) { 428 + struct erofs_sb_info *sbi = fc->s_fs_info; 429 429 430 - switch (mode) { 431 - case EROFS_MOUNT_DAX_ALWAYS: 432 - set_opt(&sbi->opt, DAX_ALWAYS); 433 - clear_opt(&sbi->opt, DAX_NEVER); 434 - return true; 435 - case EROFS_MOUNT_DAX_NEVER: 436 - set_opt(&sbi->opt, DAX_NEVER); 437 - clear_opt(&sbi->opt, DAX_ALWAYS); 438 - return true; 439 - default: 430 + if (mode == EROFS_MOUNT_DAX_ALWAYS) { 431 + set_opt(&sbi->opt, DAX_ALWAYS); 432 + clear_opt(&sbi->opt, DAX_NEVER); 433 + return true; 434 + } else if (mode == EROFS_MOUNT_DAX_NEVER) { 435 + set_opt(&sbi->opt, DAX_NEVER); 436 + clear_opt(&sbi->opt, DAX_ALWAYS); 437 + return true; 438 + } 440 439 DBG_BUGON(1); 441 440 return false; 442 441 } 443 - #else 444 442 errorfc(fc, "dax options not supported"); 445 443 return false; 446 - #endif 447 444 } 448 445 449 446 static int erofs_fc_parse_param(struct fs_context *fc, ··· 457 460 458 461 switch (opt) { 459 462 case Opt_user_xattr: 460 - #ifdef CONFIG_EROFS_FS_XATTR 461 - if (result.boolean) 463 + if (!IS_ENABLED(CONFIG_EROFS_FS_XATTR)) 464 + errorfc(fc, "{,no}user_xattr options not supported"); 465 + else if (result.boolean) 462 466 set_opt(&sbi->opt, XATTR_USER); 463 467 else 464 468 clear_opt(&sbi->opt, XATTR_USER); 465 - #else 466 - errorfc(fc, "{,no}user_xattr options not supported"); 467 - #endif 468 469 break; 469 470 case Opt_acl: 470 - #ifdef CONFIG_EROFS_FS_POSIX_ACL 471 - if (result.boolean) 471 + if (!IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL)) 472 + errorfc(fc, "{,no}acl options not supported"); 473 + else if (result.boolean) 472 474 set_opt(&sbi->opt, POSIX_ACL); 473 475 else 474 476 clear_opt(&sbi->opt, POSIX_ACL); 475 - #else 476 - errorfc(fc, "{,no}acl options not supported"); 477 - #endif 478 477 break; 479 478 case Opt_cache_strategy: 480 - #ifdef CONFIG_EROFS_FS_ZIP 481 - sbi->opt.cache_strategy = result.uint_32; 482 - #else 483 - errorfc(fc, "compression not supported, cache_strategy ignored"); 484 - #endif 479 + if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP)) 480 + errorfc(fc, "compression not supported, cache_strategy ignored"); 481 + else 482 + sbi->opt.cache_strategy = result.uint_32; 485 483 break; 486 484 case Opt_dax: 487 485 if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS)) ··· 525 533 break; 526 534 #endif 527 535 case Opt_directio: 528 - #ifdef CONFIG_EROFS_FS_BACKED_BY_FILE 529 - if (result.boolean) 536 + if (!IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE)) 537 + errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name); 538 + else if (result.boolean) 530 539 set_opt(&sbi->opt, DIRECT_IO); 531 540 else 532 541 clear_opt(&sbi->opt, DIRECT_IO); 533 - #else 534 - errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name); 535 - #endif 536 542 break; 537 543 case Opt_fsoffset: 538 544 sbi->dif0.fsoff = result.uint_64; 539 545 break; 540 546 case Opt_inode_share: 541 - #ifdef CONFIG_EROFS_FS_PAGE_CACHE_SHARE 542 - set_opt(&sbi->opt, INODE_SHARE); 543 - #else 544 - errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name); 545 - #endif 547 + if (!IS_ENABLED(CONFIG_EROFS_FS_PAGE_CACHE_SHARE)) 548 + errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name); 549 + else 550 + set_opt(&sbi->opt, INODE_SHARE); 546 551 break; 547 552 } 548 553 return 0; ··· 798 809 ret = get_tree_bdev_flags(fc, erofs_fc_fill_super, 799 
810 IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) ? 800 811 GET_TREE_BDEV_QUIET_LOOKUP : 0); 801 - #ifdef CONFIG_EROFS_FS_BACKED_BY_FILE 802 - if (ret == -ENOTBLK) { 812 + if (IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && ret == -ENOTBLK) { 803 813 struct file *file; 804 814 805 815 if (!fc->source) ··· 812 824 sbi->dif0.file->f_mapping->a_ops->read_folio) 813 825 return get_tree_nodev(fc, erofs_fc_fill_super); 814 826 } 815 - #endif 816 827 return ret; 817 828 } 818 829 ··· 1095 1108 seq_puts(seq, ",dax=never"); 1096 1109 if (erofs_is_fileio_mode(sbi) && test_opt(opt, DIRECT_IO)) 1097 1110 seq_puts(seq, ",directio"); 1098 - #ifdef CONFIG_EROFS_FS_ONDEMAND 1099 - if (sbi->fsid) 1100 - seq_printf(seq, ",fsid=%s", sbi->fsid); 1101 - if (sbi->domain_id) 1102 - seq_printf(seq, ",domain_id=%s", sbi->domain_id); 1103 - #endif 1111 + if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND)) { 1112 + if (sbi->fsid) 1113 + seq_printf(seq, ",fsid=%s", sbi->fsid); 1114 + if (sbi->domain_id) 1115 + seq_printf(seq, ",domain_id=%s", sbi->domain_id); 1116 + } 1104 1117 if (sbi->dif0.fsoff) 1105 1118 seq_printf(seq, ",fsoffset=%llu", sbi->dif0.fsoff); 1106 1119 if (test_opt(opt, INODE_SHARE))
+5 -4
fs/erofs/zmap.c
··· 513 513 unsigned int recsz = z_erofs_extent_recsize(vi->z_advise); 514 514 erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) + 515 515 vi->inode_isize + vi->xattr_isize), recsz); 516 + unsigned int bmask = sb->s_blocksize - 1; 516 517 bool in_mbox = erofs_inode_in_metabox(inode); 517 518 erofs_off_t lend = inode->i_size; 518 519 erofs_off_t l, r, mid, pa, la, lstart; ··· 597 596 map->m_flags |= EROFS_MAP_MAPPED | 598 597 EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED; 599 598 fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT; 599 + if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL) 600 + map->m_flags |= EROFS_MAP_PARTIAL_REF; 601 + map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK; 600 602 if (fmt) 601 603 map->m_algorithmformat = fmt - 1; 602 - else if (interlaced && !erofs_blkoff(sb, map->m_pa)) 604 + else if (interlaced && !((map->m_pa | map->m_plen) & bmask)) 603 605 map->m_algorithmformat = 604 606 Z_EROFS_COMPRESSION_INTERLACED; 605 607 else 606 608 map->m_algorithmformat = 607 609 Z_EROFS_COMPRESSION_SHIFTED; 608 - if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL) 609 - map->m_flags |= EROFS_MAP_PARTIAL_REF; 610 - map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK; 611 610 } 612 611 } 613 612 map->m_llen = lend - map->m_la;
+3 -2
fs/eventpoll.c
··· 2061 2061 * @ep: the &struct eventpoll to be currently checked. 2062 2062 * @depth: Current depth of the path being checked. 2063 2063 * 2064 - * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep. 2064 + * Return: depth of the subtree, or a value bigger than EP_MAX_NESTS if we found 2065 + * a loop or went too deep. 2065 2066 */ 2066 2067 static int ep_loop_check_proc(struct eventpoll *ep, int depth) 2067 2068 { ··· 2081 2080 struct eventpoll *ep_tovisit; 2082 2081 ep_tovisit = epi->ffd.file->private_data; 2083 2082 if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS) 2084 - result = INT_MAX; 2083 + result = EP_MAX_NESTS+1; 2085 2084 else 2086 2085 result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1); 2087 2086 if (result > EP_MAX_NESTS)
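The eventpoll fix matters because the caller immediately computes ep_loop_check_proc(...) + 1; returning INT_MAX made that addition a signed-integer overflow, which is undefined behavior in C. Any sentinel strictly greater than EP_MAX_NESTS signals "loop or too deep" equally well while keeping the arithmetic in range. A minimal sketch of the pattern (MAX_NESTS and the node chain are illustrative, not the epoll data structures):

    #include <stdio.h>

    #define MAX_NESTS 4   /* illustrative limit, like EP_MAX_NESTS */

    struct node {
        struct node *child;   /* next nesting level, if any */
    };

    /* Report "too deep" with a sentinel just above the limit instead
     * of INT_MAX, so the caller's "+ 1" below cannot overflow. */
    static int depth_check(const struct node *n, int depth)
    {
        int result = 0;

        if (n->child) {
            if (depth > MAX_NESTS)
                result = MAX_NESTS + 1;   /* safe sentinel */
            else
                result = depth_check(n->child, depth + 1) + 1;
        }
        return result;
    }

    int main(void)
    {
        struct node chain[7] = { { 0 } };
        int i, result;

        for (i = 0; i < 6; i++)
            chain[i].child = &chain[i + 1];

        result = depth_check(&chain[0], 0);
        printf("depth result %d (%s)\n", result,
               result > MAX_NESTS ? "loop or too deep" : "ok");
        return 0;
    }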
+1 -1
fs/file_attr.c
··· 378 378 struct path filepath __free(path_put) = {}; 379 379 unsigned int lookup_flags = 0; 380 380 struct file_attr fattr; 381 - struct file_kattr fa; 381 + struct file_kattr fa = { .flags_valid = true }; /* hint only */ 382 382 int error; 383 383 384 384 BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
+5 -4
fs/fs-writeback.c
··· 198 198 199 199 static bool wb_wait_for_completion_cb(struct wb_completion *done) 200 200 { 201 + unsigned long timeout = sysctl_hung_task_timeout_secs; 201 202 unsigned long waited_secs = (jiffies - done->wait_start) / HZ; 202 203 203 204 done->progress_stamp = jiffies; 204 - if (waited_secs > sysctl_hung_task_timeout_secs) 205 + if (timeout && (waited_secs > timeout)) 205 206 pr_info("INFO: The task %s:%d has been waiting for writeback " 206 207 "completion for more than %lu seconds.", 207 208 current->comm, current->pid, waited_secs); ··· 1955 1954 .range_end = LLONG_MAX, 1956 1955 }; 1957 1956 unsigned long start_time = jiffies; 1957 + unsigned long timeout = sysctl_hung_task_timeout_secs; 1958 1958 long write_chunk; 1959 1959 long total_wrote = 0; /* count both pages and inodes */ 1960 1960 unsigned long dirtied_before = jiffies; ··· 2042 2040 __writeback_single_inode(inode, &wbc); 2043 2041 2044 2042 /* Report progress to inform the hung task detector of the progress. */ 2045 - if (work->done && work->done->progress_stamp && 2046 - (jiffies - work->done->progress_stamp) > HZ * 2047 - sysctl_hung_task_timeout_secs / 2) 2043 + if (work->done && work->done->progress_stamp && timeout && 2044 + (jiffies - work->done->progress_stamp) > HZ * timeout / 2) 2048 2045 wake_up_all(work->done->waitq); 2049 2046 2050 2047 wbc_detach_inode(&wbc);
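Both fs-writeback hunks snapshot sysctl_hung_task_timeout_secs into a local before using it: a value of 0 means the hung-task detector is disabled, so the comparison (and the halving in the second hunk) must only happen when the timeout is non-zero, and reading it once keeps the check consistent if the sysctl changes mid-flight. A sketch of the snapshot-and-check idiom with a hypothetical tunable:

    #include <stdio.h>

    /* Hypothetical tunable; 0 means "detector disabled", as with
     * sysctl_hung_task_timeout_secs. */
    static unsigned long hung_timeout_secs = 120;

    static void report_if_stuck(unsigned long waited_secs)
    {
        /* Snapshot once: the enable check and the comparison must see
         * the same value even if the tunable changes concurrently. */
        unsigned long timeout = hung_timeout_secs;

        if (timeout && waited_secs > timeout)
            printf("waited %lu s (timeout %lu s)\n",
                   waited_secs, timeout);
    }

    int main(void)
    {
        report_if_stuck(300);    /* reports: 300 > 120 */
        hung_timeout_secs = 0;   /* disable the detector */
        report_if_stuck(300);    /* silent: 0 means off */
        return 0;
    }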
+1
fs/iomap/buffered-io.c
··· 624 624 * iomap_readahead - Attempt to read pages from a file. 625 625 * @ops: The operations vector for the filesystem. 626 626 * @ctx: The ctx used for issuing readahead. 627 + * @private: The filesystem-specific information for issuing iomap_iter. 627 628 * 628 629 * This function is for filesystems to call to implement their readahead 629 630 * address_space operation.
+46
fs/iomap/ioend.c
··· 69 69 return folio_count; 70 70 } 71 71 72 + static DEFINE_SPINLOCK(failed_ioend_lock); 73 + static LIST_HEAD(failed_ioend_list); 74 + 75 + static void 76 + iomap_fail_ioends( 77 + struct work_struct *work) 78 + { 79 + struct iomap_ioend *ioend; 80 + struct list_head tmp; 81 + unsigned long flags; 82 + 83 + spin_lock_irqsave(&failed_ioend_lock, flags); 84 + list_replace_init(&failed_ioend_list, &tmp); 85 + spin_unlock_irqrestore(&failed_ioend_lock, flags); 86 + 87 + while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend, 88 + io_list))) { 89 + list_del_init(&ioend->io_list); 90 + iomap_finish_ioend_buffered(ioend); 91 + cond_resched(); 92 + } 93 + } 94 + 95 + static DECLARE_WORK(failed_ioend_work, iomap_fail_ioends); 96 + 97 + static void iomap_fail_ioend_buffered(struct iomap_ioend *ioend) 98 + { 99 + unsigned long flags; 100 + 101 + /* 102 + * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions 103 + * in the fserror code. The caller no longer owns the ioend reference 104 + * after the spinlock drops. 105 + */ 106 + spin_lock_irqsave(&failed_ioend_lock, flags); 107 + if (list_empty(&failed_ioend_list)) 108 + WARN_ON_ONCE(!schedule_work(&failed_ioend_work)); 109 + list_add_tail(&ioend->io_list, &failed_ioend_list); 110 + spin_unlock_irqrestore(&failed_ioend_lock, flags); 111 + } 112 + 72 113 static void ioend_writeback_end_bio(struct bio *bio) 73 114 { 74 115 struct iomap_ioend *ioend = iomap_ioend_from_bio(bio); 75 116 76 117 ioend->io_error = blk_status_to_errno(bio->bi_status); 118 + if (ioend->io_error) { 119 + iomap_fail_ioend_buffered(ioend); 120 + return; 121 + } 122 + 77 123 iomap_finish_ioend_buffered(ioend); 78 124 } 79 125
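The new iomap code defers failed buffered ioends from bio completion context (which may be atomic) to process context: completions append to a spinlock-protected global list and schedule a single work item only on the empty-to-non-empty transition; the worker detaches the whole list with list_replace_init() and drains it with cond_resched() between entries. The same hand-off pattern in portable C with pthreads, as a sketch only; the kernel version relies on spinlocks and schedule_work() rather than a dedicated thread:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct item {
        int error;
        struct item *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
    static struct item *failed_list;   /* LIFO for brevity */
    static int done;

    /* Completion side: queue the item, wake the worker only when the
     * list was empty (one wakeup per batch, like schedule_work()). */
    static void fail_item(struct item *it)
    {
        pthread_mutex_lock(&lock);
        if (!failed_list)
            pthread_cond_signal(&kick);
        it->next = failed_list;
        failed_list = it;
        pthread_mutex_unlock(&lock);
    }

    /* Worker: detach the whole list under the lock (the moral
     * equivalent of list_replace_init()), then drain it unlocked. */
    static void *worker(void *arg)
    {
        (void)arg;
        for (;;) {
            struct item *batch;
            int stop;

            pthread_mutex_lock(&lock);
            while (!failed_list && !done)
                pthread_cond_wait(&kick, &lock);
            batch = failed_list;
            failed_list = NULL;
            stop = done;
            pthread_mutex_unlock(&lock);

            while (batch) {
                struct item *it = batch;

                batch = it->next;
                printf("finishing failed ioend, error %d\n", it->error);
                free(it);
            }
            if (stop)
                return NULL;
        }
    }

    int main(void)
    {
        pthread_t t;
        int i;

        pthread_create(&t, NULL, worker, NULL);
        for (i = 0; i < 3; i++) {
            struct item *it = malloc(sizeof(*it));

            if (!it)
                break;
            it->error = -5;   /* -EIO */
            fail_item(it);
        }
        pthread_mutex_lock(&lock);
        done = 1;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }

Scheduling only on the empty-to-non-empty transition is what keeps this cheap: later failures piggyback on the already-pending work instead of issuing one wakeup per bio.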
+1 -1
fs/minix/bitmap.c
··· 247 247 j += i * bits_per_zone; 248 248 if (!j || j > sbi->s_ninodes) { 249 249 iput(inode); 250 - return ERR_PTR(-ENOSPC); 250 + return ERR_PTR(-EFSCORRUPTED); 251 251 } 252 252 inode_init_owner(&nop_mnt_idmap, inode, dir, mode); 253 253 inode->i_ino = j;
+77 -62
fs/namespace.c
··· 1531 1531 static void *m_start(struct seq_file *m, loff_t *pos) 1532 1532 { 1533 1533 struct proc_mounts *p = m->private; 1534 + struct mount *mnt; 1534 1535 1535 1536 down_read(&namespace_sem); 1536 1537 1537 - return mnt_find_id_at(p->ns, *pos); 1538 + mnt = mnt_find_id_at(p->ns, *pos); 1539 + if (mnt) 1540 + *pos = mnt->mnt_id_unique; 1541 + return mnt; 1538 1542 } 1539 1543 1540 1544 static void *m_next(struct seq_file *m, void *v, loff_t *pos) 1541 1545 { 1542 - struct mount *next = NULL, *mnt = v; 1546 + struct mount *mnt = v; 1543 1547 struct rb_node *node = rb_next(&mnt->mnt_node); 1544 1548 1545 - ++*pos; 1546 1549 if (node) { 1547 - next = node_to_mount(node); 1550 + struct mount *next = node_to_mount(node); 1548 1551 *pos = next->mnt_id_unique; 1552 + return next; 1549 1553 } 1550 - return next; 1554 + 1555 + /* 1556 + * No more mounts. Set pos past current mount's ID so that if 1557 + * iteration restarts, mnt_find_id_at() returns NULL. 1558 + */ 1559 + *pos = mnt->mnt_id_unique + 1; 1560 + return NULL; 1551 1561 } 1552 1562 1553 1563 static void m_stop(struct seq_file *m, void *v) ··· 2801 2791 } 2802 2792 2803 2793 static void lock_mount_exact(const struct path *path, 2804 - struct pinned_mountpoint *mp); 2794 + struct pinned_mountpoint *mp, bool copy_mount, 2795 + unsigned int copy_flags); 2805 2796 2806 2797 #define LOCK_MOUNT_MAYBE_BENEATH(mp, path, beneath) \ 2807 2798 struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \ ··· 2810 2799 #define LOCK_MOUNT(mp, path) LOCK_MOUNT_MAYBE_BENEATH(mp, (path), false) 2811 2800 #define LOCK_MOUNT_EXACT(mp, path) \ 2812 2801 struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \ 2813 - lock_mount_exact((path), &mp) 2802 + lock_mount_exact((path), &mp, false, 0) 2803 + #define LOCK_MOUNT_EXACT_COPY(mp, path, copy_flags) \ 2804 + struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \ 2805 + lock_mount_exact((path), &mp, true, (copy_flags)) 2814 2806 2815 2807 static int graft_tree(struct mount *mnt, const struct pinned_mountpoint *mp) 2816 2808 { ··· 3087 3073 return file; 3088 3074 } 3089 3075 3090 - DEFINE_FREE(put_empty_mnt_ns, struct mnt_namespace *, 3091 - if (!IS_ERR_OR_NULL(_T)) free_mnt_ns(_T)) 3092 - 3093 3076 static struct mnt_namespace *create_new_namespace(struct path *path, unsigned int flags) 3094 3077 { 3095 - struct mnt_namespace *new_ns __free(put_empty_mnt_ns) = NULL; 3096 - struct path to_path __free(path_put) = {}; 3097 3078 struct mnt_namespace *ns = current->nsproxy->mnt_ns; 3098 3079 struct user_namespace *user_ns = current_user_ns(); 3099 - struct mount *new_ns_root; 3080 + struct mnt_namespace *new_ns; 3081 + struct mount *new_ns_root, *old_ns_root; 3082 + struct path to_path; 3100 3083 struct mount *mnt; 3101 3084 unsigned int copy_flags = 0; 3102 3085 bool locked = false; ··· 3105 3094 if (IS_ERR(new_ns)) 3106 3095 return ERR_CAST(new_ns); 3107 3096 3108 - scoped_guard(namespace_excl) { 3109 - new_ns_root = clone_mnt(ns->root, ns->root->mnt.mnt_root, copy_flags); 3110 - if (IS_ERR(new_ns_root)) 3111 - return ERR_CAST(new_ns_root); 3097 + old_ns_root = ns->root; 3098 + to_path.mnt = &old_ns_root->mnt; 3099 + to_path.dentry = old_ns_root->mnt.mnt_root; 3112 3100 3113 - /* 3114 - * If the real rootfs had a locked mount on top of it somewhere 3115 - * in the stack, lock the new mount tree as well so it can't be 3116 - * exposed. 
3117 - */ 3118 - mnt = ns->root; 3119 - while (mnt->overmount) { 3120 - mnt = mnt->overmount; 3121 - if (mnt->mnt.mnt_flags & MNT_LOCKED) 3122 - locked = true; 3123 - } 3101 + VFS_WARN_ON_ONCE(old_ns_root->mnt.mnt_sb->s_type != &nullfs_fs_type); 3102 + 3103 + LOCK_MOUNT_EXACT_COPY(mp, &to_path, copy_flags); 3104 + if (IS_ERR(mp.parent)) { 3105 + free_mnt_ns(new_ns); 3106 + return ERR_CAST(mp.parent); 3107 + } 3108 + new_ns_root = mp.parent; 3109 + 3110 + /* 3111 + * If the real rootfs had a locked mount on top of it somewhere 3112 + * in the stack, lock the new mount tree as well so it can't be 3113 + * exposed. 3114 + */ 3115 + mnt = old_ns_root; 3116 + while (mnt->overmount) { 3117 + mnt = mnt->overmount; 3118 + if (mnt->mnt.mnt_flags & MNT_LOCKED) 3119 + locked = true; 3124 3120 } 3125 3121 3126 3122 /* 3127 - * We dropped the namespace semaphore so we can actually lock 3128 - * the copy for mounting. The copied mount isn't attached to any 3129 - * mount namespace and it is thus excluded from any propagation. 3130 - * So realistically we're isolated and the mount can't be 3131 - * overmounted. 3132 - */ 3133 - 3134 - /* Borrow the reference from clone_mnt(). */ 3135 - to_path.mnt = &new_ns_root->mnt; 3136 - to_path.dentry = dget(new_ns_root->mnt.mnt_root); 3137 - 3138 - /* Now lock for actual mounting. */ 3139 - LOCK_MOUNT_EXACT(mp, &to_path); 3140 - if (unlikely(IS_ERR(mp.parent))) 3141 - return ERR_CAST(mp.parent); 3142 - 3143 - /* 3144 - * We don't emulate unshare()ing a mount namespace. We stick to the 3145 - * restrictions of creating detached bind-mounts. It has a lot 3146 - * saner and simpler semantics. 3123 + * We don't emulate unshare()ing a mount namespace. We stick 3124 + * to the restrictions of creating detached bind-mounts. It 3125 + * has a lot saner and simpler semantics. 3147 3126 */ 3148 3127 mnt = __do_loopback(path, flags, copy_flags); 3149 - if (IS_ERR(mnt)) 3150 - return ERR_CAST(mnt); 3151 - 3152 3128 scoped_guard(mount_writer) { 3129 + if (IS_ERR(mnt)) { 3130 + emptied_ns = new_ns; 3131 + umount_tree(new_ns_root, 0); 3132 + return ERR_CAST(mnt); 3133 + } 3134 + 3153 3135 if (locked) 3154 3136 mnt->mnt.mnt_flags |= MNT_LOCKED; 3155 3137 /* 3156 - * Now mount the detached tree on top of the copy of the 3157 - * real rootfs we created. 3138 + * now mount the detached tree on top of the copy 3139 + * of the real rootfs we created. 3158 3140 */ 3159 3141 attach_mnt(mnt, new_ns_root, mp.mp); 3160 3142 if (user_ns != ns->user_ns) 3161 3143 lock_mnt_tree(new_ns_root); 3162 3144 } 3163 3145 3164 - /* Add all mounts to the new namespace. */ 3165 - for (struct mount *p = new_ns_root; p; p = next_mnt(p, new_ns_root)) { 3166 - mnt_add_to_ns(new_ns, p); 3146 + for (mnt = new_ns_root; mnt; mnt = next_mnt(mnt, new_ns_root)) { 3147 + mnt_add_to_ns(new_ns, mnt); 3167 3148 new_ns->nr_mounts++; 3168 3149 } 3169 3150 3170 - new_ns->root = real_mount(no_free_ptr(to_path.mnt)); 3151 + new_ns->root = new_ns_root; 3171 3152 ns_tree_add_raw(new_ns); 3172 - return no_free_ptr(new_ns); 3153 + return new_ns; 3173 3154 } 3174 3155 3175 3156 static struct file *open_new_namespace(struct path *path, unsigned int flags) ··· 3843 3840 } 3844 3841 3845 3842 static void lock_mount_exact(const struct path *path, 3846 - struct pinned_mountpoint *mp) 3843 + struct pinned_mountpoint *mp, bool copy_mount, 3844 + unsigned int copy_flags) 3847 3845 { 3848 3846 struct dentry *dentry = path->dentry; 3849 3847 int err; 3848 + 3849 + /* Assert that inode_lock() locked the correct inode. 
*/ 3850 + VFS_WARN_ON_ONCE(copy_mount && !path_mounted(path)); 3850 3851 3851 3852 inode_lock(dentry->d_inode); 3852 3853 namespace_lock(); 3853 3854 if (unlikely(cant_mount(dentry))) 3854 3855 err = -ENOENT; 3855 - else if (path_overmounted(path)) 3856 + else if (!copy_mount && path_overmounted(path)) 3856 3857 err = -EBUSY; 3857 3858 else 3858 3859 err = get_mountpoint(dentry, mp); ··· 3864 3857 namespace_unlock(); 3865 3858 inode_unlock(dentry->d_inode); 3866 3859 mp->parent = ERR_PTR(err); 3867 - } else { 3868 - mp->parent = real_mount(path->mnt); 3860 + return; 3869 3861 } 3862 + 3863 + if (copy_mount) 3864 + mp->parent = clone_mnt(real_mount(path->mnt), dentry, copy_flags); 3865 + else 3866 + mp->parent = real_mount(path->mnt); 3867 + if (unlikely(IS_ERR(mp->parent))) 3868 + __unlock_mount(mp); 3870 3869 } 3871 3870 3872 3871 int finish_automount(struct vfsmount *__m, const struct path *path) ··· 5691 5678 5692 5679 s->mnt = mnt_file->f_path.mnt; 5693 5680 ns = real_mount(s->mnt)->mnt_ns; 5681 + if (IS_ERR(ns)) 5682 + return PTR_ERR(ns); 5694 5683 if (!ns) 5695 5684 /* 5696 5685 * We can't set mount point and mnt_ns_id since we don't have a
+4 -6
fs/pidfs.c
··· 608 608 struct user_namespace *user_ns; 609 609 610 610 user_ns = task_cred_xxx(task, user_ns); 611 - if (!ns_ref_get(user_ns)) 612 - break; 613 - ns_common = to_ns_common(user_ns); 611 + if (ns_ref_get(user_ns)) 612 + ns_common = to_ns_common(user_ns); 614 613 } 615 614 #endif 616 615 break; ··· 619 620 struct pid_namespace *pid_ns; 620 621 621 622 pid_ns = task_active_pid_ns(task); 622 - if (!ns_ref_get(pid_ns)) 623 - break; 624 - ns_common = to_ns_common(pid_ns); 623 + if (ns_ref_get(pid_ns)) 624 + ns_common = to_ns_common(pid_ns); 625 625 } 626 626 #endif 627 627 break;
+3
fs/proc/base.c
··· 2128 2128 ino_t ino = 1; 2129 2129 2130 2130 child = try_lookup_noperm(&qname, dir); 2131 + if (IS_ERR(child)) 2132 + goto end_instantiate; 2133 + 2131 2134 if (!child) { 2132 2135 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); 2133 2136 child = d_alloc_parallel(dir, &qname, &wq);
+1 -1
fs/smb/client/cached_dir.c
··· 118 118 if (!*path) 119 119 return path; 120 120 121 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && 121 + if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH) && 122 122 cifs_sb->prepath) { 123 123 len = strlen(cifs_sb->prepath) + 1; 124 124 if (unlikely(len > strlen(path)))
+1 -1
fs/smb/client/cifs_fs_sb.h
··· 55 55 struct nls_table *local_nls; 56 56 struct smb3_fs_context *ctx; 57 57 atomic_t active; 58 - unsigned int mnt_cifs_flags; 58 + atomic_t mnt_cifs_flags; 59 59 struct delayed_work prune_tlinks; 60 60 struct rcu_head rcu; 61 61
-8
fs/smb/client/cifs_ioctl.h
··· 122 122 #define CIFS_GOING_FLAGS_DEFAULT 0x0 /* going down */ 123 123 #define CIFS_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */ 124 124 #define CIFS_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */ 125 - 126 - static inline bool cifs_forced_shutdown(struct cifs_sb_info *sbi) 127 - { 128 - if (CIFS_MOUNT_SHUTDOWN & sbi->mnt_cifs_flags) 129 - return true; 130 - else 131 - return false; 132 - }
-14
fs/smb/client/cifs_unicode.c
··· 11 11 #include "cifsglob.h" 12 12 #include "cifs_debug.h" 13 13 14 - int cifs_remap(struct cifs_sb_info *cifs_sb) 15 - { 16 - int map_type; 17 - 18 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) 19 - map_type = SFM_MAP_UNI_RSVD; 20 - else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) 21 - map_type = SFU_MAP_UNI_RSVD; 22 - else 23 - map_type = NO_MAP_UNI_RSVD; 24 - 25 - return map_type; 26 - } 27 - 28 14 /* Convert character using the SFU - "Services for Unix" remapping range */ 29 15 static bool 30 16 convert_sfu_char(const __u16 src_char, char *target)
+13 -1
fs/smb/client/cifs_unicode.h
··· 22 22 #include <linux/types.h> 23 23 #include <linux/nls.h> 24 24 #include "../../nls/nls_ucs2_utils.h" 25 + #include "cifsglob.h" 25 26 26 27 /* 27 28 * Macs use an older "SFM" mapping of the symbols above. Fortunately it does ··· 66 65 const struct nls_table *codepage); 67 66 int cifsConvertToUTF16(__le16 *target, const char *source, int srclen, 68 67 const struct nls_table *cp, int map_chars); 69 - int cifs_remap(struct cifs_sb_info *cifs_sb); 70 68 __le16 *cifs_strndup_to_utf16(const char *src, const int maxlen, 71 69 int *utf16_len, const struct nls_table *cp, 72 70 int remap); 73 71 wchar_t cifs_toupper(wchar_t in); 72 + 73 + static inline int cifs_remap(const struct cifs_sb_info *cifs_sb) 74 + { 75 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 76 + 77 + if (sbflags & CIFS_MOUNT_MAP_SFM_CHR) 78 + return SFM_MAP_UNI_RSVD; 79 + if (sbflags & CIFS_MOUNT_MAP_SPECIAL_CHR) 80 + return SFU_MAP_UNI_RSVD; 81 + 82 + return NO_MAP_UNI_RSVD; 83 + } 74 84 75 85 #endif /* _CIFS_UNICODE_H */
+6 -11
fs/smb/client/cifsacl.c
··· 356 356 psid->num_subauth, SID_MAX_SUB_AUTHORITIES); 357 357 } 358 358 359 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) || 359 + if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_UID_FROM_ACL) || 360 360 (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) { 361 361 uint32_t unix_id; 362 362 bool is_group; ··· 1612 1612 struct smb_acl *dacl_ptr = NULL; 1613 1613 struct smb_ntsd *pntsd = NULL; /* acl obtained from server */ 1614 1614 struct smb_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ 1615 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1615 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 1616 + unsigned int sbflags; 1616 1617 struct tcon_link *tlink; 1617 1618 struct smb_version_operations *ops; 1618 1619 bool mode_from_sid, id_from_sid; ··· 1644 1643 return rc; 1645 1644 } 1646 1645 1647 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) 1648 - mode_from_sid = true; 1649 - else 1650 - mode_from_sid = false; 1651 - 1652 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) 1653 - id_from_sid = true; 1654 - else 1655 - id_from_sid = false; 1646 + sbflags = cifs_sb_flags(cifs_sb); 1647 + mode_from_sid = sbflags & CIFS_MOUNT_MODE_FROM_SID; 1648 + id_from_sid = sbflags & CIFS_MOUNT_UID_FROM_ACL; 1656 1649 1657 1650 /* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */ 1658 1651 if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
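A small C detail makes the mode_from_sid/id_from_sid rewrite safe: conversion to _Bool normalizes any nonzero value to 1, so assigning a masked flag word directly to a bool is equivalent to the old if/else ladder. A tiny demonstration (flag value illustrative):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_HIGH (1u << 20)

int main(void)
{
	unsigned int flags = FLAG_HIGH;
	bool set = flags & FLAG_HIGH;	/* becomes 1, not (1u << 20) */

	printf("%d\n", set);		/* prints 1 */
	return 0;
}

Had set been declared int, it would hold the raw bit value and a comparison such as set == true would fail, which is why the direct assignment is only idiomatic for bool variables.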
+44 -40
fs/smb/client/cifsfs.c
··· 226 226 static int 227 227 cifs_read_super(struct super_block *sb) 228 228 { 229 - struct inode *inode; 230 229 struct cifs_sb_info *cifs_sb; 231 230 struct cifs_tcon *tcon; 231 + unsigned int sbflags; 232 232 struct timespec64 ts; 233 + struct inode *inode; 233 234 int rc = 0; 234 235 235 236 cifs_sb = CIFS_SB(sb); 236 237 tcon = cifs_sb_master_tcon(cifs_sb); 238 + sbflags = cifs_sb_flags(cifs_sb); 237 239 238 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL) 240 + if (sbflags & CIFS_MOUNT_POSIXACL) 239 241 sb->s_flags |= SB_POSIXACL; 240 242 241 243 if (tcon->snapshot_time) ··· 313 311 } 314 312 315 313 #ifdef CONFIG_CIFS_NFSD_EXPORT 316 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 314 + if (sbflags & CIFS_MOUNT_SERVER_INUM) { 317 315 cifs_dbg(FYI, "export ops supported\n"); 318 316 sb->s_export_op = &cifs_export_ops; 319 317 } ··· 391 389 392 390 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len) 393 391 { 394 - struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 395 - struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 392 + struct cifs_tcon *tcon = cifs_sb_master_tcon(CIFS_SB(file)); 396 393 struct TCP_Server_Info *server = tcon->ses->server; 397 394 struct inode *inode = file_inode(file); 398 395 int rc; ··· 419 418 static int cifs_permission(struct mnt_idmap *idmap, 420 419 struct inode *inode, int mask) 421 420 { 422 - struct cifs_sb_info *cifs_sb; 421 + unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode)); 423 422 424 - cifs_sb = CIFS_SB(inode->i_sb); 425 - 426 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { 423 + if (sbflags & CIFS_MOUNT_NO_PERM) { 427 424 if ((mask & MAY_EXEC) && !execute_ok(inode)) 428 425 return -EACCES; 429 426 else ··· 567 568 static void 568 569 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb) 569 570 { 571 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 572 + 570 573 seq_puts(s, ",cache="); 571 574 572 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) 575 + if (sbflags & CIFS_MOUNT_STRICT_IO) 573 576 seq_puts(s, "strict"); 574 - else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) 577 + else if (sbflags & CIFS_MOUNT_DIRECT_IO) 575 578 seq_puts(s, "none"); 576 - else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE) 579 + else if (sbflags & CIFS_MOUNT_RW_CACHE) 577 580 seq_puts(s, "singleclient"); /* assume only one client access */ 578 - else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) 581 + else if (sbflags & CIFS_MOUNT_RO_CACHE) 579 582 seq_puts(s, "ro"); /* read only caching assumed */ 580 583 else 581 584 seq_puts(s, "loose"); ··· 638 637 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); 639 638 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 640 639 struct sockaddr *srcaddr; 640 + unsigned int sbflags; 641 + 641 642 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; 642 643 643 644 seq_show_option(s, "vers", tcon->ses->server->vals->version_string); ··· 673 670 (int)(srcaddr->sa_family)); 674 671 } 675 672 673 + sbflags = cifs_sb_flags(cifs_sb); 676 674 seq_printf(s, ",uid=%u", 677 675 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid)); 678 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) 676 + if (sbflags & CIFS_MOUNT_OVERR_UID) 679 677 seq_puts(s, ",forceuid"); 680 678 else 681 679 seq_puts(s, ",noforceuid"); 682 680 683 681 seq_printf(s, ",gid=%u", 684 682 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid)); 685 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) 683 + if (sbflags & CIFS_MOUNT_OVERR_GID) 686 684 
seq_puts(s, ",forcegid"); 687 685 else 688 686 seq_puts(s, ",noforcegid"); ··· 726 722 seq_puts(s, ",unix"); 727 723 else 728 724 seq_puts(s, ",nounix"); 729 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) 725 + if (sbflags & CIFS_MOUNT_NO_DFS) 730 726 seq_puts(s, ",nodfs"); 731 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) 727 + if (sbflags & CIFS_MOUNT_POSIX_PATHS) 732 728 seq_puts(s, ",posixpaths"); 733 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) 729 + if (sbflags & CIFS_MOUNT_SET_UID) 734 730 seq_puts(s, ",setuids"); 735 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) 731 + if (sbflags & CIFS_MOUNT_UID_FROM_ACL) 736 732 seq_puts(s, ",idsfromsid"); 737 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) 733 + if (sbflags & CIFS_MOUNT_SERVER_INUM) 738 734 seq_puts(s, ",serverino"); 739 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 735 + if (sbflags & CIFS_MOUNT_RWPIDFORWARD) 740 736 seq_puts(s, ",rwpidforward"); 741 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) 737 + if (sbflags & CIFS_MOUNT_NOPOSIXBRL) 742 738 seq_puts(s, ",forcemand"); 743 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 739 + if (sbflags & CIFS_MOUNT_NO_XATTR) 744 740 seq_puts(s, ",nouser_xattr"); 745 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) 741 + if (sbflags & CIFS_MOUNT_MAP_SPECIAL_CHR) 746 742 seq_puts(s, ",mapchars"); 747 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) 743 + if (sbflags & CIFS_MOUNT_MAP_SFM_CHR) 748 744 seq_puts(s, ",mapposix"); 749 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) 745 + if (sbflags & CIFS_MOUNT_UNX_EMUL) 750 746 seq_puts(s, ",sfu"); 751 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 747 + if (sbflags & CIFS_MOUNT_NO_BRL) 752 748 seq_puts(s, ",nobrl"); 753 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE) 749 + if (sbflags & CIFS_MOUNT_NO_HANDLE_CACHE) 754 750 seq_puts(s, ",nohandlecache"); 755 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) 751 + if (sbflags & CIFS_MOUNT_MODE_FROM_SID) 756 752 seq_puts(s, ",modefromsid"); 757 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) 753 + if (sbflags & CIFS_MOUNT_CIFS_ACL) 758 754 seq_puts(s, ",cifsacl"); 759 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) 755 + if (sbflags & CIFS_MOUNT_DYNPERM) 760 756 seq_puts(s, ",dynperm"); 761 757 if (root->d_sb->s_flags & SB_POSIXACL) 762 758 seq_puts(s, ",acl"); 763 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) 759 + if (sbflags & CIFS_MOUNT_MF_SYMLINKS) 764 760 seq_puts(s, ",mfsymlinks"); 765 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) 761 + if (sbflags & CIFS_MOUNT_FSCACHE) 766 762 seq_puts(s, ",fsc"); 767 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC) 763 + if (sbflags & CIFS_MOUNT_NOSSYNC) 768 764 seq_puts(s, ",nostrictsync"); 769 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) 765 + if (sbflags & CIFS_MOUNT_NO_PERM) 770 766 seq_puts(s, ",noperm"); 771 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) 767 + if (sbflags & CIFS_MOUNT_CIFS_BACKUPUID) 772 768 seq_printf(s, ",backupuid=%u", 773 769 from_kuid_munged(&init_user_ns, 774 770 cifs_sb->ctx->backupuid)); 775 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) 771 + if (sbflags & CIFS_MOUNT_CIFS_BACKUPGID) 776 772 seq_printf(s, ",backupgid=%u", 777 773 from_kgid_munged(&init_user_ns, 778 774 cifs_sb->ctx->backupgid)); ··· 913 909 914 910 static int cifs_drop_inode(struct inode *inode) 915 911 { 916 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 912 + unsigned int sbflags = 
cifs_sb_flags(CIFS_SB(inode)); 917 913 918 914 /* no serverino => unconditional eviction */ 919 - return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) || 915 + return !(sbflags & CIFS_MOUNT_SERVER_INUM) || 920 916 inode_generic_drop(inode); 921 917 } 922 918 ··· 954 950 char *s, *p; 955 951 char sep; 956 952 957 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) 953 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH) 958 954 return dget(sb->s_root); 959 955 960 956 full_path = cifs_build_path_to_root(ctx, cifs_sb,
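Throughout cifs_show_options() and its neighbors, the conversion follows a snapshot-once discipline: cifs_sb_flags() is called a single time, the result parked in a local sbflags, and every subsequent test made against that copy. That costs one atomic load per function and, more importantly, keeps all the tests mutually consistent even if a remount changes the word midway through. A compressed userspace sketch of the same discipline (option names illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define OPT_STRICT (1u << 0)
#define OPT_DIRECT (1u << 1)

static atomic_uint mnt_flags = OPT_STRICT;

static void show_cache_mode(void)
{
	unsigned int flags = atomic_load(&mnt_flags);	/* one load */

	/*
	 * Both tests below see the same snapshot; a concurrent update
	 * cannot make them disagree mid-function.
	 */
	if (flags & OPT_STRICT)
		puts("cache=strict");
	else if (flags & OPT_DIRECT)
		puts("cache=none");
	else
		puts("cache=loose");
}

int main(void)
{
	show_cache_mode();
	return 0;
}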
+50 -11
fs/smb/client/cifsglob.h
··· 1580 1580 return container_of(inode, struct cifsInodeInfo, netfs.inode); 1581 1581 } 1582 1582 1583 - static inline struct cifs_sb_info * 1584 - CIFS_SB(struct super_block *sb) 1583 + static inline void *cinode_to_fsinfo(struct cifsInodeInfo *cinode) 1584 + { 1585 + return cinode->netfs.inode.i_sb->s_fs_info; 1586 + } 1587 + 1588 + static inline void *super_to_fsinfo(struct super_block *sb) 1585 1589 { 1586 1590 return sb->s_fs_info; 1587 1591 } 1588 1592 1589 - static inline struct cifs_sb_info * 1590 - CIFS_FILE_SB(struct file *file) 1593 + static inline void *inode_to_fsinfo(struct inode *inode) 1591 1594 { 1592 - return CIFS_SB(file_inode(file)->i_sb); 1595 + return inode->i_sb->s_fs_info; 1596 + } 1597 + 1598 + static inline void *file_to_fsinfo(struct file *file) 1599 + { 1600 + return file_inode(file)->i_sb->s_fs_info; 1601 + } 1602 + 1603 + static inline void *dentry_to_fsinfo(struct dentry *dentry) 1604 + { 1605 + return dentry->d_sb->s_fs_info; 1606 + } 1607 + 1608 + static inline void *const_dentry_to_fsinfo(const struct dentry *dentry) 1609 + { 1610 + return dentry->d_sb->s_fs_info; 1611 + } 1612 + 1613 + #define CIFS_SB(_ptr) \ 1614 + ((struct cifs_sb_info *) \ 1615 + _Generic((_ptr), \ 1616 + struct cifsInodeInfo * : cinode_to_fsinfo, \ 1617 + const struct dentry * : const_dentry_to_fsinfo, \ 1618 + struct super_block * : super_to_fsinfo, \ 1619 + struct dentry * : dentry_to_fsinfo, \ 1620 + struct inode * : inode_to_fsinfo, \ 1621 + struct file * : file_to_fsinfo)(_ptr)) 1622 + 1623 + /* 1624 + * Use atomic_t for @cifs_sb->mnt_cifs_flags as it is currently accessed 1625 + * locklessly and may be changed concurrently by mount/remount and reconnect 1626 + * paths. 1627 + */ 1628 + static inline unsigned int cifs_sb_flags(const struct cifs_sb_info *cifs_sb) 1629 + { 1630 + return atomic_read(&cifs_sb->mnt_cifs_flags); 1593 1631 } 1594 1632 1595 1633 static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb) 1596 1634 { 1597 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) 1598 - return '/'; 1599 - else 1600 - return '\\'; 1635 + return (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_POSIX_PATHS) ? '/' : '\\'; 1601 1636 } 1602 1637 1603 1638 static inline void ··· 2349 2314 unsigned int oplock_flags, 2350 2315 unsigned int sb_flags) 2351 2316 { 2352 - struct cifs_sb_info *cifs_sb = CIFS_SB(cinode->netfs.inode.i_sb); 2317 + unsigned int sflags = cifs_sb_flags(CIFS_SB(cinode)); 2353 2318 unsigned int oplock = READ_ONCE(cinode->oplock); 2354 - unsigned int sflags = cifs_sb->mnt_cifs_flags; 2355 2319 2356 2320 return (oplock & oplock_flags) || (sflags & sb_flags); 2357 2321 } ··· 2368 2334 { 2369 2335 scoped_guard(spinlock, &cinode->open_file_lock) 2370 2336 WRITE_ONCE(cinode->oplock, 0); 2337 + } 2338 + 2339 + static inline bool cifs_forced_shutdown(const struct cifs_sb_info *sbi) 2340 + { 2341 + return cifs_sb_flags(sbi) & CIFS_MOUNT_SHUTDOWN; 2371 2342 } 2372 2343 2373 2344 #endif /* _CIFS_GLOB_H */
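The CIFS_SB() rewrite replaces two typed helpers with a C11 _Generic macro: the argument's static type selects the matching converter at compile time, so one macro name now accepts super blocks, inodes, files, dentries, and cifsInodeInfo pointers. The userspace sketch below shows the dispatch mechanism with toy types; the kernel version funnels through void *-returning helpers and adds one cast, whereas the sketch returns typed pointers directly, so no cast is needed.

#include <stdio.h>

struct fs_info { const char *name; };
struct super   { struct fs_info *info; };
struct inode   { struct super *sb; };
struct file    { struct inode *ino; };

static struct fs_info *super_to_info(struct super *s) { return s->info; }
static struct fs_info *inode_to_info(struct inode *i) { return i->sb->info; }
static struct fs_info *file_to_info(struct file *f)   { return f->ino->sb->info; }

/* One name, three accepted pointer types, chosen at compile time. */
#define FS_INFO(p)				\
	_Generic((p),				\
		 struct super * : super_to_info,\
		 struct inode * : inode_to_info,\
		 struct file  * : file_to_info)(p)

int main(void)
{
	struct fs_info info = { .name = "demo" };
	struct super sb = { .info = &info };
	struct inode ino = { .sb = &sb };
	struct file f = { .ino = &ino };

	printf("%s %s %s\n", FS_INFO(&sb)->name, FS_INFO(&ino)->name,
	       FS_INFO(&f)->name);
	return 0;
}

Passing a pointer type with no association is a compile error, which is the point: the macro cannot silently accept the wrong object.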
+44 -36
fs/smb/client/connect.c
··· 2167 2167 2168 2168 #ifdef CONFIG_KEYS 2169 2169 2170 - /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ 2171 - #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) 2172 - 2173 2170 /* Populate username and pw fields from keyring if possible */ 2174 2171 static int 2175 2172 cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses) ··· 2174 2177 int rc = 0; 2175 2178 int is_domain = 0; 2176 2179 const char *delim, *payload; 2180 + size_t desc_sz; 2177 2181 char *desc; 2178 2182 ssize_t len; 2179 2183 struct key *key; ··· 2183 2185 struct sockaddr_in6 *sa6; 2184 2186 const struct user_key_payload *upayload; 2185 2187 2186 - desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL); 2188 + /* "cifs:a:" and "cifs:d:" are the same length; +1 for NUL terminator */ 2189 + desc_sz = strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1; 2190 + desc = kmalloc(desc_sz, GFP_KERNEL); 2187 2191 if (!desc) 2188 2192 return -ENOMEM; 2189 2193 ··· 2193 2193 switch (server->dstaddr.ss_family) { 2194 2194 case AF_INET: 2195 2195 sa = (struct sockaddr_in *)&server->dstaddr; 2196 - sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr); 2196 + snprintf(desc, desc_sz, "cifs:a:%pI4", &sa->sin_addr.s_addr); 2197 2197 break; 2198 2198 case AF_INET6: 2199 2199 sa6 = (struct sockaddr_in6 *)&server->dstaddr; 2200 - sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); 2200 + snprintf(desc, desc_sz, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); 2201 2201 break; 2202 2202 default: 2203 2203 cifs_dbg(FYI, "Bad ss_family (%hu)\n", ··· 2216 2216 } 2217 2217 2218 2218 /* didn't work, try to find a domain key */ 2219 - sprintf(desc, "cifs:d:%s", ses->domainName); 2219 + snprintf(desc, desc_sz, "cifs:d:%s", ses->domainName); 2220 2220 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); 2221 2221 key = request_key(&key_type_logon, desc, ""); 2222 2222 if (IS_ERR(key)) { ··· 2236 2236 /* find first : in payload */ 2237 2237 payload = upayload->data; 2238 2238 delim = strnchr(payload, upayload->datalen, ':'); 2239 - cifs_dbg(FYI, "payload=%s\n", payload); 2240 2239 if (!delim) { 2241 2240 cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n", 2242 2241 upayload->datalen); ··· 2914 2915 { 2915 2916 struct cifs_sb_info *old = CIFS_SB(sb); 2916 2917 struct cifs_sb_info *new = mnt_data->cifs_sb; 2917 - unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK; 2918 - unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK; 2918 + unsigned int oldflags = cifs_sb_flags(old) & CIFS_MOUNT_MASK; 2919 + unsigned int newflags = cifs_sb_flags(new) & CIFS_MOUNT_MASK; 2919 2920 2920 2921 if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK)) 2921 2922 return 0; ··· 2970 2971 struct smb3_fs_context *ctx = mnt_data->ctx; 2971 2972 struct cifs_sb_info *old = CIFS_SB(sb); 2972 2973 struct cifs_sb_info *new = mnt_data->cifs_sb; 2973 - bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && 2974 + bool old_set = (cifs_sb_flags(old) & CIFS_MOUNT_USE_PREFIX_PATH) && 2974 2975 old->prepath; 2975 - bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && 2976 + bool new_set = (cifs_sb_flags(new) & CIFS_MOUNT_USE_PREFIX_PATH) && 2976 2977 new->prepath; 2977 2978 2978 2979 if (tcon->origin_fullpath && ··· 3003 3004 cifs_sb = CIFS_SB(sb); 3004 3005 3005 3006 /* We do not want to use a superblock that has been shutdown */ 3006 - if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) { 3007 + if (cifs_forced_shutdown(cifs_sb)) { 3007 3008 spin_unlock(&cifs_tcp_ses_lock); 3008 3009 return 0; 3009 
3010 } ··· 3468 3469 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb) 3469 3470 { 3470 3471 struct smb3_fs_context *ctx = cifs_sb->ctx; 3472 + unsigned int sbflags; 3473 + int rc = 0; 3471 3474 3472 3475 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); 3473 3476 INIT_LIST_HEAD(&cifs_sb->tcon_sb_link); ··· 3494 3493 } 3495 3494 ctx->local_nls = cifs_sb->local_nls; 3496 3495 3497 - smb3_update_mnt_flags(cifs_sb); 3496 + sbflags = smb3_update_mnt_flags(cifs_sb); 3498 3497 3499 3498 if (ctx->direct_io) 3500 3499 cifs_dbg(FYI, "mounting share using direct i/o\n"); 3501 3500 if (ctx->cache_ro) { 3502 3501 cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n"); 3503 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE; 3502 + sbflags |= CIFS_MOUNT_RO_CACHE; 3504 3503 } else if (ctx->cache_rw) { 3505 3504 cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n"); 3506 - cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE | 3507 - CIFS_MOUNT_RW_CACHE); 3505 + sbflags |= CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE; 3508 3506 } 3509 3507 3510 3508 if ((ctx->cifs_acl) && (ctx->dynperm)) ··· 3512 3512 if (ctx->prepath) { 3513 3513 cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL); 3514 3514 if (cifs_sb->prepath == NULL) 3515 - return -ENOMEM; 3516 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3515 + rc = -ENOMEM; 3516 + else 3517 + sbflags |= CIFS_MOUNT_USE_PREFIX_PATH; 3517 3518 } 3518 3519 3519 - return 0; 3520 + atomic_set(&cifs_sb->mnt_cifs_flags, sbflags); 3521 + return rc; 3520 3522 } 3521 3523 3522 3524 /* Release all succeed connections */ 3523 3525 void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx) 3524 3526 { 3527 + struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; 3525 3528 int rc = 0; 3526 3529 3527 3530 if (mnt_ctx->tcon) ··· 3536 3533 mnt_ctx->ses = NULL; 3537 3534 mnt_ctx->tcon = NULL; 3538 3535 mnt_ctx->server = NULL; 3539 - mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS; 3536 + atomic_andnot(CIFS_MOUNT_POSIX_PATHS, &cifs_sb->mnt_cifs_flags); 3540 3537 free_xid(mnt_ctx->xid); 3541 3538 } 3542 3539 ··· 3590 3587 int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx) 3591 3588 { 3592 3589 struct TCP_Server_Info *server; 3590 + struct cifs_tcon *tcon = NULL; 3593 3591 struct cifs_sb_info *cifs_sb; 3594 3592 struct smb3_fs_context *ctx; 3595 - struct cifs_tcon *tcon = NULL; 3593 + unsigned int sbflags; 3596 3594 int rc = 0; 3597 3595 3598 - if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx || 3599 - !mnt_ctx->cifs_sb)) { 3600 - rc = -EINVAL; 3601 - goto out; 3596 + if (WARN_ON_ONCE(!mnt_ctx)) 3597 + return -EINVAL; 3598 + if (WARN_ON_ONCE(!mnt_ctx->server || !mnt_ctx->ses || 3599 + !mnt_ctx->fs_ctx || !mnt_ctx->cifs_sb)) { 3600 + mnt_ctx->tcon = NULL; 3601 + return -EINVAL; 3602 3602 } 3603 3603 server = mnt_ctx->server; 3604 3604 ctx = mnt_ctx->fs_ctx; 3605 3605 cifs_sb = mnt_ctx->cifs_sb; 3606 + sbflags = cifs_sb_flags(cifs_sb); 3606 3607 3607 3608 /* search for existing tcon to this server share */ 3608 3609 tcon = cifs_get_tcon(mnt_ctx->ses, ctx); ··· 3621 3614 * path (i.e., do not remap / and \ and do not map any special characters) 3622 3615 */ 3623 3616 if (tcon->posix_extensions) { 3624 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; 3625 - cifs_sb->mnt_cifs_flags &= ~(CIFS_MOUNT_MAP_SFM_CHR | 3626 - CIFS_MOUNT_MAP_SPECIAL_CHR); 3617 + sbflags |= CIFS_MOUNT_POSIX_PATHS; 3618 + 
sbflags &= ~(CIFS_MOUNT_MAP_SFM_CHR | 3619 + CIFS_MOUNT_MAP_SPECIAL_CHR); 3627 3620 } 3628 3621 3629 3622 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY ··· 3650 3643 /* do not care if a following call succeed - informational */ 3651 3644 if (!tcon->pipe && server->ops->qfs_tcon) { 3652 3645 server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb); 3653 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) { 3646 + if (sbflags & CIFS_MOUNT_RO_CACHE) { 3654 3647 if (tcon->fsDevInfo.DeviceCharacteristics & 3655 3648 cpu_to_le32(FILE_READ_ONLY_DEVICE)) 3656 3649 cifs_dbg(VFS, "mounted to read only share\n"); 3657 - else if ((cifs_sb->mnt_cifs_flags & 3658 - CIFS_MOUNT_RW_CACHE) == 0) 3650 + else if (!(sbflags & CIFS_MOUNT_RW_CACHE)) 3659 3651 cifs_dbg(VFS, "read only mount of RW share\n"); 3660 3652 /* no need to log a RW mount of a typical RW share */ 3661 3653 } ··· 3666 3660 * Inside cifs_fscache_get_super_cookie it checks 3667 3661 * that we do not get super cookie twice. 3668 3662 */ 3669 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) 3663 + if (sbflags & CIFS_MOUNT_FSCACHE) 3670 3664 cifs_fscache_get_super_cookie(tcon); 3671 3665 3672 3666 out: 3673 3667 mnt_ctx->tcon = tcon; 3668 + atomic_set(&cifs_sb->mnt_cifs_flags, sbflags); 3674 3669 return rc; 3675 3670 } 3676 3671 ··· 3790 3783 cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS); 3791 3784 if (rc != 0) { 3792 3785 cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); 3793 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3786 + atomic_or(CIFS_MOUNT_USE_PREFIX_PATH, 3787 + &cifs_sb->mnt_cifs_flags); 3794 3788 rc = 0; 3795 3789 } 3796 3790 } ··· 3871 3863 * Force the use of prefix path to support failover on DFS paths that resolve to targets 3872 3864 * that have different prefix paths. 3873 3865 */ 3874 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3866 + atomic_or(CIFS_MOUNT_USE_PREFIX_PATH, &cifs_sb->mnt_cifs_flags); 3875 3867 kfree(cifs_sb->prepath); 3876 3868 cifs_sb->prepath = ctx->prepath; 3877 3869 ctx->prepath = NULL; ··· 4365 4357 kuid_t fsuid = current_fsuid(); 4366 4358 int err; 4367 4359 4368 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 4360 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER)) 4369 4361 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); 4370 4362 4371 4363 spin_lock(&cifs_sb->tlink_tree_lock);
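Besides the flag-word conversion, the keyring hunk retires a hand-maintained CIFSCREDS_DESC_SIZE define: the buffer size is now computed next to the allocation, and the sprintf() calls become snprintf() carrying that same bound, so the size and the writes can no longer drift apart. A standalone sketch of the idiom, where MAX_DOMAIN_LEN is an illustrative stand-in for CIFS_MAX_DOMAINNAME_LEN:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_DOMAIN_LEN 256

int main(void)
{
	/* "cifs:a:" and "cifs:d:" are the same length; +1 for the NUL */
	size_t desc_sz = strlen("cifs:a:") + MAX_DOMAIN_LEN + 1;
	char *desc = malloc(desc_sz);

	if (!desc)
		return 1;

	/* every write is bounded by the size computed above */
	snprintf(desc, desc_sz, "cifs:d:%s", "EXAMPLE.DOMAIN");
	printf("%s\n", desc);
	free(desc);
	return 0;
}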
+1 -1
fs/smb/client/dfs_cache.c
··· 1333 1333 * Force the use of prefix path to support failover on DFS paths that resolve to targets 1334 1334 * that have different prefix paths. 1335 1335 */ 1336 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 1336 + atomic_or(CIFS_MOUNT_USE_PREFIX_PATH, &cifs_sb->mnt_cifs_flags); 1337 1337 1338 1338 refresh_tcon_referral(tcon, true); 1339 1339 return 0;
+29 -24
fs/smb/client/dir.c
··· 82 82 const char *tree, int tree_len, 83 83 bool prefix) 84 84 { 85 - int dfsplen; 86 - int pplen = 0; 87 - struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); 85 + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry); 86 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 88 87 char dirsep = CIFS_DIR_SEP(cifs_sb); 88 + int pplen = 0; 89 + int dfsplen; 89 90 char *s; 90 91 91 92 if (unlikely(!page)) ··· 97 96 else 98 97 dfsplen = 0; 99 98 100 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) 99 + if (sbflags & CIFS_MOUNT_USE_PREFIX_PATH) 101 100 pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; 102 101 103 102 s = dentry_path_raw(direntry, page, PATH_MAX); ··· 124 123 if (dfsplen) { 125 124 s -= dfsplen; 126 125 memcpy(s, tree, dfsplen); 127 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { 126 + if (sbflags & CIFS_MOUNT_POSIX_PATHS) { 128 127 int i; 129 128 for (i = 0; i < dfsplen; i++) { 130 129 if (s[i] == '\\') ··· 153 152 static int 154 153 check_name(struct dentry *direntry, struct cifs_tcon *tcon) 155 154 { 156 - struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); 155 + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry); 157 156 int i; 158 157 159 158 if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength && ··· 161 160 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) 162 161 return -ENAMETOOLONG; 163 162 164 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { 163 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_POSIX_PATHS)) { 165 164 for (i = 0; i < direntry->d_name.len; i++) { 166 165 if (direntry->d_name.name[i] == '\\') { 167 166 cifs_dbg(FYI, "Invalid file name\n"); ··· 182 181 int rc = -ENOENT; 183 182 int create_options = CREATE_NOT_DIR; 184 183 int desired_access; 185 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 184 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 186 185 struct cifs_tcon *tcon = tlink_tcon(tlink); 187 186 const char *full_path; 188 187 void *page = alloc_dentry_path(); 189 188 struct inode *newinode = NULL; 189 + unsigned int sbflags; 190 190 int disposition; 191 191 struct TCP_Server_Info *server = tcon->ses->server; 192 192 struct cifs_open_parms oparms; ··· 367 365 * If Open reported that we actually created a file then we now have to 368 366 * set the mode if possible. 
369 367 */ 368 + sbflags = cifs_sb_flags(cifs_sb); 370 369 if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) { 371 370 struct cifs_unix_set_info_args args = { 372 371 .mode = mode, ··· 377 374 .device = 0, 378 375 }; 379 376 380 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 377 + if (sbflags & CIFS_MOUNT_SET_UID) { 381 378 args.uid = current_fsuid(); 382 379 if (inode->i_mode & S_ISGID) 383 380 args.gid = inode->i_gid; ··· 414 411 if (server->ops->set_lease_key) 415 412 server->ops->set_lease_key(newinode, fid); 416 413 if ((*oplock & CIFS_CREATE_ACTION) && S_ISREG(newinode->i_mode)) { 417 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) 414 + if (sbflags & CIFS_MOUNT_DYNPERM) 418 415 newinode->i_mode = mode; 419 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 416 + if (sbflags & CIFS_MOUNT_SET_UID) { 420 417 newinode->i_uid = current_fsuid(); 421 418 if (inode->i_mode & S_ISGID) 422 419 newinode->i_gid = inode->i_gid; ··· 461 458 cifs_atomic_open(struct inode *inode, struct dentry *direntry, 462 459 struct file *file, unsigned int oflags, umode_t mode) 463 460 { 464 - int rc; 465 - unsigned int xid; 461 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 462 + struct cifs_open_info_data buf = {}; 463 + struct TCP_Server_Info *server; 464 + struct cifsFileInfo *file_info; 465 + struct cifs_pending_open open; 466 + struct cifs_fid fid = {}; 466 467 struct tcon_link *tlink; 467 468 struct cifs_tcon *tcon; 468 - struct TCP_Server_Info *server; 469 - struct cifs_fid fid = {}; 470 - struct cifs_pending_open open; 469 + unsigned int sbflags; 470 + unsigned int xid; 471 471 __u32 oplock; 472 - struct cifsFileInfo *file_info; 473 - struct cifs_open_info_data buf = {}; 472 + int rc; 474 473 475 - if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) 474 + if (unlikely(cifs_forced_shutdown(cifs_sb))) 476 475 return smb_EIO(smb_eio_trace_forced_shutdown); 477 476 478 477 /* ··· 504 499 cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", 505 500 inode, direntry, direntry); 506 501 507 - tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb)); 502 + tlink = cifs_sb_tlink(cifs_sb); 508 503 if (IS_ERR(tlink)) { 509 504 rc = PTR_ERR(tlink); 510 505 goto out_free_xid; ··· 541 536 goto out; 542 537 } 543 538 544 - if (file->f_flags & O_DIRECT && 545 - CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { 546 - if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 539 + sbflags = cifs_sb_flags(cifs_sb); 540 + if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) { 541 + if (sbflags & CIFS_MOUNT_NO_BRL) 547 542 file->f_op = &cifs_file_direct_nobrl_ops; 548 543 else 549 544 file->f_op = &cifs_file_direct_ops; 550 - } 545 + } 551 546 552 547 file_info = cifs_new_fileinfo(&fid, file, tlink, oplock, buf.symlink_target); 553 548 if (file_info == NULL) {
+44 -46
fs/smb/client/file.c
··· 270 270 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file) 271 271 { 272 272 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); 273 - struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 273 + struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode); 274 274 struct cifsFileInfo *open_file = NULL; 275 275 276 276 rreq->rsize = cifs_sb->ctx->rsize; ··· 281 281 open_file = file->private_data; 282 282 rreq->netfs_priv = file->private_data; 283 283 req->cfile = cifsFileInfo_get(open_file); 284 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 284 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RWPIDFORWARD) 285 285 req->pid = req->cfile->pid; 286 286 } else if (rreq->origin != NETFS_WRITEBACK) { 287 287 WARN_ON_ONCE(1); ··· 906 906 * close because it may cause a error when we open this file 907 907 * again and get at least level II oplock. 908 908 */ 909 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) 909 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_STRICT_IO) 910 910 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags); 911 911 cifs_set_oplock_level(cifsi, 0); 912 912 } ··· 955 955 int cifs_file_flush(const unsigned int xid, struct inode *inode, 956 956 struct cifsFileInfo *cfile) 957 957 { 958 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 958 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 959 959 struct cifs_tcon *tcon; 960 960 int rc; 961 961 962 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC) 962 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC) 963 963 return 0; 964 964 965 965 if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) { ··· 1015 1015 int cifs_open(struct inode *inode, struct file *file) 1016 1016 1017 1017 { 1018 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 1019 + struct cifs_open_info_data data = {}; 1020 + struct cifsFileInfo *cfile = NULL; 1021 + struct TCP_Server_Info *server; 1022 + struct cifs_pending_open open; 1023 + bool posix_open_ok = false; 1024 + struct cifs_fid fid = {}; 1025 + struct tcon_link *tlink; 1026 + struct cifs_tcon *tcon; 1027 + const char *full_path; 1028 + unsigned int sbflags; 1018 1029 int rc = -EACCES; 1019 1030 unsigned int xid; 1020 1031 __u32 oplock; 1021 - struct cifs_sb_info *cifs_sb; 1022 - struct TCP_Server_Info *server; 1023 - struct cifs_tcon *tcon; 1024 - struct tcon_link *tlink; 1025 - struct cifsFileInfo *cfile = NULL; 1026 1032 void *page; 1027 - const char *full_path; 1028 - bool posix_open_ok = false; 1029 - struct cifs_fid fid = {}; 1030 - struct cifs_pending_open open; 1031 - struct cifs_open_info_data data = {}; 1032 1033 1033 1034 xid = get_xid(); 1034 1035 1035 - cifs_sb = CIFS_SB(inode->i_sb); 1036 1036 if (unlikely(cifs_forced_shutdown(cifs_sb))) { 1037 1037 free_xid(xid); 1038 1038 return smb_EIO(smb_eio_trace_forced_shutdown); ··· 1056 1056 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n", 1057 1057 inode, file->f_flags, full_path); 1058 1058 1059 - if (file->f_flags & O_DIRECT && 1060 - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { 1061 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 1059 + sbflags = cifs_sb_flags(cifs_sb); 1060 + if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) { 1061 + if (sbflags & CIFS_MOUNT_NO_BRL) 1062 1062 file->f_op = &cifs_file_direct_nobrl_ops; 1063 1063 else 1064 1064 file->f_op = &cifs_file_direct_ops; ··· 1209 1209 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1210 1210 int rc = 0; 1211 1211 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1212 - struct cifs_sb_info *cifs_sb = 
CIFS_SB(cfile->dentry->d_sb); 1212 + struct cifs_sb_info *cifs_sb = CIFS_SB(cinode); 1213 1213 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1214 1214 1215 1215 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); ··· 1222 1222 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1223 1223 if (cap_unix(tcon->ses) && 1224 1224 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 1225 - ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 1225 + ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) 1226 1226 rc = cifs_push_posix_locks(cfile); 1227 1227 else 1228 1228 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ ··· 2011 2011 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 2012 2012 int rc = 0; 2013 2013 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2014 - struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); 2014 + struct cifs_sb_info *cifs_sb = CIFS_SB(cinode); 2015 2015 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2016 2016 2017 2017 /* we are going to update can_cache_brlcks here - need a write access */ ··· 2024 2024 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2025 2025 if (cap_unix(tcon->ses) && 2026 2026 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2027 - ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2027 + ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2028 2028 rc = cifs_push_posix_locks(cfile); 2029 2029 else 2030 2030 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ ··· 2428 2428 2429 2429 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag, 2430 2430 tcon->ses->server); 2431 - cifs_sb = CIFS_FILE_SB(file); 2431 + cifs_sb = CIFS_SB(file); 2432 2432 2433 2433 if (cap_unix(tcon->ses) && 2434 2434 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2435 - ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2435 + ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2436 2436 posix_lck = true; 2437 2437 2438 2438 if (!lock && !unlock) { ··· 2455 2455 2456 2456 int cifs_lock(struct file *file, int cmd, struct file_lock *flock) 2457 2457 { 2458 - int rc, xid; 2458 + struct cifs_sb_info *cifs_sb = CIFS_SB(file); 2459 + struct cifsFileInfo *cfile; 2459 2460 int lock = 0, unlock = 0; 2460 2461 bool wait_flag = false; 2461 2462 bool posix_lck = false; 2462 - struct cifs_sb_info *cifs_sb; 2463 2463 struct cifs_tcon *tcon; 2464 - struct cifsFileInfo *cfile; 2465 2464 __u32 type; 2465 + int rc, xid; 2466 2466 2467 2467 rc = -EACCES; 2468 2468 xid = get_xid(); ··· 2477 2477 2478 2478 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, 2479 2479 tcon->ses->server); 2480 - cifs_sb = CIFS_FILE_SB(file); 2481 2480 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags); 2482 2481 2483 2482 if (cap_unix(tcon->ses) && 2484 2483 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2485 - ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2484 + ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2486 2485 posix_lck = true; 2487 2486 /* 2488 2487 * BB add code here to normalize offset and length to account for ··· 2531 2532 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 2532 2533 bool fsuid_only) 2533 2534 { 2535 + struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode); 2534 2536 struct cifsFileInfo *open_file = NULL; 2535 - struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); 2536 2537 2537 2538 /* only filter by fsuid on multiuser mounts */ 2538 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 2539 + if 
(!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER)) 2539 2540 fsuid_only = false; 2540 2541 2541 2542 spin_lock(&cifs_inode->open_file_lock); ··· 2588 2589 return rc; 2589 2590 } 2590 2591 2591 - cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); 2592 + cifs_sb = CIFS_SB(cifs_inode); 2592 2593 2593 2594 /* only filter by fsuid on multiuser mounts */ 2594 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 2595 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER)) 2595 2596 fsuid_only = false; 2596 2597 2597 2598 spin_lock(&cifs_inode->open_file_lock); ··· 2786 2787 struct TCP_Server_Info *server; 2787 2788 struct cifsFileInfo *smbfile = file->private_data; 2788 2789 struct inode *inode = file_inode(file); 2789 - struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 2790 + struct cifs_sb_info *cifs_sb = CIFS_SB(file); 2790 2791 2791 2792 rc = file_write_and_wait_range(file, start, end); 2792 2793 if (rc) { ··· 2800 2801 file, datasync); 2801 2802 2802 2803 tcon = tlink_tcon(smbfile->tlink); 2803 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { 2804 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)) { 2804 2805 server = tcon->ses->server; 2805 2806 if (server->ops->flush == NULL) { 2806 2807 rc = -ENOSYS; ··· 2852 2853 struct inode *inode = file->f_mapping->host; 2853 2854 struct cifsInodeInfo *cinode = CIFS_I(inode); 2854 2855 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; 2855 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2856 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 2856 2857 ssize_t rc; 2857 2858 2858 2859 rc = netfs_start_io_write(inode); ··· 2869 2870 if (rc <= 0) 2870 2871 goto out; 2871 2872 2872 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) && 2873 + if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) && 2873 2874 (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from), 2874 2875 server->vals->exclusive_lock_type, 0, 2875 2876 NULL, CIFS_WRITE_OP))) { ··· 2892 2893 { 2893 2894 struct inode *inode = file_inode(iocb->ki_filp); 2894 2895 struct cifsInodeInfo *cinode = CIFS_I(inode); 2895 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2896 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 2896 2897 struct cifsFileInfo *cfile = (struct cifsFileInfo *) 2897 2898 iocb->ki_filp->private_data; 2898 2899 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); ··· 2905 2906 if (CIFS_CACHE_WRITE(cinode)) { 2906 2907 if (cap_unix(tcon->ses) && 2907 2908 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2908 - ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { 2909 + ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) { 2909 2910 written = netfs_file_write_iter(iocb, from); 2910 2911 goto out; 2911 2912 } ··· 2993 2994 { 2994 2995 struct inode *inode = file_inode(iocb->ki_filp); 2995 2996 struct cifsInodeInfo *cinode = CIFS_I(inode); 2996 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2997 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 2997 2998 struct cifsFileInfo *cfile = (struct cifsFileInfo *) 2998 2999 iocb->ki_filp->private_data; 2999 3000 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); ··· 3010 3011 if (!CIFS_CACHE_READ(cinode)) 3011 3012 return netfs_unbuffered_read_iter(iocb, to); 3012 3013 3013 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) { 3014 + if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0) { 3014 3015 if (iocb->ki_flags & IOCB_DIRECT) 3015 3016 return netfs_unbuffered_read_iter(iocb, to); 3016 3017 return 
netfs_buffered_read_iter(iocb, to); ··· 3129 3130 if (is_inode_writable(cifsInode) || 3130 3131 ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) { 3131 3132 /* This inode is open for write at least once */ 3132 - struct cifs_sb_info *cifs_sb; 3133 + struct cifs_sb_info *cifs_sb = CIFS_SB(cifsInode); 3133 3134 3134 - cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb); 3135 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { 3135 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DIRECT_IO) { 3136 3136 /* since no page cache to corrupt on directio 3137 3137 we can change size safely */ 3138 3138 return true; ··· 3179 3181 server = tcon->ses->server; 3180 3182 3181 3183 scoped_guard(spinlock, &cinode->open_file_lock) { 3182 - unsigned int sbflags = cifs_sb->mnt_cifs_flags; 3184 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 3183 3185 3184 3186 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, 3185 3187 cfile->oplock_epoch, &purge_cache);
+74 -75
fs/smb/client/fs_context.c
··· 2062 2062 kfree(ctx); 2063 2063 } 2064 2064 2065 - void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb) 2065 + unsigned int smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb) 2066 2066 { 2067 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 2067 2068 struct smb3_fs_context *ctx = cifs_sb->ctx; 2068 2069 2069 2070 if (ctx->nodfs) 2070 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_DFS; 2071 + sbflags |= CIFS_MOUNT_NO_DFS; 2071 2072 else 2072 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_DFS; 2073 + sbflags &= ~CIFS_MOUNT_NO_DFS; 2073 2074 2074 2075 if (ctx->noperm) 2075 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; 2076 + sbflags |= CIFS_MOUNT_NO_PERM; 2076 2077 else 2077 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_PERM; 2078 + sbflags &= ~CIFS_MOUNT_NO_PERM; 2078 2079 2079 2080 if (ctx->setuids) 2080 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID; 2081 + sbflags |= CIFS_MOUNT_SET_UID; 2081 2082 else 2082 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SET_UID; 2083 + sbflags &= ~CIFS_MOUNT_SET_UID; 2083 2084 2084 2085 if (ctx->setuidfromacl) 2085 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL; 2086 + sbflags |= CIFS_MOUNT_UID_FROM_ACL; 2086 2087 else 2087 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UID_FROM_ACL; 2088 + sbflags &= ~CIFS_MOUNT_UID_FROM_ACL; 2088 2089 2089 2090 if (ctx->server_ino) 2090 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM; 2091 + sbflags |= CIFS_MOUNT_SERVER_INUM; 2091 2092 else 2092 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; 2093 + sbflags &= ~CIFS_MOUNT_SERVER_INUM; 2093 2094 2094 2095 if (ctx->remap) 2095 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SFM_CHR; 2096 + sbflags |= CIFS_MOUNT_MAP_SFM_CHR; 2096 2097 else 2097 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SFM_CHR; 2098 + sbflags &= ~CIFS_MOUNT_MAP_SFM_CHR; 2098 2099 2099 2100 if (ctx->sfu_remap) 2100 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR; 2101 + sbflags |= CIFS_MOUNT_MAP_SPECIAL_CHR; 2101 2102 else 2102 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SPECIAL_CHR; 2103 + sbflags &= ~CIFS_MOUNT_MAP_SPECIAL_CHR; 2103 2104 2104 2105 if (ctx->no_xattr) 2105 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR; 2106 + sbflags |= CIFS_MOUNT_NO_XATTR; 2106 2107 else 2107 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_XATTR; 2108 + sbflags &= ~CIFS_MOUNT_NO_XATTR; 2108 2109 2109 2110 if (ctx->sfu_emul) 2110 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; 2111 + sbflags |= CIFS_MOUNT_UNX_EMUL; 2111 2112 else 2112 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UNX_EMUL; 2113 + sbflags &= ~CIFS_MOUNT_UNX_EMUL; 2113 2114 2114 2115 if (ctx->nobrl) 2115 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; 2116 + sbflags |= CIFS_MOUNT_NO_BRL; 2116 2117 else 2117 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_BRL; 2118 + sbflags &= ~CIFS_MOUNT_NO_BRL; 2118 2119 2119 2120 if (ctx->nohandlecache) 2120 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_HANDLE_CACHE; 2121 + sbflags |= CIFS_MOUNT_NO_HANDLE_CACHE; 2121 2122 else 2122 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_HANDLE_CACHE; 2123 + sbflags &= ~CIFS_MOUNT_NO_HANDLE_CACHE; 2123 2124 2124 2125 if (ctx->nostrictsync) 2125 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; 2126 + sbflags |= CIFS_MOUNT_NOSSYNC; 2126 2127 else 2127 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOSSYNC; 2128 + sbflags &= ~CIFS_MOUNT_NOSSYNC; 2128 2129 2129 2130 if (ctx->mand_lock) 2130 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL; 2131 + sbflags |= CIFS_MOUNT_NOPOSIXBRL; 2131 2132 else 2132 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOPOSIXBRL; 2133 + sbflags &= 
~CIFS_MOUNT_NOPOSIXBRL; 2133 2134 2134 2135 if (ctx->rwpidforward) 2135 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; 2136 + sbflags |= CIFS_MOUNT_RWPIDFORWARD; 2136 2137 else 2137 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_RWPIDFORWARD; 2138 + sbflags &= ~CIFS_MOUNT_RWPIDFORWARD; 2138 2139 2139 2140 if (ctx->mode_ace) 2140 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MODE_FROM_SID; 2141 + sbflags |= CIFS_MOUNT_MODE_FROM_SID; 2141 2142 else 2142 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MODE_FROM_SID; 2143 + sbflags &= ~CIFS_MOUNT_MODE_FROM_SID; 2143 2144 2144 2145 if (ctx->cifs_acl) 2145 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; 2146 + sbflags |= CIFS_MOUNT_CIFS_ACL; 2146 2147 else 2147 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_ACL; 2148 + sbflags &= ~CIFS_MOUNT_CIFS_ACL; 2148 2149 2149 2150 if (ctx->backupuid_specified) 2150 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID; 2151 + sbflags |= CIFS_MOUNT_CIFS_BACKUPUID; 2151 2152 else 2152 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPUID; 2153 + sbflags &= ~CIFS_MOUNT_CIFS_BACKUPUID; 2153 2154 2154 2155 if (ctx->backupgid_specified) 2155 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID; 2156 + sbflags |= CIFS_MOUNT_CIFS_BACKUPGID; 2156 2157 else 2157 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPGID; 2158 + sbflags &= ~CIFS_MOUNT_CIFS_BACKUPGID; 2158 2159 2159 2160 if (ctx->override_uid) 2160 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; 2161 + sbflags |= CIFS_MOUNT_OVERR_UID; 2161 2162 else 2162 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_UID; 2163 + sbflags &= ~CIFS_MOUNT_OVERR_UID; 2163 2164 2164 2165 if (ctx->override_gid) 2165 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; 2166 + sbflags |= CIFS_MOUNT_OVERR_GID; 2166 2167 else 2167 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_GID; 2168 + sbflags &= ~CIFS_MOUNT_OVERR_GID; 2168 2169 2169 2170 if (ctx->dynperm) 2170 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; 2171 + sbflags |= CIFS_MOUNT_DYNPERM; 2171 2172 else 2172 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DYNPERM; 2173 + sbflags &= ~CIFS_MOUNT_DYNPERM; 2173 2174 2174 2175 if (ctx->fsc) 2175 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE; 2176 + sbflags |= CIFS_MOUNT_FSCACHE; 2176 2177 else 2177 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_FSCACHE; 2178 + sbflags &= ~CIFS_MOUNT_FSCACHE; 2178 2179 2179 2180 if (ctx->multiuser) 2180 - cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER | 2181 - CIFS_MOUNT_NO_PERM); 2181 + sbflags |= CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_NO_PERM; 2182 2182 else 2183 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MULTIUSER; 2183 + sbflags &= ~CIFS_MOUNT_MULTIUSER; 2184 2184 2185 2185 2186 2186 if (ctx->strict_io) 2187 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO; 2187 + sbflags |= CIFS_MOUNT_STRICT_IO; 2188 2188 else 2189 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_STRICT_IO; 2189 + sbflags &= ~CIFS_MOUNT_STRICT_IO; 2190 2190 2191 2191 if (ctx->direct_io) 2192 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; 2192 + sbflags |= CIFS_MOUNT_DIRECT_IO; 2193 2193 else 2194 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DIRECT_IO; 2194 + sbflags &= ~CIFS_MOUNT_DIRECT_IO; 2195 2195 2196 2196 if (ctx->mfsymlinks) 2197 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS; 2197 + sbflags |= CIFS_MOUNT_MF_SYMLINKS; 2198 2198 else 2199 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MF_SYMLINKS; 2200 - if (ctx->mfsymlinks) { 2201 - if (ctx->sfu_emul) { 2202 - /* 2203 - * Our SFU ("Services for Unix") emulation allows now 2204 - * creating new and reading existing SFU symlinks. 
2205 - * Older Linux kernel versions were not able to neither 2206 - * read existing nor create new SFU symlinks. But 2207 - * creating and reading SFU style mknod and FIFOs was 2208 - * supported for long time. When "mfsymlinks" and 2209 - * "sfu" are both enabled at the same time, it allows 2210 - * reading both types of symlinks, but will only create 2211 - * them with mfsymlinks format. This allows better 2212 - * Apple compatibility, compatibility with older Linux 2213 - * kernel clients (probably better for Samba too) 2214 - * while still recognizing old Windows style symlinks. 2215 - */ 2216 - cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n"); 2217 - } 2218 - } 2219 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SHUTDOWN; 2199 + sbflags &= ~CIFS_MOUNT_MF_SYMLINKS; 2220 2200 2221 - return; 2201 + if (ctx->mfsymlinks && ctx->sfu_emul) { 2202 + /* 2203 + * Our SFU ("Services for Unix") emulation allows now 2204 + * creating new and reading existing SFU symlinks. 2205 + * Older Linux kernel versions were not able to neither 2206 + * read existing nor create new SFU symlinks. But 2207 + * creating and reading SFU style mknod and FIFOs was 2208 + * supported for long time. When "mfsymlinks" and 2209 + * "sfu" are both enabled at the same time, it allows 2210 + * reading both types of symlinks, but will only create 2211 + * them with mfsymlinks format. This allows better 2212 + * Apple compatibility, compatibility with older Linux 2213 + * kernel clients (probably better for Samba too) 2214 + * while still recognizing old Windows style symlinks. 2215 + */ 2216 + cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n"); 2217 + } 2218 + sbflags &= ~CIFS_MOUNT_SHUTDOWN; 2219 + atomic_set(&cifs_sb->mnt_cifs_flags, sbflags); 2220 + return sbflags; 2222 2221 }
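smb3_update_mnt_flags() now builds the entire mask in a plain local and publishes it with one atomic_set() at the end, then returns the value so callers such as cifs_setup_cifs_sb() can layer further bits before their own publish. Concurrent readers therefore see either the old word or the finished new one, never an intermediate state with only some options applied. A userspace sketch of that build-locally, publish-once shape (option bits illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OPT_A        (1u << 0)
#define OPT_B        (1u << 1)
#define OPT_SHUTDOWN (1u << 31)

static atomic_uint mnt_flags;

static unsigned int update_flags(bool want_a, bool want_b)
{
	unsigned int flags = atomic_load(&mnt_flags);

	/* mutate only the private copy */
	if (want_a)
		flags |= OPT_A;
	else
		flags &= ~OPT_A;
	if (want_b)
		flags |= OPT_B;
	else
		flags &= ~OPT_B;
	flags &= ~OPT_SHUTDOWN;

	atomic_store(&mnt_flags, flags);	/* single publish */
	return flags;				/* callers may layer more bits */
}

int main(void)
{
	printf("%#x\n", update_flags(true, false));	/* prints 0x1 */
	return 0;
}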
+1 -1
fs/smb/client/fs_context.h
··· 374 374 struct smb3_fs_context *ctx); 375 375 int smb3_sync_session_ctx_passwords(struct cifs_sb_info *cifs_sb, 376 376 struct cifs_ses *ses); 377 - void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb); 377 + unsigned int smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb); 378 378 379 379 /* 380 380 * max deferred close timeout (jiffies) - 2^30
+76 -70
fs/smb/client/inode.c
··· 40 40 41 41 static void cifs_set_ops(struct inode *inode) 42 42 { 43 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 43 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 44 + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 44 45 struct netfs_inode *ictx = netfs_inode(inode); 46 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 45 47 46 48 switch (inode->i_mode & S_IFMT) { 47 49 case S_IFREG: 48 50 inode->i_op = &cifs_file_inode_ops; 49 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { 51 + if (sbflags & CIFS_MOUNT_DIRECT_IO) { 50 52 set_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags); 51 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 53 + if (sbflags & CIFS_MOUNT_NO_BRL) 52 54 inode->i_fop = &cifs_file_direct_nobrl_ops; 53 55 else 54 56 inode->i_fop = &cifs_file_direct_ops; 55 - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { 56 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 57 + } else if (sbflags & CIFS_MOUNT_STRICT_IO) { 58 + if (sbflags & CIFS_MOUNT_NO_BRL) 57 59 inode->i_fop = &cifs_file_strict_nobrl_ops; 58 60 else 59 61 inode->i_fop = &cifs_file_strict_ops; 60 - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 62 + } else if (sbflags & CIFS_MOUNT_NO_BRL) 61 63 inode->i_fop = &cifs_file_nobrl_ops; 62 64 else { /* not direct, send byte range locks */ 63 65 inode->i_fop = &cifs_file_ops; 64 66 } 65 67 66 68 /* check if server can support readahead */ 67 - if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read < 68 - PAGE_SIZE + MAX_CIFS_HDR_SIZE) 69 + if (tcon->ses->server->max_read < PAGE_SIZE + MAX_CIFS_HDR_SIZE) 69 70 inode->i_data.a_ops = &cifs_addr_ops_smallbuf; 70 71 else 71 72 inode->i_data.a_ops = &cifs_addr_ops; ··· 195 194 inode->i_gid = fattr->cf_gid; 196 195 197 196 /* if dynperm is set, don't clobber existing mode */ 198 - if (inode_state_read(inode) & I_NEW || 199 - !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) 197 + if ((inode_state_read(inode) & I_NEW) || 198 + !(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DYNPERM)) 200 199 inode->i_mode = fattr->cf_mode; 201 200 202 201 cifs_i->cifsAttrs = fattr->cf_cifsattrs; ··· 249 248 { 250 249 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 251 250 252 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) 253 - return; 254 - 255 - fattr->cf_uniqueid = iunique(sb, ROOT_I); 251 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM)) 252 + fattr->cf_uniqueid = iunique(sb, ROOT_I); 256 253 } 257 254 258 255 /* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. 
*/ ··· 258 259 cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, 259 260 struct cifs_sb_info *cifs_sb) 260 261 { 262 + unsigned int sbflags; 263 + 261 264 memset(fattr, 0, sizeof(*fattr)); 262 265 fattr->cf_uniqueid = le64_to_cpu(info->UniqueId); 263 266 fattr->cf_bytes = le64_to_cpu(info->NumOfBytes); ··· 318 317 break; 319 318 } 320 319 320 + sbflags = cifs_sb_flags(cifs_sb); 321 321 fattr->cf_uid = cifs_sb->ctx->linux_uid; 322 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)) { 322 + if (!(sbflags & CIFS_MOUNT_OVERR_UID)) { 323 323 u64 id = le64_to_cpu(info->Uid); 324 324 if (id < ((uid_t)-1)) { 325 325 kuid_t uid = make_kuid(&init_user_ns, id); ··· 330 328 } 331 329 332 330 fattr->cf_gid = cifs_sb->ctx->linux_gid; 333 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)) { 331 + if (!(sbflags & CIFS_MOUNT_OVERR_GID)) { 334 332 u64 id = le64_to_cpu(info->Gid); 335 333 if (id < ((gid_t)-1)) { 336 334 kgid_t gid = make_kgid(&init_user_ns, id); ··· 384 382 * 385 383 * If file type or uniqueid is different, return error. 386 384 */ 387 - if (unlikely((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) && 385 + if (unlikely((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM) && 388 386 CIFS_I(*inode)->uniqueid != fattr->cf_uniqueid)) { 389 387 CIFS_I(*inode)->time = 0; /* force reval */ 390 388 return -ESTALE; ··· 470 468 cifs_fill_uniqueid(sb, fattr); 471 469 472 470 /* check for Minshall+French symlinks */ 473 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { 471 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MF_SYMLINKS) { 474 472 tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path); 475 473 cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc); 476 474 } ··· 1083 1081 else if ((tcon->ses->capabilities & 1084 1082 tcon->ses->server->vals->cap_nt_find) == 0) 1085 1083 info.info_level = SMB_FIND_FILE_INFO_STANDARD; 1086 - else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) 1084 + else if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM) 1087 1085 info.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; 1088 1086 else /* no srvino useful for fallback to some netapp */ 1089 1087 info.info_level = SMB_FIND_FILE_DIRECTORY_INFO; ··· 1111 1109 struct TCP_Server_Info *server = tcon->ses->server; 1112 1110 int rc; 1113 1111 1114 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { 1112 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM)) { 1115 1113 if (*inode) 1116 1114 fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid; 1117 1115 else ··· 1265 1263 struct inode **inode, 1266 1264 const char *full_path) 1267 1265 { 1268 - struct cifs_open_info_data tmp_data = {}; 1269 - struct cifs_tcon *tcon; 1270 - struct TCP_Server_Info *server; 1271 - struct tcon_link *tlink; 1272 1266 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 1267 + struct cifs_open_info_data tmp_data = {}; 1273 1268 void *smb1_backup_rsp_buf = NULL; 1274 - int rc = 0; 1269 + struct TCP_Server_Info *server; 1270 + struct cifs_tcon *tcon; 1271 + struct tcon_link *tlink; 1272 + unsigned int sbflags; 1275 1273 int tmprc = 0; 1274 + int rc = 0; 1276 1275 1277 1276 tlink = cifs_sb_tlink(cifs_sb); 1278 1277 if (IS_ERR(tlink)) ··· 1373 1370 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1374 1371 handle_mnt_opt: 1375 1372 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1373 + sbflags = cifs_sb_flags(cifs_sb); 1376 1374 /* query for SFU type info if supported and needed */ 1377 1375 if ((fattr->cf_cifsattrs & ATTR_SYSTEM) && 1378 - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) { 1376 + (sbflags & 
CIFS_MOUNT_UNX_EMUL)) { 1379 1377 tmprc = cifs_sfu_type(fattr, full_path, cifs_sb, xid); 1380 1378 if (tmprc) 1381 1379 cifs_dbg(FYI, "cifs_sfu_type failed: %d\n", tmprc); 1382 1380 } 1383 1381 1384 1382 /* fill in 0777 bits from ACL */ 1385 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) { 1383 + if (sbflags & CIFS_MOUNT_MODE_FROM_SID) { 1386 1384 rc = cifs_acl_to_fattr(cifs_sb, fattr, *inode, 1387 1385 true, full_path, fid); 1388 1386 if (rc == -EREMOTE) ··· 1393 1389 __func__, rc); 1394 1390 goto out; 1395 1391 } 1396 - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { 1392 + } else if (sbflags & CIFS_MOUNT_CIFS_ACL) { 1397 1393 rc = cifs_acl_to_fattr(cifs_sb, fattr, *inode, 1398 1394 false, full_path, fid); 1399 1395 if (rc == -EREMOTE) ··· 1403 1399 __func__, rc); 1404 1400 goto out; 1405 1401 } 1406 - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) 1402 + } else if (sbflags & CIFS_MOUNT_UNX_EMUL) 1407 1403 /* fill in remaining high mode bits e.g. SUID, VTX */ 1408 1404 cifs_sfu_mode(fattr, full_path, cifs_sb, xid); 1409 1405 else if (!(tcon->posix_extensions)) ··· 1413 1409 1414 1410 1415 1411 /* check for Minshall+French symlinks */ 1416 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { 1412 + if (sbflags & CIFS_MOUNT_MF_SYMLINKS) { 1417 1413 tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path); 1418 1414 cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc); 1419 1415 } ··· 1513 1509 * 3. Tweak fattr based on mount options 1514 1510 */ 1515 1511 /* check for Minshall+French symlinks */ 1516 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { 1512 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MF_SYMLINKS) { 1517 1513 tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path); 1518 1514 cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc); 1519 1515 } ··· 1664 1660 int len; 1665 1661 int rc; 1666 1662 1667 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) 1663 + if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH) 1668 1664 && cifs_sb->prepath) { 1669 1665 len = strlen(cifs_sb->prepath); 1670 1666 path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL); ··· 2102 2098 const char *full_path, struct cifs_sb_info *cifs_sb, 2103 2099 struct cifs_tcon *tcon, const unsigned int xid) 2104 2100 { 2105 - int rc = 0; 2106 2101 struct inode *inode = NULL; 2102 + unsigned int sbflags; 2103 + int rc = 0; 2107 2104 2108 2105 if (tcon->posix_extensions) { 2109 2106 rc = smb311_posix_get_inode_info(&inode, full_path, ··· 2144 2139 if (parent->i_mode & S_ISGID) 2145 2140 mode |= S_ISGID; 2146 2141 2142 + sbflags = cifs_sb_flags(cifs_sb); 2147 2143 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2148 2144 if (tcon->unix_ext) { 2149 2145 struct cifs_unix_set_info_args args = { ··· 2154 2148 .mtime = NO_CHANGE_64, 2155 2149 .device = 0, 2156 2150 }; 2157 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 2151 + if (sbflags & CIFS_MOUNT_SET_UID) { 2158 2152 args.uid = current_fsuid(); 2159 2153 if (parent->i_mode & S_ISGID) 2160 2154 args.gid = parent->i_gid; ··· 2172 2166 { 2173 2167 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2174 2168 struct TCP_Server_Info *server = tcon->ses->server; 2175 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && 2169 + if (!(sbflags & CIFS_MOUNT_CIFS_ACL) && 2176 2170 (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo) 2177 2171 server->ops->mkdir_setinfo(inode, full_path, cifs_sb, 2178 2172 tcon, xid); 2179 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) 2173 + if (sbflags & CIFS_MOUNT_DYNPERM) 2180 
2174 inode->i_mode = (mode | S_IFDIR); 2181 2175 2182 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 2176 + if (sbflags & CIFS_MOUNT_SET_UID) { 2183 2177 inode->i_uid = current_fsuid(); 2184 2178 if (inode->i_mode & S_ISGID) 2185 2179 inode->i_gid = parent->i_gid; ··· 2692 2686 { 2693 2687 struct inode *inode = d_inode(dentry); 2694 2688 struct cifsInodeInfo *cifs_i = CIFS_I(inode); 2695 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2689 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 2696 2690 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 2697 2691 struct cached_fid *cfid = NULL; 2698 2692 ··· 2733 2727 } 2734 2728 2735 2729 /* hardlinked files w/ noserverino get "special" treatment */ 2736 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) && 2730 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM) && 2737 2731 S_ISREG(inode->i_mode) && inode->i_nlink != 1) 2738 2732 return true; 2739 2733 ··· 2758 2752 int 2759 2753 cifs_revalidate_mapping(struct inode *inode) 2760 2754 { 2761 - int rc; 2762 2755 struct cifsInodeInfo *cifs_inode = CIFS_I(inode); 2756 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 2763 2757 unsigned long *flags = &cifs_inode->flags; 2764 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2758 + int rc; 2765 2759 2766 2760 /* swapfiles are not supposed to be shared */ 2767 2761 if (IS_SWAPFILE(inode)) ··· 2774 2768 2775 2769 if (test_and_clear_bit(CIFS_INO_INVALID_MAPPING, flags)) { 2776 2770 /* for cache=singleclient, do not invalidate */ 2777 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE) 2771 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RW_CACHE) 2778 2772 goto skip_invalidate; 2779 2773 2780 2774 cifs_inode->netfs.zero_point = cifs_inode->netfs.remote_i_size; ··· 2898 2892 int cifs_getattr(struct mnt_idmap *idmap, const struct path *path, 2899 2893 struct kstat *stat, u32 request_mask, unsigned int flags) 2900 2894 { 2901 - struct dentry *dentry = path->dentry; 2902 - struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); 2895 + struct cifs_sb_info *cifs_sb = CIFS_SB(path->dentry); 2903 2896 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 2897 + struct dentry *dentry = path->dentry; 2904 2898 struct inode *inode = d_inode(dentry); 2899 + unsigned int sbflags; 2905 2900 int rc; 2906 2901 2907 2902 if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) ··· 2959 2952 * enabled, and the admin hasn't overridden them, set the ownership 2960 2953 * to the fsuid/fsgid of the current process. 
2961 2954 */ 2962 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) && 2963 - !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && 2955 + sbflags = cifs_sb_flags(cifs_sb); 2956 + if ((sbflags & CIFS_MOUNT_MULTIUSER) && 2957 + !(sbflags & CIFS_MOUNT_CIFS_ACL) && 2964 2958 !tcon->unix_ext) { 2965 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)) 2959 + if (!(sbflags & CIFS_MOUNT_OVERR_UID)) 2966 2960 stat->uid = current_fsuid(); 2967 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)) 2961 + if (!(sbflags & CIFS_MOUNT_OVERR_GID)) 2968 2962 stat->gid = current_fsgid(); 2969 2963 } 2970 2964 return 0; ··· 3110 3102 void *page = alloc_dentry_path(); 3111 3103 struct inode *inode = d_inode(direntry); 3112 3104 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 3113 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 3105 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 3114 3106 struct tcon_link *tlink; 3115 3107 struct cifs_tcon *pTcon; 3116 3108 struct cifs_unix_set_info_args *args = NULL; ··· 3121 3113 3122 3114 xid = get_xid(); 3123 3115 3124 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) 3116 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_PERM) 3125 3117 attrs->ia_valid |= ATTR_FORCE; 3126 3118 3127 3119 rc = setattr_prepare(&nop_mnt_idmap, direntry, attrs); ··· 3274 3266 static int 3275 3267 cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) 3276 3268 { 3277 - unsigned int xid; 3269 + struct inode *inode = d_inode(direntry); 3270 + struct cifsInodeInfo *cifsInode = CIFS_I(inode); 3271 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 3272 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 3273 + struct cifsFileInfo *cfile = NULL; 3274 + void *page = alloc_dentry_path(); 3275 + __u64 mode = NO_CHANGE_64; 3278 3276 kuid_t uid = INVALID_UID; 3279 3277 kgid_t gid = INVALID_GID; 3280 - struct inode *inode = d_inode(direntry); 3281 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 3282 - struct cifsInodeInfo *cifsInode = CIFS_I(inode); 3283 - struct cifsFileInfo *cfile = NULL; 3284 3278 const char *full_path; 3285 - void *page = alloc_dentry_path(); 3286 - int rc = -EACCES; 3287 3279 __u32 dosattr = 0; 3288 - __u64 mode = NO_CHANGE_64; 3289 - bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions; 3280 + int rc = -EACCES; 3281 + unsigned int xid; 3290 3282 3291 3283 xid = get_xid(); 3292 3284 3293 3285 cifs_dbg(FYI, "setattr on file %pd attrs->ia_valid 0x%x\n", 3294 3286 direntry, attrs->ia_valid); 3295 3287 3296 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) 3288 + if (sbflags & CIFS_MOUNT_NO_PERM) 3297 3289 attrs->ia_valid |= ATTR_FORCE; 3298 3290 3299 3291 rc = setattr_prepare(&nop_mnt_idmap, direntry, attrs); ··· 3354 3346 if (attrs->ia_valid & ATTR_GID) 3355 3347 gid = attrs->ia_gid; 3356 3348 3357 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) || 3358 - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) { 3349 + if (sbflags & (CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_MODE_FROM_SID)) { 3359 3350 if (uid_valid(uid) || gid_valid(gid)) { 3360 3351 mode = NO_CHANGE_64; 3361 3352 rc = id_mode_to_cifs_acl(inode, full_path, &mode, ··· 3365 3358 goto cifs_setattr_exit; 3366 3359 } 3367 3360 } 3368 - } else 3369 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) 3361 + } else if (!(sbflags & CIFS_MOUNT_SET_UID)) { 3370 3362 attrs->ia_valid &= ~(ATTR_UID | ATTR_GID); 3363 + } 3371 3364 3372 3365 /* skip mode change if it's just for clearing setuid/setgid */ 3373 3366 if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) ··· 3376 3369 if 
(attrs->ia_valid & ATTR_MODE) { 3377 3370 mode = attrs->ia_mode; 3378 3371 rc = 0; 3379 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) || 3380 - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) || 3381 - posix) { 3372 + if ((sbflags & (CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_MODE_FROM_SID)) || 3373 + cifs_sb_master_tcon(cifs_sb)->posix_extensions) { 3382 3374 rc = id_mode_to_cifs_acl(inode, full_path, &mode, 3383 3375 INVALID_UID, INVALID_GID); 3384 3376 if (rc) { ··· 3399 3393 dosattr = cifsInode->cifsAttrs | ATTR_READONLY; 3400 3394 3401 3395 /* fix up mode if we're not using dynperm */ 3402 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0) 3396 + if ((sbflags & CIFS_MOUNT_DYNPERM) == 0) 3403 3397 attrs->ia_mode = inode->i_mode & ~S_IWUGO; 3404 3398 } else if ((mode & S_IWUGO) && 3405 3399 (cifsInode->cifsAttrs & ATTR_READONLY)) { ··· 3410 3404 dosattr |= ATTR_NORMAL; 3411 3405 3412 3406 /* reset local inode permissions to normal */ 3413 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) { 3407 + if (!(sbflags & CIFS_MOUNT_DYNPERM)) { 3414 3408 attrs->ia_mode &= ~(S_IALLUGO); 3415 3409 if (S_ISDIR(inode->i_mode)) 3416 3410 attrs->ia_mode |= ··· 3419 3413 attrs->ia_mode |= 3420 3414 cifs_sb->ctx->file_mode; 3421 3415 } 3422 - } else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) { 3416 + } else if (!(sbflags & CIFS_MOUNT_DYNPERM)) { 3423 3417 /* ignore mode change - ATTR_READONLY hasn't changed */ 3424 3418 attrs->ia_valid &= ~ATTR_MODE; 3425 3419 }
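The recurring change in the hunks above is that direct reads of cifs_sb->mnt_cifs_flags become cifs_sb_flags() calls, with the result cached in a local sbflags wherever a function tests several bits, so each function works against one consistent snapshot of the flags. The accessor itself is not part of this section of the diff; a minimal sketch of what it presumably looks like, assuming mnt_cifs_flags has become an atomic_t as the atomic_or()/atomic_andnot() updates further down imply:

/* Sketch only: assumes mnt_cifs_flags is now an atomic_t; the real
 * helper lives outside this section of the diff. */
static inline unsigned int cifs_sb_flags(struct cifs_sb_info *cifs_sb)
{
        return atomic_read(&cifs_sb->mnt_cifs_flags);
}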
+1 -1
fs/smb/client/ioctl.c
··· 216 216 */ 217 217 case CIFS_GOING_FLAGS_LOGFLUSH: 218 218 case CIFS_GOING_FLAGS_NOLOGFLUSH: 219 - sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN; 219 + atomic_or(CIFS_MOUNT_SHUTDOWN, &sbi->mnt_cifs_flags); 220 220 goto shutdown_good; 221 221 default: 222 222 rc = -EINVAL;
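For reference, a hedged illustration of what the atomic helper buys here: the old compound assignment is a non-atomic load/modify/store, so two concurrent updaters could each read the same old value and one set bit could be lost. The atomic forms do the whole update in a single step:

/* Illustration, not part of the patch: the racy form was
 *     sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN;
 * which compiles to separate load, OR, and store instructions. */
atomic_or(CIFS_MOUNT_SHUTDOWN, &sbi->mnt_cifs_flags);      /* set bits */
atomic_andnot(CIFS_MOUNT_SHUTDOWN, &sbi->mnt_cifs_flags);  /* clear bits */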
+8 -6
fs/smb/client/link.c
··· 544 544 cifs_symlink(struct mnt_idmap *idmap, struct inode *inode, 545 545 struct dentry *direntry, const char *symname) 546 546 { 547 - int rc = -EOPNOTSUPP; 548 - unsigned int xid; 549 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 547 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 548 + struct inode *newinode = NULL; 550 549 struct tcon_link *tlink; 551 550 struct cifs_tcon *pTcon; 552 551 const char *full_path; 552 + int rc = -EOPNOTSUPP; 553 + unsigned int sbflags; 554 + unsigned int xid; 553 555 void *page; 554 - struct inode *newinode = NULL; 555 556 556 557 if (unlikely(cifs_forced_shutdown(cifs_sb))) 557 558 return smb_EIO(smb_eio_trace_forced_shutdown); ··· 581 580 cifs_dbg(FYI, "symname is %s\n", symname); 582 581 583 582 /* BB what if DFS and this volume is on different share? BB */ 583 + sbflags = cifs_sb_flags(cifs_sb); 584 584 rc = -EOPNOTSUPP; 585 585 switch (cifs_symlink_type(cifs_sb)) { 586 586 case CIFS_SYMLINK_TYPE_UNIX: ··· 596 594 break; 597 595 598 596 case CIFS_SYMLINK_TYPE_MFSYMLINKS: 599 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { 597 + if (sbflags & CIFS_MOUNT_MF_SYMLINKS) { 600 598 rc = create_mf_symlink(xid, pTcon, cifs_sb, 601 599 full_path, symname); 602 600 } 603 601 break; 604 602 605 603 case CIFS_SYMLINK_TYPE_SFU: 606 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { 604 + if (sbflags & CIFS_MOUNT_UNX_EMUL) { 607 605 rc = __cifs_sfu_make_node(xid, inode, direntry, pTcon, 608 606 full_path, S_IFLNK, 609 607 0, symname);
+10 -6
fs/smb/client/misc.c
··· 275 275 void 276 276 cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb) 277 277 { 278 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 278 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 279 + 280 + if (sbflags & CIFS_MOUNT_SERVER_INUM) { 279 281 struct cifs_tcon *tcon = NULL; 280 282 281 283 if (cifs_sb->master_tlink) 282 284 tcon = cifs_sb_master_tcon(cifs_sb); 283 285 284 - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; 286 + atomic_andnot(CIFS_MOUNT_SERVER_INUM, &cifs_sb->mnt_cifs_flags); 285 287 cifs_sb->mnt_cifs_serverino_autodisabled = true; 286 288 cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n", 287 289 tcon ? tcon->tree_name : "new server"); ··· 384 382 bool 385 383 backup_cred(struct cifs_sb_info *cifs_sb) 386 384 { 387 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) { 385 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 386 + 387 + if (sbflags & CIFS_MOUNT_CIFS_BACKUPUID) { 388 388 if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid())) 389 389 return true; 390 390 } 391 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) { 391 + if (sbflags & CIFS_MOUNT_CIFS_BACKUPGID) { 392 392 if (in_group_p(cifs_sb->ctx->backupgid)) 393 393 return true; 394 394 } ··· 959 955 convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); 960 956 } 961 957 962 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 958 + atomic_or(CIFS_MOUNT_USE_PREFIX_PATH, &cifs_sb->mnt_cifs_flags); 963 959 return 0; 964 960 } 965 961 ··· 988 984 * look up or tcon is not DFS. 989 985 */ 990 986 if (strlen(full_path) < 2 || !cifs_sb || 991 - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) || 987 + (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_DFS) || 992 988 !is_tcon_dfs(tcon)) 993 989 return 0; 994 990
+21 -18
fs/smb/client/readdir.c
··· 121 121 * want to clobber the existing one with the one that 122 122 * the readdir code created. 123 123 */ 124 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) 124 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM)) 125 125 fattr->cf_uniqueid = CIFS_I(inode)->uniqueid; 126 126 127 127 /* ··· 177 177 struct cifs_open_info_data data = { 178 178 .reparse = { .tag = fattr->cf_cifstag, }, 179 179 }; 180 + unsigned int sbflags; 180 181 181 182 fattr->cf_uid = cifs_sb->ctx->linux_uid; 182 183 fattr->cf_gid = cifs_sb->ctx->linux_gid; ··· 216 215 * may look wrong since the inodes may not have timed out by the time 217 216 * "ls" does a stat() call on them. 218 217 */ 219 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) || 220 - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) 218 + sbflags = cifs_sb_flags(cifs_sb); 219 + if (sbflags & (CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_MODE_FROM_SID)) 221 220 fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; 222 221 223 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL && 224 - fattr->cf_cifsattrs & ATTR_SYSTEM) { 222 + if ((sbflags & CIFS_MOUNT_UNX_EMUL) && 223 + (fattr->cf_cifsattrs & ATTR_SYSTEM)) { 225 224 if (fattr->cf_eof == 0) { 226 225 fattr->cf_mode &= ~S_IFMT; 227 226 fattr->cf_mode |= S_IFIFO; ··· 346 345 _initiate_cifs_search(const unsigned int xid, struct file *file, 347 346 const char *full_path) 348 347 { 348 + struct cifs_sb_info *cifs_sb = CIFS_SB(file); 349 + struct tcon_link *tlink = NULL; 350 + struct TCP_Server_Info *server; 351 + struct cifsFileInfo *cifsFile; 352 + struct cifs_tcon *tcon; 353 + unsigned int sbflags; 349 354 __u16 search_flags; 350 355 int rc = 0; 351 - struct cifsFileInfo *cifsFile; 352 - struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 353 - struct tcon_link *tlink = NULL; 354 - struct cifs_tcon *tcon; 355 - struct TCP_Server_Info *server; 356 356 357 357 if (file->private_data == NULL) { 358 358 tlink = cifs_sb_tlink(cifs_sb); ··· 387 385 cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos); 388 386 389 387 ffirst_retry: 388 + sbflags = cifs_sb_flags(cifs_sb); 390 389 /* test for Unix extensions */ 391 390 /* but now check for them on the share/mount not on the SMB session */ 392 391 /* if (cap_unix(tcon->ses) { */ ··· 398 395 else if ((tcon->ses->capabilities & 399 396 tcon->ses->server->vals->cap_nt_find) == 0) { 400 397 cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD; 401 - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 398 + } else if (sbflags & CIFS_MOUNT_SERVER_INUM) { 402 399 cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; 403 400 } else /* not srvinos - BB fixme add check for backlevel? 
*/ { 404 401 cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO; ··· 414 411 415 412 if (rc == 0) { 416 413 cifsFile->invalidHandle = false; 417 - } else if ((rc == -EOPNOTSUPP) && 418 - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { 414 + } else if (rc == -EOPNOTSUPP && (sbflags & CIFS_MOUNT_SERVER_INUM)) { 419 415 cifs_autodisable_serverino(cifs_sb); 420 416 goto ffirst_retry; 421 417 } ··· 692 690 loff_t first_entry_in_buffer; 693 691 loff_t index_to_find = pos; 694 692 struct cifsFileInfo *cfile = file->private_data; 695 - struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 693 + struct cifs_sb_info *cifs_sb = CIFS_SB(file); 696 694 struct TCP_Server_Info *server = tcon->ses->server; 697 695 /* check if index in the buffer */ 698 696 ··· 957 955 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 958 956 struct cifs_dirent de = { NULL, }; 959 957 struct cifs_fattr fattr; 958 + unsigned int sbflags; 960 959 struct qstr name; 961 960 int rc = 0; 962 961 ··· 1022 1019 break; 1023 1020 } 1024 1021 1025 - if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { 1022 + sbflags = cifs_sb_flags(cifs_sb); 1023 + if (de.ino && (sbflags & CIFS_MOUNT_SERVER_INUM)) { 1026 1024 fattr.cf_uniqueid = de.ino; 1027 1025 } else { 1028 1026 fattr.cf_uniqueid = iunique(sb, ROOT_I); 1029 1027 cifs_autodisable_serverino(cifs_sb); 1030 1028 } 1031 1029 1032 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) && 1033 - couldbe_mf_symlink(&fattr)) 1030 + if ((sbflags & CIFS_MOUNT_MF_SYMLINKS) && couldbe_mf_symlink(&fattr)) 1034 1031 /* 1035 1032 * trying to get the type and mode can be slow, 1036 1033 * so just call those regular files for now, and mark ··· 1061 1058 const char *full_path; 1062 1059 void *page = alloc_dentry_path(); 1063 1060 struct cached_fid *cfid = NULL; 1064 - struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 1061 + struct cifs_sb_info *cifs_sb = CIFS_SB(file); 1065 1062 1066 1063 xid = get_xid(); 1067 1064
+15 -14
fs/smb/client/reparse.c
··· 55 55 const char *full_path, const char *symname) 56 56 { 57 57 struct reparse_symlink_data_buffer *buf = NULL; 58 - struct cifs_open_info_data data = {}; 59 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 58 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 60 59 const char *symroot = cifs_sb->ctx->symlinkroot; 61 - struct inode *new; 62 - struct kvec iov; 63 - __le16 *path = NULL; 64 - bool directory; 65 - char *symlink_target = NULL; 66 - char *sym = NULL; 60 + struct cifs_open_info_data data = {}; 67 61 char sep = CIFS_DIR_SEP(cifs_sb); 62 + char *symlink_target = NULL; 68 63 u16 len, plen, poff, slen; 64 + unsigned int sbflags; 65 + __le16 *path = NULL; 66 + struct inode *new; 67 + char *sym = NULL; 68 + struct kvec iov; 69 + bool directory; 69 70 int rc = 0; 70 71 71 72 if (strlen(symname) > REPARSE_SYM_PATH_MAX) ··· 84 83 .symlink_target = symlink_target, 85 84 }; 86 85 87 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && 88 - symroot && symname[0] == '/') { 86 + sbflags = cifs_sb_flags(cifs_sb); 87 + if (!(sbflags & CIFS_MOUNT_POSIX_PATHS) && symroot && symname[0] == '/') { 89 88 /* 90 89 * This is a request to create an absolute symlink on the server 91 90 * which does not support POSIX paths, and expects symlink in ··· 165 164 * mask these characters in NT object prefix by '_' and then change 166 165 * them back. 167 166 */ 168 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') 167 + if (!(sbflags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') 169 168 sym[0] = sym[1] = sym[2] = sym[5] = '_'; 170 169 171 170 path = cifs_convert_path_to_utf16(sym, cifs_sb); ··· 174 173 goto out; 175 174 } 176 175 177 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') { 176 + if (!(sbflags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') { 178 177 sym[0] = '\\'; 179 178 sym[1] = sym[2] = '?'; 180 179 sym[5] = ':'; ··· 198 197 slen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX); 199 198 poff = 0; 200 199 plen = slen; 201 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') { 200 + if (!(sbflags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') { 202 201 /* 203 202 * For absolute NT symlinks skip leading "\\??\\" in PrintName as 204 203 * PrintName is user visible location in DOS/Win32 format (not in NT format). ··· 825 824 goto out; 826 825 } 827 826 828 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && 827 + if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_POSIX_PATHS) && 829 828 symroot && !relative) { 830 829 /* 831 830 * This is an absolute symlink from the server which does not
+2 -2
fs/smb/client/reparse.h
··· 33 33 { 34 34 u32 uid = le32_to_cpu(*(__le32 *)ptr); 35 35 36 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) 36 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_OVERR_UID) 37 37 return cifs_sb->ctx->linux_uid; 38 38 return make_kuid(current_user_ns(), uid); 39 39 } ··· 43 43 { 44 44 u32 gid = le32_to_cpu(*(__le32 *)ptr); 45 45 46 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) 46 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_OVERR_GID) 47 47 return cifs_sb->ctx->linux_gid; 48 48 return make_kgid(current_user_ns(), gid); 49 49 }
+14 -8
fs/smb/client/smb1ops.c
··· 49 49 50 50 if (!CIFSSMBQFSUnixInfo(xid, tcon)) { 51 51 __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 52 + unsigned int sbflags; 52 53 53 54 cifs_dbg(FYI, "unix caps which server supports %lld\n", cap); 54 55 /* ··· 76 75 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) 77 76 cifs_dbg(VFS, "per-share encryption not supported yet\n"); 78 77 78 + if (cifs_sb) 79 + sbflags = cifs_sb_flags(cifs_sb); 80 + 79 81 cap &= CIFS_UNIX_CAP_MASK; 80 82 if (ctx && ctx->no_psx_acl) 81 83 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 82 84 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { 83 85 cifs_dbg(FYI, "negotiated posix acl support\n"); 84 86 if (cifs_sb) 85 - cifs_sb->mnt_cifs_flags |= 86 - CIFS_MOUNT_POSIXACL; 87 + sbflags |= CIFS_MOUNT_POSIXACL; 87 88 } 88 89 89 90 if (ctx && ctx->posix_paths == 0) ··· 93 90 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { 94 91 cifs_dbg(FYI, "negotiate posix pathnames\n"); 95 92 if (cifs_sb) 96 - cifs_sb->mnt_cifs_flags |= 97 - CIFS_MOUNT_POSIX_PATHS; 93 + sbflags |= CIFS_MOUNT_POSIX_PATHS; 98 94 } 95 + 96 + if (cifs_sb) 97 + atomic_set(&cifs_sb->mnt_cifs_flags, sbflags); 99 98 100 99 cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap); 101 100 #ifdef CONFIG_CIFS_DEBUG2 ··· 1152 1147 __u64 volatile_fid, __u16 net_fid, 1153 1148 struct cifsInodeInfo *cinode, unsigned int oplock) 1154 1149 { 1155 - unsigned int sbflags = CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags; 1150 + unsigned int sbflags = cifs_sb_flags(CIFS_SB(cinode)); 1156 1151 __u8 op; 1157 1152 1158 1153 op = !!((oplock & CIFS_CACHE_READ_FLG) || (sbflags & CIFS_MOUNT_RO_CACHE)); ··· 1287 1282 struct dentry *dentry, struct cifs_tcon *tcon, 1288 1283 const char *full_path, umode_t mode, dev_t dev) 1289 1284 { 1290 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1285 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode); 1286 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 1291 1287 struct inode *newinode = NULL; 1292 1288 int rc; 1293 1289 ··· 1304 1298 .mtime = NO_CHANGE_64, 1305 1299 .device = dev, 1306 1300 }; 1307 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { 1301 + if (sbflags & CIFS_MOUNT_SET_UID) { 1308 1302 args.uid = current_fsuid(); 1309 1303 args.gid = current_fsgid(); 1310 1304 } else { ··· 1323 1317 if (rc == 0) 1324 1318 d_instantiate(dentry, newinode); 1325 1319 return rc; 1326 - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { 1320 + } else if (sbflags & CIFS_MOUNT_UNX_EMUL) { 1327 1321 /* 1328 1322 * Check if mounted with mount parm 'sfu' mount parm. 1329 1323 * SFU emulation should work with all servers
+1 -1
fs/smb/client/smb2file.c
··· 72 72 * POSIX server does not distinguish between symlinks to file and 73 73 * symlink directory. So nothing is needed to fix on the client side. 74 74 */ 75 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) 75 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_POSIX_PATHS) 76 76 return 0; 77 77 78 78 if (!*target)
+4 -14
fs/smb/client/smb2misc.c
··· 455 455 __le16 * 456 456 cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb) 457 457 { 458 - int len; 459 458 const char *start_of_path; 460 - __le16 *to; 461 - int map_type; 462 - 463 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) 464 - map_type = SFM_MAP_UNI_RSVD; 465 - else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) 466 - map_type = SFU_MAP_UNI_RSVD; 467 - else 468 - map_type = NO_MAP_UNI_RSVD; 459 + int len; 469 460 470 461 /* Windows doesn't allow paths beginning with \ */ 471 462 if (from[0] == '\\') ··· 470 479 } else 471 480 start_of_path = from; 472 481 473 - to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len, 474 - cifs_sb->local_nls, map_type); 475 - return to; 482 + return cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len, 483 + cifs_sb->local_nls, cifs_remap(cifs_sb)); 476 484 } 477 485 478 486 __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode, unsigned int oplock) 479 487 { 480 - unsigned int sbflags = CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags; 488 + unsigned int sbflags = cifs_sb_flags(CIFS_SB(cinode)); 481 489 __le32 lease = 0; 482 490 483 491 if ((oplock & CIFS_CACHE_WRITE_FLG) || (sbflags & CIFS_MOUNT_RW_CACHE))
+4 -4
fs/smb/client/smb2ops.c
··· 986 986 rc = -EREMOTE; 987 987 } 988 988 if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && 989 - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)) 989 + (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_DFS)) 990 990 rc = -EOPNOTSUPP; 991 991 goto out; 992 992 } ··· 2691 2691 __u64 volatile_fid, __u16 net_fid, 2692 2692 struct cifsInodeInfo *cinode, unsigned int oplock) 2693 2693 { 2694 - unsigned int sbflags = CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags; 2694 + unsigned int sbflags = cifs_sb_flags(CIFS_SB(cinode)); 2695 2695 __u8 op; 2696 2696 2697 2697 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) ··· 5332 5332 struct dentry *dentry, struct cifs_tcon *tcon, 5333 5333 const char *full_path, umode_t mode, dev_t dev) 5334 5334 { 5335 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 5335 + unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode)); 5336 5336 int rc = -EOPNOTSUPP; 5337 5337 5338 5338 /* ··· 5341 5341 * supports block and char device, socket & fifo, 5342 5342 * and was used by default in earlier versions of Windows 5343 5343 */ 5344 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { 5344 + if (sbflags & CIFS_MOUNT_UNX_EMUL) { 5345 5345 rc = cifs_sfu_make_node(xid, inode, dentry, tcon, 5346 5346 full_path, mode, dev); 5347 5347 } else if (CIFS_REPARSE_SUPPORT(tcon)) {
+15 -20
fs/smb/client/smb2pdu.c
··· 1714 1714 is_binding = (ses->ses_status == SES_GOOD); 1715 1715 spin_unlock(&ses->ses_lock); 1716 1716 1717 - /* keep session key if binding */ 1718 - if (!is_binding) { 1719 - kfree_sensitive(ses->auth_key.response); 1720 - ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, 1721 - GFP_KERNEL); 1722 - if (!ses->auth_key.response) { 1723 - cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n", 1724 - msg->sesskey_len); 1725 - rc = -ENOMEM; 1726 - goto out_put_spnego_key; 1727 - } 1728 - ses->auth_key.len = msg->sesskey_len; 1717 + kfree_sensitive(ses->auth_key.response); 1718 + ses->auth_key.response = kmemdup(msg->data, 1719 + msg->sesskey_len, 1720 + GFP_KERNEL); 1721 + if (!ses->auth_key.response) { 1722 + cifs_dbg(VFS, "%s: can't allocate (%u bytes) memory\n", 1723 + __func__, msg->sesskey_len); 1724 + rc = -ENOMEM; 1725 + goto out_put_spnego_key; 1729 1726 } 1727 + ses->auth_key.len = msg->sesskey_len; 1730 1728 1731 1729 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; 1732 1730 sess_data->iov[1].iov_len = msg->secblob_len; ··· 3180 3182 } 3181 3183 3182 3184 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) { 3185 + unsigned int sbflags = cifs_sb_flags(oparms->cifs_sb); 3183 3186 bool set_mode; 3184 3187 bool set_owner; 3185 3188 3186 - if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) && 3187 - (oparms->mode != ACL_NO_MODE)) 3189 + if ((sbflags & CIFS_MOUNT_MODE_FROM_SID) && 3190 + oparms->mode != ACL_NO_MODE) { 3188 3191 set_mode = true; 3189 - else { 3192 + } else { 3190 3193 set_mode = false; 3191 3194 oparms->mode = ACL_NO_MODE; 3192 3195 } 3193 3196 3194 - if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) 3195 - set_owner = true; 3196 - else 3197 - set_owner = false; 3198 - 3197 + set_owner = sbflags & CIFS_MOUNT_UID_FROM_ACL; 3199 3198 if (set_owner | set_mode) { 3200 3199 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode); 3201 3200 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
+10 -11
fs/smb/client/transport.c
··· 807 807 } 808 808 809 809 /* 810 - * Return a channel (master if none) of @ses that can be used to send 811 - * regular requests. 810 + * cifs_pick_channel - pick an eligible channel for network operations 812 811 * 813 - * If we are currently binding a new channel (negprot/sess.setup), 814 - * return the new incomplete channel. 812 + * @ses: session reference 813 + * 814 + * Select an eligible channel (not terminating and not marked as needing 815 + * reconnect), preferring the least loaded one. If no eligible channel is 816 + * found, fall back to the primary channel (index 0). 817 + * 818 + * Return: TCP_Server_Info pointer for the chosen channel, or NULL if @ses is 819 + * NULL. 815 820 */ 816 821 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses) 817 822 { 818 823 uint index = 0; 819 - unsigned int min_in_flight = UINT_MAX, max_in_flight = 0; 824 + unsigned int min_in_flight = UINT_MAX; 820 825 struct TCP_Server_Info *server = NULL; 821 826 int i, start, cur; 822 827 ··· 851 846 min_in_flight = server->in_flight; 852 847 index = cur; 853 848 } 854 - if (server->in_flight > max_in_flight) 855 - max_in_flight = server->in_flight; 856 849 } 857 - 858 - /* if all channels are equally loaded, fall back to round-robin */ 859 - if (min_in_flight == max_in_flight) 860 - index = (uint)start % ses->chan_count; 861 850 862 851 server = ses->chans[index].server; 863 852 spin_unlock(&ses->chan_lock);
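With the max_in_flight bookkeeping gone, channel selection is a plain minimum scan: ties now resolve to the first eligible channel scanned rather than rotating round-robin. A standalone toy model of the policy, with all names invented for illustration:

#include <limits.h>

/* Toy model of the selection policy after this change: return the index
 * with the fewest in-flight requests; ties keep the earliest candidate. */
static int pick_least_loaded(const unsigned int *in_flight, int n)
{
        unsigned int min_in_flight = UINT_MAX;
        int i, index = 0;

        for (i = 0; i < n; i++) {
                if (in_flight[i] < min_in_flight) {
                        min_in_flight = in_flight[i];
                        index = i;
                }
        }
        return index;
}

So for loads {2, 2, 2} this always picks index 0, where the removed fallback would have rotated the starting channel.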
+3 -3
fs/smb/client/xattr.c
··· 149 149 break; 150 150 } 151 151 152 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 152 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_XATTR) 153 153 goto out; 154 154 155 155 if (pTcon->ses->server->ops->set_EA) { ··· 309 309 break; 310 310 } 311 311 312 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 312 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_XATTR) 313 313 goto out; 314 314 315 315 if (pTcon->ses->server->ops->query_all_EAs) ··· 398 398 if (unlikely(cifs_forced_shutdown(cifs_sb))) 399 399 return smb_EIO(smb_eio_trace_forced_shutdown); 400 400 401 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 401 + if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_XATTR) 402 402 return -EOPNOTSUPP; 403 403 404 404 tlink = cifs_sb_tlink(cifs_sb);
+1
fs/smb/server/Kconfig
··· 13 13 select CRYPTO_LIB_MD5 14 14 select CRYPTO_LIB_SHA256 15 15 select CRYPTO_LIB_SHA512 16 + select CRYPTO_LIB_UTILS 16 17 select CRYPTO_CMAC 17 18 select CRYPTO_AEAD2 18 19 select CRYPTO_CCM
+3 -1
fs/smb/server/auth.c
··· 15 15 #include <crypto/aead.h> 16 16 #include <crypto/md5.h> 17 17 #include <crypto/sha2.h> 18 + #include <crypto/utils.h> 18 19 #include <linux/random.h> 19 20 #include <linux/scatterlist.h> 20 21 ··· 166 165 ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE, 167 166 sess->sess_key); 168 167 169 - if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0) 168 + if (crypto_memneq(ntlmv2->ntlmv2_hash, ntlmv2_rsp, 169 + CIFS_HMAC_MD5_HASH_SIZE)) 170 170 return -EINVAL; 171 171 return 0; 172 172 }
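crypto_memneq() is the constant-time counterpart of memcmp() for exactly this situation: memcmp() may return as soon as a byte differs, so response timing leaks how long the matching prefix of an attacker-supplied MAC was, while crypto_memneq() examines every byte and only reports whether the buffers differ. A sketch of the pattern, with placeholder names that are not from the patch:

#include <linux/types.h>
#include <crypto/utils.h>

/* computed_mac/received_mac/mac_len are placeholders; mirrors the hunk. */
static int check_mac(const u8 *computed_mac, const u8 *received_mac,
                     size_t mac_len)
{
        /* nonzero iff the buffers differ; runtime does not depend on
         * where the first mismatching byte is, unlike memcmp(). */
        if (crypto_memneq(computed_mac, received_mac, mac_len))
                return -EINVAL;
        return 0;
}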
+3 -2
fs/smb/server/smb2pdu.c
··· 4 4 * Copyright (C) 2018 Samsung Electronics Co., Ltd. 5 5 */ 6 6 7 + #include <crypto/utils.h> 7 8 #include <linux/inetdevice.h> 8 9 #include <net/addrconf.h> 9 10 #include <linux/syscalls.h> ··· 8881 8880 ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, 1, 8882 8881 signature); 8883 8882 8884 - if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) { 8883 + if (crypto_memneq(signature, signature_req, SMB2_SIGNATURE_SIZE)) { 8885 8884 pr_err("bad smb2 signature\n"); 8886 8885 return 0; 8887 8886 } ··· 8969 8968 if (ksmbd_sign_smb3_pdu(conn, signing_key, iov, 1, signature)) 8970 8969 return 0; 8971 8970 8972 - if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) { 8971 + if (crypto_memneq(signature, signature_req, SMB2_SIGNATURE_SIZE)) { 8973 8972 pr_err("bad smb2 signature\n"); 8974 8973 return 0; 8975 8974 }
+2 -2
fs/smb/server/transport_rdma.c
··· 2540 2540 goto put; 2541 2541 2542 2542 req = (struct smbdirect_negotiate_req *)recvmsg->packet; 2543 - sp->max_recv_size = min_t(int, sp->max_recv_size, 2543 + sp->max_recv_size = min_t(u32, sp->max_recv_size, 2544 2544 le32_to_cpu(req->preferred_send_size)); 2545 - sp->max_send_size = min_t(int, sp->max_send_size, 2545 + sp->max_send_size = min_t(u32, sp->max_send_size, 2546 2546 le32_to_cpu(req->max_receive_size)); 2547 2547 sp->max_fragmented_send_size = 2548 2548 le32_to_cpu(req->max_fragmented_size);
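A worked illustration of the signedness fix, since both compared values originate from peer-controlled wire fields via le32_to_cpu():

/* Suppose sp->max_recv_size == 8192 and a malicious peer sends
 * preferred_send_size == 0x80000000:
 *
 *   min_t(int, 8192, 0x80000000)  - the value casts to a negative int,
 *                                   "wins" the comparison, and is stored
 *                                   back into the u32 field as ~2 GiB;
 *   min_t(u32, 8192, 0x80000000)  - the comparison stays unsigned and
 *                                   yields 8192 as intended.
 */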
+3
fs/squashfs/cache.c
··· 344 344 if (unlikely(length < 0)) 345 345 return -EIO; 346 346 347 + if (unlikely(*offset < 0 || *offset >= SQUASHFS_METADATA_SIZE)) 348 + return -EIO; 349 + 347 350 while (length) { 348 351 entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); 349 352 if (entry->error) {
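The new test treats the caller-supplied metadata offset as untrusted input from the image rather than a trusted invariant; in comment form, the bound being enforced:

/* A squashfs metadata block decompresses to at most SQUASHFS_METADATA_SIZE
 * (8 KiB) bytes, so an *offset outside [0, SQUASHFS_METADATA_SIZE) can
 * never be a valid position inside the cached block; a crafted image that
 * supplies one is rejected with -EIO before the copy loop runs. */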
+28
fs/xfs/libxfs/xfs_ag.c
··· 872 872 return err2; 873 873 } 874 874 875 + void 876 + xfs_growfs_compute_deltas( 877 + struct xfs_mount *mp, 878 + xfs_rfsblock_t nb, 879 + int64_t *deltap, 880 + xfs_agnumber_t *nagcountp) 881 + { 882 + xfs_rfsblock_t nb_div, nb_mod; 883 + int64_t delta; 884 + xfs_agnumber_t nagcount; 885 + 886 + nb_div = nb; 887 + nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks); 888 + if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS) 889 + nb_div++; 890 + else if (nb_mod) 891 + nb = nb_div * mp->m_sb.sb_agblocks; 892 + 893 + if (nb_div > XFS_MAX_AGNUMBER + 1) { 894 + nb_div = XFS_MAX_AGNUMBER + 1; 895 + nb = nb_div * mp->m_sb.sb_agblocks; 896 + } 897 + nagcount = nb_div; 898 + delta = nb - mp->m_sb.sb_dblocks; 899 + *deltap = delta; 900 + *nagcountp = nagcount; 901 + } 902 + 875 903 /* 876 904 * Extent the AG indicated by the @id by the length passed in 877 905 */
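A worked example of the helper's rounding, with illustrative values far smaller than any real filesystem and assuming the usual XFS_MIN_AG_BLOCKS of 64:

/* Assume mp->m_sb.sb_agblocks = 100, mp->m_sb.sb_dblocks = 950,
 * and a grow request of nb = 1024 blocks:
 *   nb_div = 10, nb_mod = 24; since 24 < XFS_MIN_AG_BLOCKS the runt tail
 *   is dropped, nb becomes 1000, so *nagcountp = 10 and *deltap = 50.
 * A tail of 64+ blocks would instead round nb_div up to a runt 11th AG. */
int64_t delta;
xfs_agnumber_t nagcount;

xfs_growfs_compute_deltas(mp, 1024, &delta, &nagcount);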
+3
fs/xfs/libxfs/xfs_ag.h
··· 331 331 int xfs_ag_init_headers(struct xfs_mount *mp, struct aghdr_init_data *id); 332 332 int xfs_ag_shrink_space(struct xfs_perag *pag, struct xfs_trans **tpp, 333 333 xfs_extlen_t delta); 334 + void xfs_growfs_compute_deltas(struct xfs_mount *mp, 335 + xfs_rfsblock_t nb, int64_t *deltap, 336 + xfs_agnumber_t *nagcountp); 334 337 int xfs_ag_extend_space(struct xfs_perag *pag, struct xfs_trans *tp, 335 338 xfs_extlen_t len); 336 339 int xfs_ag_get_geometry(struct xfs_perag *pag, struct xfs_ag_geometry *ageo);
+4
fs/xfs/libxfs/xfs_inode_buf.c
··· 268 268 } 269 269 if (xfs_is_reflink_inode(ip)) 270 270 xfs_ifork_init_cow(ip); 271 + if (xfs_is_metadir_inode(ip)) { 272 + XFS_STATS_DEC(ip->i_mount, xs_inodes_active); 273 + XFS_STATS_INC(ip->i_mount, xs_inodes_meta); 274 + } 271 275 return 0; 272 276 273 277 out_destroy_data_fork:
+5
fs/xfs/libxfs/xfs_metafile.c
··· 61 61 ip->i_diflags2 |= XFS_DIFLAG2_METADATA; 62 62 ip->i_metatype = metafile_type; 63 63 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 64 + 65 + XFS_STATS_DEC(ip->i_mount, xs_inodes_active); 66 + XFS_STATS_INC(ip->i_mount, xs_inodes_meta); 64 67 } 65 68 66 69 /* Clear the metadata directory inode flag. */ ··· 77 74 78 75 ip->i_diflags2 &= ~XFS_DIFLAG2_METADATA; 79 76 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 77 + XFS_STATS_INC(ip->i_mount, xs_inodes_active); 78 + XFS_STATS_DEC(ip->i_mount, xs_inodes_meta); 80 79 } 81 80 82 81 /*
+36 -16
fs/xfs/libxfs/xfs_ondisk.h
··· 73 73 XFS_CHECK_STRUCT_SIZE(struct xfs_dir3_free_hdr, 64); 74 74 XFS_CHECK_STRUCT_SIZE(struct xfs_dir3_leaf, 64); 75 75 XFS_CHECK_STRUCT_SIZE(struct xfs_dir3_leaf_hdr, 64); 76 - XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_entry, 8); 76 + XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_entry, 8); 77 77 XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_hdr, 32); 78 78 XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_map, 4); 79 79 XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_name_local, 4); ··· 116 116 XFS_CHECK_STRUCT_SIZE(struct xfs_da_intnode, 16); 117 117 XFS_CHECK_STRUCT_SIZE(struct xfs_da_node_entry, 8); 118 118 XFS_CHECK_STRUCT_SIZE(struct xfs_da_node_hdr, 16); 119 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_free, 4); 119 + XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_free, 4); 120 120 XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_hdr, 16); 121 121 XFS_CHECK_OFFSET(struct xfs_dir2_data_unused, freetag, 0); 122 122 XFS_CHECK_OFFSET(struct xfs_dir2_data_unused, length, 2); ··· 136 136 /* ondisk dir/attr structures from xfs/122 */ 137 137 XFS_CHECK_STRUCT_SIZE(struct xfs_attr_sf_entry, 3); 138 138 XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_free, 4); 139 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_hdr, 16); 140 139 XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_unused, 6); 141 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_free, 16); 142 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_free_hdr, 16); 143 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_leaf, 16); 144 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_leaf_entry, 8); 145 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_leaf_hdr, 16); 146 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_leaf_tail, 4); 147 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_sf_entry, 3); 148 - XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_sf_hdr, 10); 149 140 150 141 /* log structures */ 151 142 XFS_CHECK_STRUCT_SIZE(struct xfs_buf_log_format, 88); ··· 207 216 XFS_CHECK_OFFSET(struct xfs_dir3_data_hdr, hdr.magic, 0); 208 217 XFS_CHECK_OFFSET(struct xfs_dir3_free, hdr.hdr.magic, 0); 209 218 XFS_CHECK_OFFSET(struct xfs_attr3_leafblock, hdr.info.hdr, 0); 210 - 211 - XFS_CHECK_STRUCT_SIZE(struct xfs_bulkstat, 192); 212 - XFS_CHECK_STRUCT_SIZE(struct xfs_inumbers, 24); 213 - XFS_CHECK_STRUCT_SIZE(struct xfs_bulkstat_req, 64); 214 - XFS_CHECK_STRUCT_SIZE(struct xfs_inumbers_req, 64); 215 219 216 220 /* 217 221 * Make sure the incore inode timestamp range corresponds to hand ··· 287 301 XFS_CHECK_SB_OFFSET(sb_pad, 281); 288 302 XFS_CHECK_SB_OFFSET(sb_rtstart, 288); 289 303 XFS_CHECK_SB_OFFSET(sb_rtreserved, 296); 304 + 305 + /* 306 + * ioctl UABI 307 + * 308 + * Due to different padding/alignment requirements across 309 + * different architectures, some structures are omitted from 310 + * the size checks. In addition, structures with architecture 311 + * dependent size fields are also omitted (e.g. __kernel_long_t).
312 + */ 313 + XFS_CHECK_STRUCT_SIZE(struct xfs_bulkstat, 192); 314 + XFS_CHECK_STRUCT_SIZE(struct xfs_inumbers, 24); 315 + XFS_CHECK_STRUCT_SIZE(struct xfs_bulkstat_req, 64); 316 + XFS_CHECK_STRUCT_SIZE(struct xfs_inumbers_req, 64); 317 + XFS_CHECK_STRUCT_SIZE(struct dioattr, 12); 318 + XFS_CHECK_STRUCT_SIZE(struct getbmap, 32); 319 + XFS_CHECK_STRUCT_SIZE(struct getbmapx, 48); 320 + XFS_CHECK_STRUCT_SIZE(struct xfs_attrlist_cursor, 16); 321 + XFS_CHECK_STRUCT_SIZE(struct xfs_attrlist, 8); 322 + XFS_CHECK_STRUCT_SIZE(struct xfs_attrlist, 8); 323 + XFS_CHECK_STRUCT_SIZE(struct xfs_attrlist_ent, 4); 324 + XFS_CHECK_STRUCT_SIZE(struct xfs_ag_geometry, 128); 325 + XFS_CHECK_STRUCT_SIZE(struct xfs_rtgroup_geometry, 128); 326 + XFS_CHECK_STRUCT_SIZE(struct xfs_error_injection, 8); 327 + XFS_CHECK_STRUCT_SIZE(struct xfs_fsop_geom, 256); 328 + XFS_CHECK_STRUCT_SIZE(struct xfs_fsop_geom_v4, 112); 329 + XFS_CHECK_STRUCT_SIZE(struct xfs_fsop_counts, 32); 330 + XFS_CHECK_STRUCT_SIZE(struct xfs_fsop_resblks, 16); 331 + XFS_CHECK_STRUCT_SIZE(struct xfs_growfs_log, 8); 332 + XFS_CHECK_STRUCT_SIZE(struct xfs_bulk_ireq, 64); 333 + XFS_CHECK_STRUCT_SIZE(struct xfs_fs_eofblocks, 128); 334 + XFS_CHECK_STRUCT_SIZE(struct xfs_fsid, 8); 335 + XFS_CHECK_STRUCT_SIZE(struct xfs_scrub_metadata, 64); 336 + XFS_CHECK_STRUCT_SIZE(struct xfs_scrub_vec, 16); 337 + XFS_CHECK_STRUCT_SIZE(struct xfs_scrub_vec_head, 40); 290 338 } 291 339 292 340 #endif /* __XFS_ONDISK_H */
+3
fs/xfs/libxfs/xfs_sb.c
··· 1347 1347 * feature was introduced. This counter can go negative due to the way 1348 1348 * we handle nearly-lockless reservations, so we must use the _positive 1349 1349 * variant here to avoid writing out nonsense frextents. 1350 + * 1351 + * RT groups are only supported on v5 file systems, which always 1352 + * have lazy SB counters. 1350 1353 */ 1351 1354 if (xfs_has_rtgroups(mp) && !xfs_has_zoned(mp)) { 1352 1355 mp->m_sb.sb_frextents =
+1 -1
fs/xfs/scrub/dir_repair.c
··· 177 177 rd->dir_names = NULL; 178 178 if (rd->dir_entries) 179 179 xfarray_destroy(rd->dir_entries); 180 - rd->dir_names = NULL; 180 + rd->dir_entries = NULL; 181 181 } 182 182 183 183 /* Set up for a directory repair. */
+6 -1
fs/xfs/scrub/orphanage.c
··· 442 442 return 0; 443 443 444 444 d_child = try_lookup_noperm(&qname, d_orphanage); 445 + if (IS_ERR(d_child)) { 446 + dput(d_orphanage); 447 + return PTR_ERR(d_child); 448 + } 449 + 445 450 if (d_child) { 446 451 trace_xrep_adoption_check_child(sc->mp, d_child); 447 452 ··· 484 479 return; 485 480 486 481 d_child = try_lookup_noperm(&qname, d_orphanage); 487 - while (d_child != NULL) { 482 + while (!IS_ERR_OR_NULL(d_child)) { 488 483 trace_xrep_adoption_invalidate_child(sc->mp, d_child); 489 484 490 485 ASSERT(d_is_negative(d_child));
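Both hunks enforce the same contract; for reference, the three possible outcomes of the lookup that the checks now cover:

/* try_lookup_noperm() returns one of:
 *   ERR_PTR(-errno)  - lookup failed; must not be dereferenced or dput()
 *   NULL             - nothing found
 *   dentry           - usable; the caller owns a reference
 * hence IS_ERR() on first use and IS_ERR_OR_NULL() in the loop above. */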
+2 -15
fs/xfs/xfs_fsops.c
··· 95 95 struct xfs_growfs_data *in) /* growfs data input struct */ 96 96 { 97 97 xfs_agnumber_t oagcount = mp->m_sb.sb_agcount; 98 + xfs_rfsblock_t nb = in->newblocks; 98 99 struct xfs_buf *bp; 99 100 int error; 100 101 xfs_agnumber_t nagcount; 101 102 xfs_agnumber_t nagimax = 0; 102 - xfs_rfsblock_t nb, nb_div, nb_mod; 103 103 int64_t delta; 104 104 bool lastag_extended = false; 105 105 struct xfs_trans *tp; 106 106 struct aghdr_init_data id = {}; 107 107 struct xfs_perag *last_pag; 108 108 109 - nb = in->newblocks; 110 109 error = xfs_sb_validate_fsb_count(&mp->m_sb, nb); 111 110 if (error) 112 111 return error; ··· 124 125 mp->m_sb.sb_rextsize); 125 126 if (error) 126 127 return error; 128 + xfs_growfs_compute_deltas(mp, nb, &delta, &nagcount); 127 129 128 - nb_div = nb; 129 - nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks); 130 - if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS) 131 - nb_div++; 132 - else if (nb_mod) 133 - nb = nb_div * mp->m_sb.sb_agblocks; 134 - 135 - if (nb_div > XFS_MAX_AGNUMBER + 1) { 136 - nb_div = XFS_MAX_AGNUMBER + 1; 137 - nb = nb_div * mp->m_sb.sb_agblocks; 138 - } 139 - nagcount = nb_div; 140 - delta = nb - mp->m_sb.sb_dblocks; 141 130 /* 142 131 * Reject filesystems with a single AG because they are not 143 132 * supported, and reject a shrink operation that would cause a
+18 -2
fs/xfs/xfs_health.c
··· 314 314 xfs_rtgroup_put(rtg); 315 315 } 316 316 317 + static inline void xfs_inode_report_fserror(struct xfs_inode *ip) 318 + { 319 + /* 320 + * Do not report inodes being constructed or freed, or metadata inodes, 321 + * to fsnotify. 322 + */ 323 + if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIM) || 324 + xfs_is_internal_inode(ip)) { 325 + fserror_report_metadata(ip->i_mount->m_super, -EFSCORRUPTED, 326 + GFP_NOFS); 327 + return; 328 + } 329 + 330 + fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS); 331 + } 332 + 317 333 /* Mark the unhealthy parts of an inode. */ 318 334 void 319 335 xfs_inode_mark_sick( ··· 355 339 inode_state_clear(VFS_I(ip), I_DONTCACHE); 356 340 spin_unlock(&VFS_I(ip)->i_lock); 357 341 358 - fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS); 342 + xfs_inode_report_fserror(ip); 359 343 if (mask) 360 344 xfs_healthmon_report_inode(ip, XFS_HEALTHMON_SICK, old_mask, 361 345 mask); ··· 387 371 inode_state_clear(VFS_I(ip), I_DONTCACHE); 388 372 spin_unlock(&VFS_I(ip)->i_lock); 389 373 390 - fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS); 374 + xfs_inode_report_fserror(ip); 391 375 if (mask) 392 376 xfs_healthmon_report_inode(ip, XFS_HEALTHMON_CORRUPT, old_mask, 393 377 mask);
+7 -4
fs/xfs/xfs_healthmon.c
··· 69 69 struct xfs_healthmon *hm; 70 70 71 71 rcu_read_lock(); 72 - hm = mp->m_healthmon; 72 + hm = rcu_dereference(mp->m_healthmon); 73 73 if (hm && !refcount_inc_not_zero(&hm->ref)) 74 74 hm = NULL; 75 75 rcu_read_unlock(); ··· 110 110 struct xfs_healthmon *hm) 111 111 { 112 112 spin_lock(&xfs_healthmon_lock); 113 - if (mp->m_healthmon != NULL) { 113 + if (rcu_access_pointer(mp->m_healthmon) != NULL) { 114 114 spin_unlock(&xfs_healthmon_lock); 115 115 return -EEXIST; 116 116 } 117 117 118 118 refcount_inc(&hm->ref); 119 - mp->m_healthmon = hm; 119 + rcu_assign_pointer(mp->m_healthmon, hm); 120 120 hm->mount_cookie = (uintptr_t)mp->m_super; 121 121 spin_unlock(&xfs_healthmon_lock); 122 122 ··· 128 128 xfs_healthmon_detach( 129 129 struct xfs_healthmon *hm) 130 130 { 131 + struct xfs_mount *mp; 132 + 131 133 spin_lock(&xfs_healthmon_lock); 132 134 if (hm->mount_cookie == DETACHED_MOUNT_COOKIE) { 133 135 spin_unlock(&xfs_healthmon_lock); 134 136 return; 135 137 } 136 138 137 - XFS_M((struct super_block *)hm->mount_cookie)->m_healthmon = NULL; 139 + mp = XFS_M((struct super_block *)hm->mount_cookie); 140 + rcu_assign_pointer(mp->m_healthmon, NULL); 138 141 hm->mount_cookie = DETACHED_MOUNT_COOKIE; 139 142 spin_unlock(&xfs_healthmon_lock); 140 143
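The __rcu annotation (see the xfs_mount.h hunk below) lets sparse enforce the usual publish/subscribe discipline: writers publish or clear m_healthmon with rcu_assign_pointer() under xfs_healthmon_lock, and lockless readers pair rcu_read_lock() with rcu_dereference() plus refcount_inc_not_zero() to lose gracefully against teardown. Presumably the final free is deferred past a grace period (e.g. via kfree_rcu()) where the last reference drops; that path is not shown in this hunk. The reader side, as used above:

/* Reader pattern from the first hunk: the RCU read section keeps *hm
 * alive across the dereference, and a failed refcount_inc_not_zero()
 * means teardown already won the race, so the reader backs off. */
rcu_read_lock();
hm = rcu_dereference(mp->m_healthmon);
if (hm && !refcount_inc_not_zero(&hm->ref))
        hm = NULL;
rcu_read_unlock();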
+14 -4
fs/xfs/xfs_icache.c
··· 106 106 mapping_set_folio_min_order(VFS_I(ip)->i_mapping, 107 107 M_IGEO(mp)->min_folio_order); 108 108 109 - XFS_STATS_INC(mp, vn_active); 109 + XFS_STATS_INC(mp, xs_inodes_active); 110 110 ASSERT(atomic_read(&ip->i_pincount) == 0); 111 111 ASSERT(ip->i_ino == 0); 112 112 ··· 172 172 /* asserts to verify all state is correct here */ 173 173 ASSERT(atomic_read(&ip->i_pincount) == 0); 174 174 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list)); 175 - XFS_STATS_DEC(ip->i_mount, vn_active); 175 + if (xfs_is_metadir_inode(ip)) 176 + XFS_STATS_DEC(ip->i_mount, xs_inodes_meta); 177 + else 178 + XFS_STATS_DEC(ip->i_mount, xs_inodes_active); 176 179 177 180 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 178 181 } ··· 639 636 if (!ip) 640 637 return -ENOMEM; 641 638 639 + /* 640 + * Set XFS_INEW as early as possible so that the health code won't pass 641 + * the inode to the fserror code if the ondisk inode cannot be loaded. 642 + * We're going to free the xfs_inode immediately if that happens, which 643 + * would lead to UAF problems. 644 + */ 645 + xfs_iflags_set(ip, XFS_INEW); 646 + 642 647 error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags); 643 648 if (error) 644 649 goto out_destroy; ··· 724 713 ip->i_udquot = NULL; 725 714 ip->i_gdquot = NULL; 726 715 ip->i_pdquot = NULL; 727 - xfs_iflags_set(ip, XFS_INEW); 728 716 729 717 /* insert the new inode */ 730 718 spin_lock(&pag->pag_ici_lock); ··· 2244 2234 struct xfs_mount *mp = ip->i_mount; 2245 2235 bool need_inactive; 2246 2236 2247 - XFS_STATS_INC(mp, vn_reclaim); 2237 + XFS_STATS_INC(mp, xs_inode_mark_reclaimable); 2248 2238 2249 2239 /* 2250 2240 * We should never get here with any of the reclaim flags already set.
+1 -1
fs/xfs/xfs_mount.h
··· 345 345 struct xfs_hooks m_dir_update_hooks; 346 346 347 347 /* Private data referring to a health monitor object. */ 348 - struct xfs_healthmon *m_healthmon; 348 + struct xfs_healthmon __rcu *m_healthmon; 349 349 } xfs_mount_t; 350 350 351 351 #define M_IGEO(mp) (&(mp)->m_ino_geo)
+2 -2
fs/xfs/xfs_notify_failure.c
··· 304 304 305 305 error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp); 306 306 if (error) { 307 - xfs_perag_put(pag); 307 + xfs_perag_rele(pag); 308 308 break; 309 309 } 310 310 ··· 340 340 if (rtg) 341 341 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP); 342 342 if (error) { 343 - xfs_group_put(xg); 343 + xfs_group_rele(xg); 344 344 break; 345 345 } 346 346 }
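The put/rele distinction matters because XFS group references come in two flavors, and an error path must release the same flavor the iteration took. A reference note, hedged on the assumption that these loops acquire their groups with the grab/rele (active) variants, which is what the fix implies:

/* xfs_perag_get() / xfs_perag_put()   - passive ref: structure lifetime
 * xfs_perag_grab() / xfs_perag_rele() - active ref: also holds off
 *                                       group offlining/teardown
 * Dropping an active reference with _put() leaks the active count, so
 * the early-exit paths now use xfs_perag_rele()/xfs_group_rele(). */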
+9
fs/xfs/xfs_platform.h
··· 235 235 236 236 #ifdef XFS_WARN 237 237 238 + /* 239 + * Note that this ASSERT does not kill the kernel on its own; it will, 240 + * however, if the kernel has panic_on_warn set. 241 + */ 238 242 #define ASSERT(expr) \ 239 243 (likely(expr) ? (void)0 : asswarn(NULL, #expr, __FILE__, __LINE__)) 240 244 ··· 249 245 #endif /* XFS_WARN */ 250 246 #endif /* DEBUG */ 251 247 248 + /* 249 + * Use this to catch metadata corruption that block and structure verifiers 250 + * cannot catch, since those verifiers only check for corruption within the 251 + * scope of the single object being verified. 252 + */ 252 253 #define XFS_IS_CORRUPT(mp, expr) \ 253 254 (unlikely(expr) ? xfs_corruption_error(#expr, XFS_ERRLEVEL_LOW, (mp), \ 254 255 NULL, 0, __FILE__, __LINE__, \
+37 -7
fs/xfs/xfs_rtalloc.c
··· 112 112 error = xfs_rtget_summary(oargs, log, bbno, &sum); 113 113 if (error) 114 114 goto out; 115 + if (XFS_IS_CORRUPT(oargs->mp, sum < 0)) { 116 + error = -EFSCORRUPTED; 117 + goto out; 118 + } 115 119 if (sum == 0) 116 120 continue; 117 121 error = xfs_rtmodify_summary(oargs, log, bbno, -sum); ··· 124 120 error = xfs_rtmodify_summary(nargs, log, bbno, sum); 125 121 if (error) 126 122 goto out; 127 - ASSERT(sum > 0); 128 123 } 129 124 } 130 125 error = 0; ··· 1050 1047 */ 1051 1048 xfs_trans_resv_calc(mp, &mp->m_resv); 1052 1049 1050 + /* 1051 + * Sync the sb counters now to reflect the updated values. Lazy counters 1052 + * are not always kept up to date, so sync them here to avoid 1053 + * inconsistencies between frextents and rtextents. 1054 + */ 1055 + 1056 + if (xfs_has_lazysbcount(mp)) 1057 + xfs_log_sb(args.tp); 1058 + 1053 1059 error = xfs_trans_commit(args.tp); 1054 1060 if (error) 1055 1061 goto out_free; ··· 1091 1079 } 1092 1080 1093 1081 /* 1094 - * Calculate the last rbmblock currently used. 1082 + * This returns the bitmap block number (0-based) that will be 1083 + * extended/modified. There are 2 cases here: 1084 + * 1. The size of the rtg is such that it is a multiple of 1085 + * xfs_rtbitmap_rtx_per_rbmblock(), i.e., an integral number of bitmap blocks 1086 + * are completely filled up. In this case, we should return 1087 + * 1 + (the last used bitmap block number). 1088 + * 2. The size of the rtg is not a multiple of xfs_rtbitmap_rtx_per_rbmblock(). 1089 + * Here we return the number of the last used bitmap block. In this 1090 + * case, we will modify the last used bitmap block to extend the size of the 1091 + * rtgroup. 1095 1092 * 1096 1093 * This also deals with the case where there were no rtextents before. 1097 1094 */ 1098 1095 static xfs_fileoff_t 1099 - xfs_last_rt_bmblock( 1096 + xfs_last_rt_bmblock_to_extend( 1100 1097 struct xfs_rtgroup *rtg) 1101 1098 { 1102 1099 struct xfs_mount *mp = rtg_mount(rtg); 1103 1100 xfs_rgnumber_t rgno = rtg_rgno(rtg); 1104 1101 xfs_fileoff_t bmbno = 0; 1102 + unsigned int mod = 0; 1105 1103 1106 1104 ASSERT(!mp->m_sb.sb_rgcount || rgno >= mp->m_sb.sb_rgcount - 1); ··· 1119 1097 xfs_rtxnum_t nrext = xfs_last_rtgroup_extents(mp); 1120 1098 1121 1099 /* Also fill up the previous block if not entirely full. */ 1122 - bmbno = xfs_rtbitmap_blockcount_len(mp, nrext); 1123 - if (xfs_rtx_to_rbmword(mp, nrext) != 0) 1124 - bmbno--; 1100 + /* We are doing a -1 to convert it to a 0 based index */ 1101 + bmbno = xfs_rtbitmap_blockcount_len(mp, nrext) - 1; 1102 + div_u64_rem(nrext, xfs_rtbitmap_rtx_per_rbmblock(mp), &mod); 1103 + /* 1104 + * mod = 0 means that all the current blocks are full. So 1105 + * return the next block number to be used for the rtgroup 1106 + * growth. 1107 + */ 1108 + if (mod == 0) 1109 + bmbno++; 1125 1110 1126 1111 } 1127 1112 1128 1113 return bmbno; ··· 1233 1204 goto out_rele; 1234 1205 } 1235 1206 1236 - for (bmbno = xfs_last_rt_bmblock(rtg); bmbno < bmblocks; bmbno++) { 1207 + for (bmbno = xfs_last_rt_bmblock_to_extend(rtg); bmbno < bmblocks; 1208 + bmbno++) { 1237 1209 error = xfs_growfs_rt_bmblock(rtg, nrblocks, rextsize, bmbno); 1238 1210 if (error) 1239 1211 goto out_error;
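A worked example of the renamed helper's two cases, assuming for illustration that xfs_rtbitmap_rtx_per_rbmblock() returns 1024:

/* Case 1, full blocks only: nrext = 2048
 *   blockcount_len = 2, bmbno = 2 - 1 = 1, mod = 0, so bmbno++ -> 2;
 *   blocks 0..1 are completely used and growth starts on fresh block 2.
 * Case 2, partial tail: nrext = 2053
 *   blockcount_len = 3, bmbno = 3 - 1 = 2, mod = 5, bmbno stays 2;
 *   block 2 is partially used and is modified in place. */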
+11 -6
fs/xfs/xfs_stats.c
··· 42 42 { "xstrat", xfsstats_offset(xs_write_calls) }, 43 43 { "rw", xfsstats_offset(xs_attr_get) }, 44 44 { "attr", xfsstats_offset(xs_iflush_count)}, 45 - { "icluster", xfsstats_offset(vn_active) }, 45 + { "icluster", xfsstats_offset(xs_inodes_active) }, 46 46 { "vnodes", xfsstats_offset(xb_get) }, 47 47 { "buf", xfsstats_offset(xs_abtb_2) }, 48 48 { "abtb2", xfsstats_offset(xs_abtc_2) }, ··· 59 59 { "rtrefcntbt", xfsstats_offset(xs_qm_dqreclaims)}, 60 60 /* we print both series of quota information together */ 61 61 { "qm", xfsstats_offset(xs_gc_read_calls)}, 62 - { "zoned", xfsstats_offset(__pad1)}, 62 + { "zoned", xfsstats_offset(xs_inodes_meta)}, 63 + { "metafile", xfsstats_offset(xs_xstrat_bytes)}, 63 64 }; 64 65 65 66 /* Loop over all stats groups */ ··· 100 99 101 100 void xfs_stats_clearall(struct xfsstats __percpu *stats) 102 101 { 102 + uint32_t xs_inodes_active, xs_inodes_meta; 103 103 int c; 104 - uint32_t vn_active; 105 104 106 105 xfs_notice(NULL, "Clearing xfsstats"); 107 106 for_each_possible_cpu(c) { 108 107 preempt_disable(); 109 - /* save vn_active, it's a universal truth! */ 110 - vn_active = per_cpu_ptr(stats, c)->s.vn_active; 108 + /* 109 + * Save the active / meta inode counters, as they are stateful. 110 + */ 111 + xs_inodes_active = per_cpu_ptr(stats, c)->s.xs_inodes_active; 112 + xs_inodes_meta = per_cpu_ptr(stats, c)->s.xs_inodes_meta; 111 113 memset(per_cpu_ptr(stats, c), 0, sizeof(*stats)); 112 - per_cpu_ptr(stats, c)->s.vn_active = vn_active; 114 + per_cpu_ptr(stats, c)->s.xs_inodes_active = xs_inodes_active; 115 + per_cpu_ptr(stats, c)->s.xs_inodes_meta = xs_inodes_meta; 113 116 preempt_enable(); 114 117 } 115 118 }
+10 -9
fs/xfs/xfs_stats.h
··· 100 100 uint32_t xs_iflush_count; 101 101 uint32_t xs_icluster_flushcnt; 102 102 uint32_t xs_icluster_flushinode; 103 - uint32_t vn_active; /* # vnodes not on free lists */ 104 - uint32_t vn_alloc; /* # times vn_alloc called */ 105 - uint32_t vn_get; /* # times vn_get called */ 106 - uint32_t vn_hold; /* # times vn_hold called */ 107 - uint32_t vn_rele; /* # times vn_rele called */ 108 - uint32_t vn_reclaim; /* # times vn_reclaim called */ 109 - uint32_t vn_remove; /* # times vn_remove called */ 110 - uint32_t vn_free; /* # times vn_free called */ 103 + uint32_t xs_inodes_active; 104 + uint32_t __unused_vn_alloc; 105 + uint32_t __unused_vn_get; 106 + uint32_t __unused_vn_hold; 107 + uint32_t xs_inode_destroy; 108 + uint32_t xs_inode_destroy2; /* same as xs_inode_destroy */ 109 + uint32_t xs_inode_mark_reclaimable; 110 + uint32_t __unused_vn_free; 111 111 uint32_t xb_get; 112 112 uint32_t xb_create; 113 113 uint32_t xb_get_locked; ··· 142 142 uint32_t xs_gc_read_calls; 143 143 uint32_t xs_gc_write_calls; 144 144 uint32_t xs_gc_zone_reset_calls; 145 - uint32_t __pad1; 145 + /* Metafile counters */ 146 + uint32_t xs_inodes_meta; 146 147 /* Extra precision counters */ 147 148 uint64_t xs_xstrat_bytes; 148 149 uint64_t xs_write_bytes;
+2 -2
fs/xfs/xfs_super.c
··· 712 712 trace_xfs_destroy_inode(ip); 713 713 714 714 ASSERT(!rwsem_is_locked(&inode->i_rwsem)); 715 - XFS_STATS_INC(ip->i_mount, vn_rele); 716 - XFS_STATS_INC(ip->i_mount, vn_remove); 715 + XFS_STATS_INC(ip->i_mount, xs_inode_destroy); 716 + XFS_STATS_INC(ip->i_mount, xs_inode_destroy2); 717 717 xfs_inode_mark_reclaimable(ip); 718 718 } 719 719
+2 -2
fs/xfs/xfs_verify_media.c
··· 122 122 123 123 error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp); 124 124 if (error) { 125 - xfs_perag_put(pag); 125 + xfs_perag_rele(pag); 126 126 break; 127 127 } 128 128 ··· 158 158 if (rtg) 159 159 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP); 160 160 if (error) { 161 - xfs_group_put(xg); 161 + xfs_group_rele(xg); 162 162 break; 163 163 } 164 164 }
+3 -3
fs/xfs/xfs_zone_alloc.c
··· 78 78 struct xfs_rtgroup *rtg, 79 79 uint32_t freed) 80 80 { 81 - struct xfs_group *xg = &rtg->rtg_group; 81 + struct xfs_group *xg = rtg_group(rtg); 82 82 struct xfs_mount *mp = rtg_mount(rtg); 83 83 struct xfs_zone_info *zi = mp->m_zone_info; 84 84 uint32_t used = rtg_rmap(rtg)->i_used_blocks; ··· 759 759 760 760 trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb); 761 761 762 - *sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0); 762 + *sector = xfs_gbno_to_daddr(rtg_group(rtg), 0); 763 763 *is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector); 764 764 if (!*is_seq) 765 765 *sector += XFS_FSB_TO_BB(mp, allocated); ··· 1080 1080 if (write_pointer == 0) { 1081 1081 /* zone is empty */ 1082 1082 atomic_inc(&zi->zi_nr_free_zones); 1083 - xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE); 1083 + xfs_group_set_mark(rtg_group(rtg), XFS_RTG_FREE); 1084 1084 iz->available += rtg_blocks(rtg); 1085 1085 } else if (write_pointer < rtg_blocks(rtg)) { 1086 1086 /* zone is open */
+5 -5
fs/xfs/xfs_zone_gc.c
··· 627 627 if (!*count_fsb) 628 628 return NULL; 629 629 630 - *daddr = xfs_gbno_to_daddr(&oz->oz_rtg->rtg_group, 0); 630 + *daddr = xfs_gbno_to_daddr(rtg_group(oz->oz_rtg), 0); 631 631 *is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *daddr); 632 632 if (!*is_seq) 633 633 *daddr += XFS_FSB_TO_BB(mp, oz->oz_allocated); ··· 702 702 chunk->data = data; 703 703 chunk->oz = oz; 704 704 chunk->victim_rtg = iter->victim_rtg; 705 - atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); 705 + atomic_inc(&rtg_group(chunk->victim_rtg)->xg_active_ref); 706 706 atomic_inc(&chunk->victim_rtg->rtg_gccount); 707 707 708 708 bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock); ··· 788 788 atomic_inc(&chunk->oz->oz_ref); 789 789 790 790 split_chunk->victim_rtg = chunk->victim_rtg; 791 - atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); 791 + atomic_inc(&rtg_group(chunk->victim_rtg)->xg_active_ref); 792 792 atomic_inc(&chunk->victim_rtg->rtg_gccount); 793 793 794 794 chunk->offset += split_len; ··· 888 888 goto out; 889 889 } 890 890 891 - xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE); 891 + xfs_group_set_mark(rtg_group(rtg), XFS_RTG_FREE); 892 892 atomic_inc(&zi->zi_nr_free_zones); 893 893 894 894 xfs_zoned_add_available(mp, rtg_blocks(rtg)); ··· 917 917 918 918 XFS_STATS_INC(mp, xs_gc_zone_reset_calls); 919 919 920 - bio->bi_iter.bi_sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0); 920 + bio->bi_iter.bi_sector = xfs_gbno_to_daddr(rtg_group(rtg), 0); 921 921 if (!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) { 922 922 /* 923 923 * Also use the bio to drive the state machine when neither
+2 -2
include/linux/bpf.h
··· 124 124 u32 (*map_fd_sys_lookup_elem)(void *ptr); 125 125 void (*map_seq_show_elem)(struct bpf_map *map, void *key, 126 126 struct seq_file *m); 127 - int (*map_check_btf)(const struct bpf_map *map, 127 + int (*map_check_btf)(struct bpf_map *map, 128 128 const struct btf *btf, 129 129 const struct btf_type *key_type, 130 130 const struct btf_type *value_type); ··· 656 656 map->ops->map_seq_show_elem; 657 657 } 658 658 659 - int map_check_no_btf(const struct bpf_map *map, 659 + int map_check_no_btf(struct bpf_map *map, 660 660 const struct btf *btf, 661 661 const struct btf_type *key_type, 662 662 const struct btf_type *value_type);
+1 -1
include/linux/bpf_local_storage.h
··· 176 176 void bpf_local_storage_map_free(struct bpf_map *map, 177 177 struct bpf_local_storage_cache *cache); 178 178 179 - int bpf_local_storage_map_check_btf(const struct bpf_map *map, 179 + int bpf_local_storage_map_check_btf(struct bpf_map *map, 180 180 const struct btf *btf, 181 181 const struct btf_type *key_type, 182 182 const struct btf_type *value_type);
+6
include/linux/bpf_mem_alloc.h
··· 14 14 struct obj_cgroup *objcg; 15 15 bool percpu; 16 16 struct work_struct work; 17 + void (*dtor_ctx_free)(void *ctx); 18 + void *dtor_ctx; 17 19 }; 18 20 19 21 /* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects. ··· 34 32 /* The percpu allocation with a specific unit size. */ 35 33 int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size); 36 34 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma); 35 + void bpf_mem_alloc_set_dtor(struct bpf_mem_alloc *ma, 36 + void (*dtor)(void *obj, void *ctx), 37 + void (*dtor_ctx_free)(void *ctx), 38 + void *ctx); 37 39 38 40 /* Check the allocation size for kmalloc equivalent allocator */ 39 41 int bpf_mem_alloc_check_size(bool percpu, size_t size);
-13
include/linux/fsnotify.h
··· 495 495 fsnotify_dentry(dentry, mask); 496 496 } 497 497 498 - static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode, 499 - int error) 500 - { 501 - struct fs_error_report report = { 502 - .error = error, 503 - .inode = inode, 504 - .sb = sb, 505 - }; 506 - 507 - return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR, 508 - NULL, NULL, NULL, 0); 509 - } 510 - 511 498 static inline void fsnotify_mnt_attach(struct mnt_namespace *ns, struct vfsmount *mnt) 512 499 { 513 500 fsnotify_mnt(FS_MNT_ATTACH, ns, mnt);
+7 -4
include/linux/gfp.h
··· 14 14 struct mempolicy; 15 15 16 16 /* Helper macro to avoid gfp flags if they are the default one */ 17 - #define __default_gfp(a,...) a 18 - #define default_gfp(...) __default_gfp(__VA_ARGS__ __VA_OPT__(,) GFP_KERNEL) 17 + #define __default_gfp(a,b,...) b 18 + #define default_gfp(...) __default_gfp(,##__VA_ARGS__,GFP_KERNEL) 19 19 20 20 /* Convert GFP flags to their corresponding migrate type */ 21 21 #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) ··· 339 339 { 340 340 return folio_alloc_noprof(gfp, order); 341 341 } 342 - #define vma_alloc_folio_noprof(gfp, order, vma, addr) \ 343 - folio_alloc_noprof(gfp, order) 342 + static inline struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, 343 + struct vm_area_struct *vma, unsigned long addr) 344 + { 345 + return folio_alloc_noprof(gfp, order); 346 + } 344 347 #endif 345 348 346 349 #define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
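With the ,##__VA_ARGS__ form above, a caller's explicit flags always land in the second macro parameter b, while an empty argument list leaves GFP_KERNEL there; a hedged expansion sketch (not part of the patch itself):

/* default_gfp()         -> __default_gfp(, GFP_KERNEL)           -> GFP_KERNEL */
/* default_gfp(GFP_NOFS) -> __default_gfp(, GFP_NOFS, GFP_KERNEL) -> GFP_NOFS   */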
+2
include/linux/gfp_types.h
··· 139 139 * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. 140 140 * 141 141 * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension. 142 + * mark_obj_codetag_empty() should be called upon freeing for objects allocated 143 + * with this flag to indicate that their NULL tags are expected and normal. 142 144 */ 143 145 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) 144 146 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+1
include/linux/irqchip/arm-gic-v3.h
··· 394 394 #define GITS_TYPER_VLPIS (1UL << 1) 395 395 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 396 396 #define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4) 397 + #define GITS_TYPER_IDBITS GENMASK_ULL(12, 8) 397 398 #define GITS_TYPER_IDBITS_SHIFT 8 398 399 #define GITS_TYPER_DEVBITS_SHIFT 13 399 400 #define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13)
+38 -2
include/linux/jiffies.h
··· 434 434 /* 435 435 * Convert various time units to each other: 436 436 */ 437 - extern unsigned int jiffies_to_msecs(const unsigned long j); 438 - extern unsigned int jiffies_to_usecs(const unsigned long j); 437 + 438 + #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) 439 + /** 440 + * jiffies_to_msecs - Convert jiffies to milliseconds 441 + * @j: jiffies value 442 + * 443 + * This inline version takes care of HZ in {100,250,1000}. 444 + * 445 + * Return: milliseconds value 446 + */ 447 + static inline unsigned int jiffies_to_msecs(const unsigned long j) 448 + { 449 + return (MSEC_PER_SEC / HZ) * j; 450 + } 451 + #else 452 + unsigned int jiffies_to_msecs(const unsigned long j); 453 + #endif 454 + 455 + #if !(USEC_PER_SEC % HZ) 456 + /** 457 + * jiffies_to_usecs - Convert jiffies to microseconds 458 + * @j: jiffies value 459 + * 460 + * Return: microseconds value 461 + */ 462 + static inline unsigned int jiffies_to_usecs(const unsigned long j) 463 + { 464 + /* 465 + * HZ usually doesn't go much further than MSEC_PER_SEC. 466 + * jiffies_to_usecs() and usecs_to_jiffies() depend on that. 467 + */ 468 + BUILD_BUG_ON(HZ > USEC_PER_SEC); 469 + 470 + return (USEC_PER_SEC / HZ) * j; 471 + } 472 + #else 473 + unsigned int jiffies_to_usecs(const unsigned long j); 474 + #endif 439 475 440 476 /** 441 477 * jiffies_to_nsecs - Convert jiffies to nanoseconds
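Both new inline paths are compile-time constant multiplies; a hedged illustration assuming HZ == 250 (one of the exact-divisor configurations):

/* MSEC_PER_SEC / HZ == 1000 / 250 == 4, USEC_PER_SEC / HZ == 4000, so: */
jiffies_to_msecs(25);	/* == 4 * 25 == 100 */
jiffies_to_usecs(25);	/* == 4000 * 25 == 100000 */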
+1 -6
include/linux/kvm_host.h
··· 253 253 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); 254 254 #endif 255 255 256 - #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER 257 256 union kvm_mmu_notifier_arg { 258 257 unsigned long attributes; 259 258 }; ··· 274 275 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); 275 276 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); 276 277 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); 277 - #endif 278 278 279 279 enum { 280 280 OUTSIDE_GUEST_MODE, ··· 847 849 struct hlist_head irq_ack_notifier_list; 848 850 #endif 849 851 850 - #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER 851 852 struct mmu_notifier mmu_notifier; 852 853 unsigned long mmu_invalidate_seq; 853 854 long mmu_invalidate_in_progress; 854 855 gfn_t mmu_invalidate_range_start; 855 856 gfn_t mmu_invalidate_range_end; 856 - #endif 857 + 857 858 struct list_head devices; 858 859 u64 manual_dirty_log_protect; 859 860 struct dentry *debugfs_dentry; ··· 2115 2118 extern const struct kvm_stats_header kvm_vcpu_stats_header; 2116 2119 extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; 2117 2120 2118 - #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER 2119 2121 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) 2120 2122 { 2121 2123 if (unlikely(kvm->mmu_invalidate_in_progress)) ··· 2192 2196 2193 2197 return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq; 2194 2198 } 2195 - #endif 2196 2199 2197 2200 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2198 2201
+6 -3
include/linux/liveupdate.h
··· 23 23 /** 24 24 * struct liveupdate_file_op_args - Arguments for file operation callbacks. 25 25 * @handler: The file handler being called. 26 - * @retrieved: The retrieve status for the 'can_finish / finish' 27 - * operation. 26 + * @retrieve_status: The retrieve status for the 'can_finish / finish' 27 + * operation. A value of 0 means the retrieve has not been 28 + * attempted, a positive value means the retrieve was 29 + * successful, and a negative value means the retrieve failed, 30 + * and the value is the error code of the call. 28 31 * @file: The file object. For retrieve: [OUT] The callback sets 29 32 * this to the new file. For other ops: [IN] The caller sets 30 33 * this to the file being operated on. ··· 43 40 */ 44 41 struct liveupdate_file_op_args { 45 42 struct liveupdate_file_handler *handler; 46 - bool retrieved; 43 + int retrieve_status; 47 44 struct file *file; 48 45 u64 serialized_data; 49 46 void *private_data;
+5 -4
include/linux/mmc/host.h
··· 486 486 487 487 struct mmc_ios ios; /* current io bus settings */ 488 488 489 + bool claimed; /* host exclusively claimed */ 490 + 489 491 /* group bitfields together to minimize padding */ 490 492 unsigned int use_spi_crc:1; 491 - unsigned int claimed:1; /* host exclusively claimed */ 492 493 unsigned int doing_init_tune:1; /* initial tuning in progress */ 493 - unsigned int can_retune:1; /* re-tuning can be used */ 494 494 unsigned int doing_retune:1; /* re-tuning in progress */ 495 - unsigned int retune_now:1; /* do re-tuning at next req */ 496 - unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ 497 495 unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ 498 496 unsigned int can_dma_map_merge:1; /* merging can be used */ 499 497 unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */ ··· 506 508 int rescan_disable; /* disable card detection */ 507 509 int rescan_entered; /* used with nonremovable devices */ 508 510 511 + bool can_retune; /* re-tuning can be used */ 512 + bool retune_now; /* do re-tuning at next req */ 513 + bool retune_paused; /* re-tuning is temporarily disabled */ 509 514 int need_retune; /* re-tuning is needed */ 510 515 int hold_retune; /* hold off re-tuning */ 511 516 unsigned int retune_period; /* re-tuning period in secs */
+4 -4
include/linux/overflow.h
··· 42 42 * both the type-agnostic benefits of the macros while also being able to 43 43 * enforce that the return value is, in fact, checked. 44 44 */ 45 - static inline bool __must_check __must_check_overflow(bool overflow) 45 + static __always_inline bool __must_check __must_check_overflow(bool overflow) 46 46 { 47 47 return unlikely(overflow); 48 48 } ··· 327 327 * with any overflow causing the return value to be SIZE_MAX. The 328 328 * lvalue must be size_t to avoid implicit type conversion. 329 329 */ 330 - static inline size_t __must_check size_mul(size_t factor1, size_t factor2) 330 + static __always_inline size_t __must_check size_mul(size_t factor1, size_t factor2) 331 331 { 332 332 size_t bytes; 333 333 ··· 346 346 * with any overflow causing the return value to be SIZE_MAX. The 347 347 * lvalue must be size_t to avoid implicit type conversion. 348 348 */ 349 - static inline size_t __must_check size_add(size_t addend1, size_t addend2) 349 + static __always_inline size_t __must_check size_add(size_t addend1, size_t addend2) 350 350 { 351 351 size_t bytes; 352 352 ··· 367 367 * argument may be SIZE_MAX (or the result with be forced to SIZE_MAX). 368 368 * The lvalue must be size_t to avoid implicit type conversion. 369 369 */ 370 - static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) 370 + static __always_inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) 371 371 { 372 372 size_t bytes; 373 373
+2 -14
include/linux/pm_runtime.h
··· 545 545 * 546 546 * Decrement the runtime PM usage counter of @dev and if it turns out to be 547 547 * equal to 0, queue up a work item for @dev like in pm_request_idle(). 548 - * 549 - * Return: 550 - * * 1: Success. Usage counter dropped to zero, but device was already suspended. 551 - * * 0: Success. 552 - * * -EINVAL: Runtime PM error. 553 - * * -EACCES: Runtime PM disabled. 554 - * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status 555 - * change ongoing. 556 - * * -EBUSY: Runtime PM child_count non-zero. 557 - * * -EPERM: Device PM QoS resume latency 0. 558 - * * -EINPROGRESS: Suspend already in progress. 559 - * * -ENOSYS: CONFIG_PM not enabled. 560 548 */ 561 - static inline int pm_runtime_put(struct device *dev) 549 + static inline void pm_runtime_put(struct device *dev) 562 550 { 563 - return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); 551 + __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); 564 552 } 565 553 566 554 /**
+12
include/linux/rseq.h
··· 146 146 t->rseq = current->rseq; 147 147 } 148 148 149 + /* 150 + * Value returned by getauxval(AT_RSEQ_ALIGN) and expected by rseq 151 + * registration. This is the active rseq area size rounded up to next 152 + * power of 2, which guarantees that the rseq structure will always be 153 + * aligned on the nearest power of two large enough to contain it, even 154 + * as it grows. 155 + */ 156 + static inline unsigned int rseq_alloc_align(void) 157 + { 158 + return 1U << get_count_order(offsetof(struct rseq, end)); 159 + } 160 + 149 161 #else /* CONFIG_RSEQ */ 150 162 static inline void rseq_handle_slowpath(struct pt_regs *regs) { } 151 163 static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { }
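Spelling out the helper's arithmetic, assuming the one-byte feature-size bump from 32 to 33 made by the uapi change further down:

/* offsetof(struct rseq, end) == 33, so: */
rseq_alloc_align();	/* == 1U << get_count_order(33) == 1U << 6 == 64 */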
+4 -4
include/linux/rseq_entry.h
··· 216 216 } 217 217 218 218 #else /* CONFIG_RSEQ_SLICE_EXTENSION */ 219 - static inline bool rseq_slice_extension_enabled(void) { return false; } 220 - static inline bool rseq_arm_slice_extension_timer(void) { return false; } 221 - static inline void rseq_slice_clear_grant(struct task_struct *t) { } 222 - static inline bool rseq_grant_slice_extension(bool work_pending) { return false; } 219 + static __always_inline bool rseq_slice_extension_enabled(void) { return false; } 220 + static __always_inline bool rseq_arm_slice_extension_timer(void) { return false; } 221 + static __always_inline void rseq_slice_clear_grant(struct task_struct *t) { } 222 + static __always_inline bool rseq_grant_slice_extension(bool work_pending) { return false; } 223 223 #endif /* !CONFIG_RSEQ_SLICE_EXTENSION */ 224 224 225 225 bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
+1
include/linux/sched.h
··· 579 579 u64 deadline; 580 580 u64 min_vruntime; 581 581 u64 min_slice; 582 + u64 max_slice; 582 583 583 584 struct list_head group_node; 584 585 unsigned char on_rq;
-12
include/linux/slab.h
··· 517 517 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) 518 518 DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T)) 519 519 520 - /** 521 - * ksize - Report actual allocation size of associated object 522 - * 523 - * @objp: Pointer returned from a prior kmalloc()-family allocation. 524 - * 525 - * This should not be used for writing beyond the originally requested 526 - * allocation size. Either use krealloc() or round up the allocation size 527 - * with kmalloc_size_roundup() prior to allocation. If this is used to 528 - * access beyond the originally requested allocation size, UBSAN_BOUNDS 529 - * and/or FORTIFY_SOURCE may trip, since they only know about the 530 - * originally allocated size via the __alloc_size attribute. 531 - */ 532 520 size_t ksize(const void *objp); 533 521 534 522 #ifdef CONFIG_PRINTK
+3
include/linux/tnum.h
··· 131 131 return !(tnum_subreg(a)).mask; 132 132 } 133 133 134 + /* Returns the smallest member of t larger than z */ 135 + u64 tnum_step(struct tnum t, u64 z); 136 + 134 137 #endif /* _LINUX_TNUM_H */
+11 -2
include/net/af_vsock.h
··· 276 276 return vsock_net_mode(sock_net(sk_vsock(vsk))) == VSOCK_NET_MODE_GLOBAL; 277 277 } 278 278 279 - static inline void vsock_net_set_child_mode(struct net *net, 279 + static inline bool vsock_net_set_child_mode(struct net *net, 280 280 enum vsock_net_mode mode) 281 281 { 282 - WRITE_ONCE(net->vsock.child_ns_mode, mode); 282 + int new_locked = mode + 1; 283 + int old_locked = 0; /* unlocked */ 284 + 285 + if (try_cmpxchg(&net->vsock.child_ns_mode_locked, 286 + &old_locked, new_locked)) { 287 + WRITE_ONCE(net->vsock.child_ns_mode, mode); 288 + return true; 289 + } 290 + 291 + return old_locked == new_locked; 283 292 } 284 293 285 294 static inline enum vsock_net_mode vsock_net_child_mode(struct net *net)
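A hedged sketch of consuming the new boolean result; the wrapper name is illustrative, not from the patch:

/* 0 on success; -EBUSY once the mode was locked to a different value by an
 * earlier write. Re-writing the same value still succeeds, because
 * vsock_net_set_child_mode() then reports old_locked == new_locked. */
static int demo_set_child_mode(struct net *net, enum vsock_net_mode mode)
{
	return vsock_net_set_child_mode(net, mode) ? 0 : -EBUSY;
}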
+5 -3
include/net/bluetooth/l2cap.h
··· 284 284 #define L2CAP_CR_LE_BAD_KEY_SIZE 0x0007 285 285 #define L2CAP_CR_LE_ENCRYPTION 0x0008 286 286 #define L2CAP_CR_LE_INVALID_SCID 0x0009 287 - #define L2CAP_CR_LE_SCID_IN_USE 0X000A 288 - #define L2CAP_CR_LE_UNACCEPT_PARAMS 0X000B 289 - #define L2CAP_CR_LE_INVALID_PARAMS 0X000C 287 + #define L2CAP_CR_LE_SCID_IN_USE 0x000A 288 + #define L2CAP_CR_LE_UNACCEPT_PARAMS 0x000B 289 + #define L2CAP_CR_LE_INVALID_PARAMS 0x000C 290 290 291 291 /* connect/create channel status */ 292 292 #define L2CAP_CS_NO_INFO 0x0000 ··· 493 493 #define L2CAP_RECONF_SUCCESS 0x0000 494 494 #define L2CAP_RECONF_INVALID_MTU 0x0001 495 495 #define L2CAP_RECONF_INVALID_MPS 0x0002 496 + #define L2CAP_RECONF_INVALID_CID 0x0003 497 + #define L2CAP_RECONF_INVALID_PARAMS 0x0004 496 498 497 499 struct l2cap_ecred_reconf_rsp { 498 500 __le16 result;
+3 -1
include/net/inet_connection_sock.h
··· 42 42 struct request_sock *req, 43 43 struct dst_entry *dst, 44 44 struct request_sock *req_unhash, 45 - bool *own_req); 45 + bool *own_req, 46 + void (*opt_child_init)(struct sock *newsk, 47 + const struct sock *sk)); 46 48 u16 net_header_len; 47 49 int (*setsockopt)(struct sock *sk, int level, int optname, 48 50 sockptr_t optval, unsigned int optlen);
+3
include/net/netns/vsock.h
··· 17 17 18 18 enum vsock_net_mode mode; 19 19 enum vsock_net_mode child_ns_mode; 20 + 21 + /* 0 = unlocked, 1 = locked to global, 2 = locked to local */ 22 + int child_ns_mode_locked; 20 23 }; 21 24 #endif /* __NET_NET_NAMESPACE_VSOCK_H */
+1 -1
include/net/sock.h
··· 2098 2098 2099 2099 static inline void sk_set_socket(struct sock *sk, struct socket *sock) 2100 2100 { 2101 - sk->sk_socket = sock; 2101 + WRITE_ONCE(sk->sk_socket, sock); 2102 2102 if (sock) { 2103 2103 WRITE_ONCE(sk->sk_uid, SOCK_INODE(sock)->i_uid); 2104 2104 WRITE_ONCE(sk->sk_ino, SOCK_INODE(sock)->i_ino);
+3 -1
include/net/tcp.h
··· 544 544 struct request_sock *req, 545 545 struct dst_entry *dst, 546 546 struct request_sock *req_unhash, 547 - bool *own_req); 547 + bool *own_req, 548 + void (*opt_child_init)(struct sock *newsk, 549 + const struct sock *sk)); 548 550 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); 549 551 int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len); 550 552 int tcp_connect(struct sock *sk);
+1 -1
include/rdma/rdma_cm.h
··· 181 181 * 182 182 * It needs to be called before the RDMA identifier is bound 183 183 * to an device, which mean it should be called before 184 - * rdma_bind_addr(), rdma_bind_addr() and rdma_listen(). 184 + * rdma_bind_addr(), rdma_resolve_addr() and rdma_listen(). 185 185 */ 186 186 int rdma_restrict_node_type(struct rdma_cm_id *id, u8 node_type); 187 187
+1
include/sound/cs35l56.h
··· 406 406 extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC]; 407 407 extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC]; 408 408 409 + int cs35l56_set_asp_patch(struct cs35l56_base *cs35l56_base); 409 410 int cs35l56_set_patch(struct cs35l56_base *cs35l56_base); 410 411 int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command); 411 412 int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
+7 -1
include/trace/events/kmem.h
··· 440 440 441 441 TP_fast_assign( 442 442 __entry->mm_id = mm_ptr_to_hash(mm); 443 - __entry->curr = !!(current->mm == mm); 443 + /* 444 + * curr is true if the mm matches the current task's mm_struct. 445 + * Since kthreads (PF_KTHREAD) have no mm_struct of their own 446 + * but can borrow one via kthread_use_mm(), we must filter them 447 + * out to avoid incorrectly attributing the RSS update to them. 448 + */ 449 + __entry->curr = current->mm == mm && !(current->flags & PF_KTHREAD); 444 450 __entry->member = member; 445 451 __entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member]) 446 452 << PAGE_SHIFT);
+6 -6
include/uapi/drm/drm_fourcc.h
··· 401 401 * implementation can multiply the values by 2^6=64. For that reason the padding 402 402 * must only contain zeros. 403 403 * index 0 = Y plane, [15:0] z:Y [6:10] little endian 404 - * index 1 = Cr plane, [15:0] z:Cr [6:10] little endian 405 - * index 2 = Cb plane, [15:0] z:Cb [6:10] little endian 404 + * index 1 = Cb plane, [15:0] z:Cb [6:10] little endian 405 + * index 2 = Cr plane, [15:0] z:Cr [6:10] little endian 406 406 */ 407 407 #define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */ 408 408 #define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */ ··· 414 414 * implementation can multiply the values by 2^4=16. For that reason the padding 415 415 * must only contain zeros. 416 416 * index 0 = Y plane, [15:0] z:Y [4:12] little endian 417 - * index 1 = Cr plane, [15:0] z:Cr [4:12] little endian 418 - * index 2 = Cb plane, [15:0] z:Cb [4:12] little endian 417 + * index 1 = Cb plane, [15:0] z:Cb [4:12] little endian 418 + * index 2 = Cr plane, [15:0] z:Cr [4:12] little endian 419 419 */ 420 420 #define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */ 421 421 #define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */ ··· 424 424 /* 425 425 * 3 plane YCbCr 426 426 * index 0 = Y plane, [15:0] Y little endian 427 - * index 1 = Cr plane, [15:0] Cr little endian 428 - * index 2 = Cb plane, [15:0] Cb little endian 427 + * index 1 = Cb plane, [15:0] Cb little endian 428 + * index 2 = Cr plane, [15:0] Cr little endian 429 429 */ 430 430 #define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */ 431 431 #define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+1 -1
include/uapi/linux/pci_regs.h
··· 712 712 #define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */ 713 713 #define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */ 714 714 #define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */ 715 - #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */ 715 + #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x34 /* end of v2 EPs w/ link */ 716 716 #define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */ 717 717 #define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */ 718 718 #define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+22 -4
include/uapi/linux/rseq.h
··· 87 87 }; 88 88 89 89 /* 90 - * struct rseq is aligned on 4 * 8 bytes to ensure it is always 91 - * contained within a single cache-line. 90 + * The original size and alignment of the allocation for struct rseq is 91 + * 32 bytes. 92 92 * 93 - * A single struct rseq per thread is allowed. 93 + * The allocation size needs to be greater than or equal to 94 + * max(getauxval(AT_RSEQ_FEATURE_SIZE), 32), and the allocation needs to 95 + * be aligned on max(getauxval(AT_RSEQ_ALIGN), 32). 96 + * 97 + * As an alternative, userspace is allowed to use both the original size 98 + * and alignment of 32 bytes for backward compatibility. 99 + * 100 + * A single active struct rseq registration per thread is allowed. 94 101 */ 95 102 struct rseq { 96 103 /* ··· 188 181 struct rseq_slice_ctrl slice_ctrl; 189 182 190 183 /* 184 + * Before rseq became extensible, its original size was 32 bytes even 185 + * though the active rseq area was only 20 bytes. 186 + * Exposing a 32-byte feature size would make life needlessly painful 187 + * for userspace. Therefore, add a reserved byte after byte 32 188 + * to bump the rseq feature size from 32 to 33. 189 + * The next field to be added to the rseq area will be larger 190 + * than one byte, and will replace this reserved byte. 191 + */ 192 + __u8 __reserved; 193 + 194 + /* 191 195 * Flexible array member at end of structure, after last feature field. 192 196 */ 193 197 char end[]; 194 - } __attribute__((aligned(4 * sizeof(__u64)))); 198 + } __attribute__((aligned(32))); 195 199 196 200 #endif /* _UAPI_LINUX_RSEQ_H */
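A minimal userspace allocation sketch following the size and alignment rules above; getauxval() and the AT_RSEQ_* auxv entries are the documented interface, while the helper name and fallback policy are illustrative assumptions:

#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>
#include <linux/rseq.h>

/* Allocate a zeroed rseq area honouring AT_RSEQ_FEATURE_SIZE and
 * AT_RSEQ_ALIGN, falling back to the original 32-byte size and
 * alignment when the auxv entries are absent (older kernels). */
static struct rseq *demo_alloc_rseq(unsigned long *lenp)
{
	unsigned long size = getauxval(AT_RSEQ_FEATURE_SIZE);
	unsigned long align = getauxval(AT_RSEQ_ALIGN);
	struct rseq *rs;

	if (size < 32)
		size = 32;
	if (align < 32)
		align = 32;
	/* aligned_alloc() requires a size that is a multiple of the alignment. */
	size = (size + align - 1) & ~(align - 1);

	rs = aligned_alloc(align, size);
	if (rs)
		memset(rs, 0, size);
	*lenp = size;
	return rs;
}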
+1 -1
init/Kconfig
··· 153 153 config CC_HAS_BROKEN_COUNTED_BY_REF 154 154 bool 155 155 # https://github.com/llvm/llvm-project/issues/182575 156 - default y if CC_IS_CLANG && CLANG_VERSION < 220000 156 + default y if CC_IS_CLANG && CLANG_VERSION < 220100 157 157 158 158 config CC_HAS_MULTIDIMENSIONAL_NONSTRING 159 159 def_bool $(success,echo 'char tag[][4] __attribute__((__nonstring__)) = { };' | $(CC) $(CLANG_FLAGS) -x c - -c -o /dev/null -Werror)
+1 -1
io_uring/cmd_net.c
··· 146 146 return -EINVAL; 147 147 148 148 uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr)); 149 - ulen = u64_to_user_ptr(sqe->addr3); 149 + ulen = u64_to_user_ptr(READ_ONCE(sqe->addr3)); 150 150 peer = READ_ONCE(sqe->optlen); 151 151 if (peer > 1) 152 152 return -EINVAL;
+2 -2
io_uring/timeout.c
··· 462 462 tr->ltimeout = true; 463 463 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS)) 464 464 return -EINVAL; 465 - if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2))) 465 + if (get_timespec64(&tr->ts, u64_to_user_ptr(READ_ONCE(sqe->addr2)))) 466 466 return -EFAULT; 467 467 if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0) 468 468 return -EINVAL; ··· 557 557 data->req = req; 558 558 data->flags = flags; 559 559 560 - if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) 560 + if (get_timespec64(&data->ts, u64_to_user_ptr(READ_ONCE(sqe->addr)))) 561 561 return -EFAULT; 562 562 563 563 if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+1 -1
kernel/bpf/arena.c
··· 303 303 return -EOPNOTSUPP; 304 304 } 305 305 306 - static int arena_map_check_btf(const struct bpf_map *map, const struct btf *btf, 306 + static int arena_map_check_btf(struct bpf_map *map, const struct btf *btf, 307 307 const struct btf_type *key_type, const struct btf_type *value_type) 308 308 { 309 309 return 0;
+1 -1
kernel/bpf/arraymap.c
··· 548 548 rcu_read_unlock(); 549 549 } 550 550 551 - static int array_map_check_btf(const struct bpf_map *map, 551 + static int array_map_check_btf(struct bpf_map *map, 552 552 const struct btf *btf, 553 553 const struct btf_type *key_type, 554 554 const struct btf_type *value_type)
+1 -1
kernel/bpf/bloom_filter.c
··· 180 180 return -EINVAL; 181 181 } 182 182 183 - static int bloom_map_check_btf(const struct bpf_map *map, 183 + static int bloom_map_check_btf(struct bpf_map *map, 184 184 const struct btf *btf, 185 185 const struct btf_type *key_type, 186 186 const struct btf_type *value_type)
+1 -1
kernel/bpf/bpf_insn_array.c
··· 98 98 return -EINVAL; 99 99 } 100 100 101 - static int insn_array_check_btf(const struct bpf_map *map, 101 + static int insn_array_check_btf(struct bpf_map *map, 102 102 const struct btf *btf, 103 103 const struct btf_type *key_type, 104 104 const struct btf_type *value_type)
+40 -37
kernel/bpf/bpf_local_storage.c
··· 107 107 { 108 108 struct bpf_local_storage *local_storage; 109 109 110 - /* If RCU Tasks Trace grace period implies RCU grace period, do 111 - * kfree(), else do kfree_rcu(). 110 + /* 111 + * RCU Tasks Trace grace period implies RCU grace period, do 112 + * kfree() directly. 112 113 */ 113 114 local_storage = container_of(rcu, struct bpf_local_storage, rcu); 114 - if (rcu_trace_implies_rcu_gp()) 115 - kfree(local_storage); 116 - else 117 - kfree_rcu(local_storage, rcu); 115 + kfree(local_storage); 118 116 } 119 117 120 118 /* Handle use_kmalloc_nolock == false */ ··· 136 138 137 139 static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu) 138 140 { 139 - if (rcu_trace_implies_rcu_gp()) 140 - bpf_local_storage_free_rcu(rcu); 141 - else 142 - call_rcu(rcu, bpf_local_storage_free_rcu); 141 + /* 142 + * RCU Tasks Trace grace period implies RCU grace period, do 143 + * kfree() directly. 144 + */ 145 + bpf_local_storage_free_rcu(rcu); 143 146 } 144 147 145 148 static void bpf_local_storage_free(struct bpf_local_storage *local_storage, ··· 163 164 bpf_local_storage_free_trace_rcu); 164 165 } 165 166 167 + /* rcu callback for use_kmalloc_nolock == false */ 168 + static void __bpf_selem_free_rcu(struct rcu_head *rcu) 169 + { 170 + struct bpf_local_storage_elem *selem; 171 + struct bpf_local_storage_map *smap; 172 + 173 + selem = container_of(rcu, struct bpf_local_storage_elem, rcu); 174 + /* bpf_selem_unlink_nofail may have already cleared smap and freed fields. */ 175 + smap = rcu_dereference_check(SDATA(selem)->smap, 1); 176 + 177 + if (smap) 178 + bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); 179 + kfree(selem); 180 + } 181 + 166 182 /* rcu tasks trace callback for use_kmalloc_nolock == false */ 167 183 static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu) 168 184 { 169 - struct bpf_local_storage_elem *selem; 170 - 171 - selem = container_of(rcu, struct bpf_local_storage_elem, rcu); 172 - if (rcu_trace_implies_rcu_gp()) 173 - kfree(selem); 174 - else 175 - kfree_rcu(selem, rcu); 185 + /* 186 + * RCU Tasks Trace grace period implies RCU grace period, do 187 + * kfree() directly. 188 + */ 189 + __bpf_selem_free_rcu(rcu); 176 190 } 177 191 178 192 /* Handle use_kmalloc_nolock == false */ ··· 193 181 bool vanilla_rcu) 194 182 { 195 183 if (vanilla_rcu) 196 - kfree_rcu(selem, rcu); 184 + call_rcu(&selem->rcu, __bpf_selem_free_rcu); 197 185 else 198 186 call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu); 199 187 } ··· 207 195 /* The bpf_local_storage_map_free will wait for rcu_barrier */ 208 196 smap = rcu_dereference_check(SDATA(selem)->smap, 1); 209 197 210 - if (smap) { 211 - migrate_disable(); 198 + if (smap) 212 199 bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); 213 - migrate_enable(); 214 - } 215 200 kfree_nolock(selem); 216 201 } 217 202 218 203 static void bpf_selem_free_trace_rcu(struct rcu_head *rcu) 219 204 { 220 - if (rcu_trace_implies_rcu_gp()) 221 - bpf_selem_free_rcu(rcu); 222 - else 223 - call_rcu(rcu, bpf_selem_free_rcu); 205 + /* 206 + * RCU Tasks Trace grace period implies RCU grace period, do 207 + * kfree() directly.
208 + */ 209 + bpf_selem_free_rcu(rcu); 224 210 } 225 211 226 212 void bpf_selem_free(struct bpf_local_storage_elem *selem, 227 213 bool reuse_now) 228 214 { 229 - struct bpf_local_storage_map *smap; 230 - 231 - smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held()); 232 - 233 215 if (!selem->use_kmalloc_nolock) { 234 216 /* 235 217 * No uptr will be unpin even when reuse_now == false since uptr 236 218 * is only supported in task local storage, where 237 219 * smap->use_kmalloc_nolock == true. 238 220 */ 239 - if (smap) 240 - bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); 241 221 __bpf_selem_free(selem, reuse_now); 242 222 return; 243 223 } ··· 801 797 return 0; 802 798 } 803 799 804 - int bpf_local_storage_map_check_btf(const struct bpf_map *map, 800 + int bpf_local_storage_map_check_btf(struct bpf_map *map, 805 801 const struct btf *btf, 806 802 const struct btf_type *key_type, 807 803 const struct btf_type *value_type) ··· 962 958 */ 963 959 synchronize_rcu(); 964 960 965 - if (smap->use_kmalloc_nolock) { 966 - rcu_barrier_tasks_trace(); 967 - rcu_barrier(); 968 - } 961 + /* smap remains in use regardless of kmalloc_nolock, so wait unconditionally. */ 962 + rcu_barrier_tasks_trace(); 963 + rcu_barrier(); 969 964 kvfree(smap->buckets); 970 965 bpf_map_area_free(smap); 971 966 }
+15 -2
kernel/bpf/cpumap.c
··· 29 29 #include <linux/sched.h> 30 30 #include <linux/workqueue.h> 31 31 #include <linux/kthread.h> 32 + #include <linux/local_lock.h> 32 33 #include <linux/completion.h> 33 34 #include <trace/events/xdp.h> 34 35 #include <linux/btf_ids.h> ··· 53 52 struct list_head flush_node; 54 53 struct bpf_cpu_map_entry *obj; 55 54 unsigned int count; 55 + local_lock_t bq_lock; 56 56 }; 57 57 58 58 /* Struct for every remote "destination" CPU in map */ ··· 453 451 for_each_possible_cpu(i) { 454 452 bq = per_cpu_ptr(rcpu->bulkq, i); 455 453 bq->obj = rcpu; 454 + local_lock_init(&bq->bq_lock); 456 455 } 457 456 458 457 /* Alloc queue */ ··· 725 722 struct ptr_ring *q; 726 723 int i; 727 724 725 + lockdep_assert_held(&bq->bq_lock); 726 + 728 727 if (unlikely(!bq->count)) 729 728 return; 730 729 ··· 754 749 } 755 750 756 751 /* Runs under RCU-read-side, plus in softirq under NAPI protection. 757 - * Thus, safe percpu variable access. 752 + * Thus, safe percpu variable access. PREEMPT_RT relies on 753 + * local_lock_nested_bh() to serialise access to the per-CPU bq. 758 754 */ 759 755 static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) 760 756 { 761 - struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); 757 + struct xdp_bulk_queue *bq; 758 + 759 + local_lock_nested_bh(&rcpu->bulkq->bq_lock); 760 + bq = this_cpu_ptr(rcpu->bulkq); 762 761 763 762 if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) 764 763 bq_flush_to_queue(bq); ··· 783 774 784 775 list_add(&bq->flush_node, flush_list); 785 776 } 777 + 778 + local_unlock_nested_bh(&rcpu->bulkq->bq_lock); 786 779 } 787 780 788 781 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, ··· 821 810 struct xdp_bulk_queue *bq, *tmp; 822 811 823 812 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { 813 + local_lock_nested_bh(&bq->obj->bulkq->bq_lock); 824 814 bq_flush_to_queue(bq); 815 + local_unlock_nested_bh(&bq->obj->bulkq->bq_lock); 825 816 826 817 /* If already running, costs spin_lock_irqsave + smb_mb */ 827 818 wake_up_process(bq->obj->kthread);
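The cpumap hunk above and the devmap changes below apply the same local_lock_nested_bh() pattern; a condensed, self-contained sketch of that pattern with illustrative names:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct demo_bq {
	local_lock_t lock;
	unsigned int count;
};

static DEFINE_PER_CPU(struct demo_bq, demo_bq) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* On !PREEMPT_RT this compiles down to a lockdep annotation inside an
 * already BH-disabled section; on PREEMPT_RT it takes a per-CPU lock so
 * that softirq threads sharing a CPU cannot interleave queue updates. */
static void demo_enqueue(void)
{
	local_lock_nested_bh(&demo_bq.lock);
	this_cpu_ptr(&demo_bq)->count++;
	local_unlock_nested_bh(&demo_bq.lock);
}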
+38 -9
kernel/bpf/devmap.c
··· 45 45 * types of devmap; only the lookup and insertion is different. 46 46 */ 47 47 #include <linux/bpf.h> 48 + #include <linux/local_lock.h> 48 49 #include <net/xdp.h> 49 50 #include <linux/filter.h> 50 51 #include <trace/events/xdp.h> ··· 61 60 struct net_device *dev_rx; 62 61 struct bpf_prog *xdp_prog; 63 62 unsigned int count; 63 + local_lock_t bq_lock; 64 64 }; 65 65 66 66 struct bpf_dtab_netdev { ··· 383 381 int to_send = cnt; 384 382 int i; 385 383 384 + lockdep_assert_held(&bq->bq_lock); 385 + 386 386 if (unlikely(!cnt)) 387 387 return; 388 388 ··· 429 425 struct xdp_dev_bulk_queue *bq, *tmp; 430 426 431 427 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { 428 + local_lock_nested_bh(&bq->dev->xdp_bulkq->bq_lock); 432 429 bq_xmit_all(bq, XDP_XMIT_FLUSH); 433 430 bq->dev_rx = NULL; 434 431 bq->xdp_prog = NULL; 435 432 __list_del_clearprev(&bq->flush_node); 433 + local_unlock_nested_bh(&bq->dev->xdp_bulkq->bq_lock); 436 434 } 437 435 } 438 436 ··· 457 451 458 452 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu 459 453 * variable access, and map elements stick around. See comment above 460 - * xdp_do_flush() in filter.c. 454 + * xdp_do_flush() in filter.c. PREEMPT_RT relies on local_lock_nested_bh() 455 + * to serialise access to the per-CPU bq. 461 456 */ 462 457 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, 463 458 struct net_device *dev_rx, struct bpf_prog *xdp_prog) 464 459 { 465 - struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq); 460 + struct xdp_dev_bulk_queue *bq; 461 + 462 + local_lock_nested_bh(&dev->xdp_bulkq->bq_lock); 463 + bq = this_cpu_ptr(dev->xdp_bulkq); 466 464 467 465 if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) 468 466 bq_xmit_all(bq, 0); ··· 487 477 } 488 478 489 479 bq->q[bq->count++] = xdpf; 480 + 481 + local_unlock_nested_bh(&dev->xdp_bulkq->bq_lock); 490 482 } 491 483 492 484 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, ··· 600 588 } 601 589 602 590 /* Get ifindex of each upper device. 'indexes' must be able to hold at 603 - * least MAX_NEST_DEV elements. 604 - * Returns the number of ifindexes added. 591 + * least 'max' elements. 592 + * Returns the number of ifindexes added, or -EOVERFLOW if there are too 593 + * many upper devices.
605 594 */ 606 - static int get_upper_ifindexes(struct net_device *dev, int *indexes) 595 + static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max) 607 596 { 608 597 struct net_device *upper; 609 598 struct list_head *iter; 610 599 int n = 0; 611 600 612 601 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 602 + if (n >= max) 603 + return -EOVERFLOW; 613 604 indexes[n++] = upper->ifindex; 614 605 } 606 + 615 607 return n; 616 608 } 617 609 ··· 631 615 int err; 632 616 633 617 if (exclude_ingress) { 634 - num_excluded = get_upper_ifindexes(dev_rx, excluded_devices); 618 + num_excluded = get_upper_ifindexes(dev_rx, excluded_devices, 619 + ARRAY_SIZE(excluded_devices) - 1); 620 + if (num_excluded < 0) 621 + return num_excluded; 622 + 635 623 excluded_devices[num_excluded++] = dev_rx->ifindex; 636 624 } 637 625 ··· 753 733 int err; 754 734 755 735 if (exclude_ingress) { 756 - num_excluded = get_upper_ifindexes(dev, excluded_devices); 736 + num_excluded = get_upper_ifindexes(dev, excluded_devices, 737 + ARRAY_SIZE(excluded_devices) - 1); 738 + if (num_excluded < 0) 739 + return num_excluded; 740 + 757 741 excluded_devices[num_excluded++] = dev->ifindex; 758 742 } 759 743 ··· 1139 1115 if (!netdev->xdp_bulkq) 1140 1116 return NOTIFY_BAD; 1141 1117 1142 - for_each_possible_cpu(cpu) 1143 - per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev; 1118 + for_each_possible_cpu(cpu) { 1119 + struct xdp_dev_bulk_queue *bq; 1120 + 1121 + bq = per_cpu_ptr(netdev->xdp_bulkq, cpu); 1122 + bq->dev = netdev; 1123 + local_lock_init(&bq->bq_lock); 1124 + } 1144 1125 break; 1145 1126 case NETDEV_UNREGISTER: 1146 1127 /* This rcu_read_lock/unlock pair is needed because
+86
kernel/bpf/hashtab.c
··· 125 125 char key[] __aligned(8); 126 126 }; 127 127 128 + struct htab_btf_record { 129 + struct btf_record *record; 130 + u32 key_size; 131 + }; 132 + 128 133 static inline bool htab_is_prealloc(const struct bpf_htab *htab) 129 134 { 130 135 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); ··· 460 455 return -E2BIG; 461 456 462 457 return 0; 458 + } 459 + 460 + static void htab_mem_dtor(void *obj, void *ctx) 461 + { 462 + struct htab_btf_record *hrec = ctx; 463 + struct htab_elem *elem = obj; 464 + void *map_value; 465 + 466 + if (IS_ERR_OR_NULL(hrec->record)) 467 + return; 468 + 469 + map_value = htab_elem_value(elem, hrec->key_size); 470 + bpf_obj_free_fields(hrec->record, map_value); 471 + } 472 + 473 + static void htab_pcpu_mem_dtor(void *obj, void *ctx) 474 + { 475 + void __percpu *pptr = *(void __percpu **)obj; 476 + struct htab_btf_record *hrec = ctx; 477 + int cpu; 478 + 479 + if (IS_ERR_OR_NULL(hrec->record)) 480 + return; 481 + 482 + for_each_possible_cpu(cpu) 483 + bpf_obj_free_fields(hrec->record, per_cpu_ptr(pptr, cpu)); 484 + } 485 + 486 + static void htab_dtor_ctx_free(void *ctx) 487 + { 488 + struct htab_btf_record *hrec = ctx; 489 + 490 + btf_record_free(hrec->record); 491 + kfree(ctx); 492 + } 493 + 494 + static int htab_set_dtor(struct bpf_htab *htab, void (*dtor)(void *, void *)) 495 + { 496 + u32 key_size = htab->map.key_size; 497 + struct bpf_mem_alloc *ma; 498 + struct htab_btf_record *hrec; 499 + int err; 500 + 501 + /* No need for dtors. */ 502 + if (IS_ERR_OR_NULL(htab->map.record)) 503 + return 0; 504 + 505 + hrec = kzalloc(sizeof(*hrec), GFP_KERNEL); 506 + if (!hrec) 507 + return -ENOMEM; 508 + hrec->key_size = key_size; 509 + hrec->record = btf_record_dup(htab->map.record); 510 + if (IS_ERR(hrec->record)) { 511 + err = PTR_ERR(hrec->record); 512 + kfree(hrec); 513 + return err; 514 + } 515 + ma = htab_is_percpu(htab) ? &htab->pcpu_ma : &htab->ma; 516 + bpf_mem_alloc_set_dtor(ma, dtor, htab_dtor_ctx_free, hrec); 517 + return 0; 518 + } 519 + 520 + static int htab_map_check_btf(struct bpf_map *map, const struct btf *btf, 521 + const struct btf_type *key_type, const struct btf_type *value_type) 522 + { 523 + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 524 + 525 + if (htab_is_prealloc(htab)) 526 + return 0; 527 + /* 528 + * We must set the dtor using this callback, as map's BTF record is not 529 + * populated in htab_map_alloc(), so it will always appear as NULL.
530 + */ 531 + if (htab_is_percpu(htab)) 532 + return htab_set_dtor(htab, htab_pcpu_mem_dtor); 533 + else 534 + return htab_set_dtor(htab, htab_mem_dtor); 463 535 } 464 536 465 537 static struct bpf_map *htab_map_alloc(union bpf_attr *attr) ··· 2363 2281 .map_seq_show_elem = htab_map_seq_show_elem, 2364 2282 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2365 2283 .map_for_each_callback = bpf_for_each_hash_elem, 2284 + .map_check_btf = htab_map_check_btf, 2366 2285 .map_mem_usage = htab_map_mem_usage, 2367 2286 BATCH_OPS(htab), 2368 2287 .map_btf_id = &htab_map_btf_ids[0], ··· 2386 2303 .map_seq_show_elem = htab_map_seq_show_elem, 2387 2304 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2388 2305 .map_for_each_callback = bpf_for_each_hash_elem, 2306 + .map_check_btf = htab_map_check_btf, 2389 2307 .map_mem_usage = htab_map_mem_usage, 2390 2308 BATCH_OPS(htab_lru), 2391 2309 .map_btf_id = &htab_map_btf_ids[0], ··· 2566 2482 .map_seq_show_elem = htab_percpu_map_seq_show_elem, 2567 2483 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2568 2484 .map_for_each_callback = bpf_for_each_hash_elem, 2485 + .map_check_btf = htab_map_check_btf, 2569 2486 .map_mem_usage = htab_map_mem_usage, 2570 2487 BATCH_OPS(htab_percpu), 2571 2488 .map_btf_id = &htab_map_btf_ids[0], ··· 2587 2502 .map_seq_show_elem = htab_percpu_map_seq_show_elem, 2588 2503 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2589 2504 .map_for_each_callback = bpf_for_each_hash_elem, 2505 + .map_check_btf = htab_map_check_btf, 2590 2506 .map_mem_usage = htab_map_mem_usage, 2591 2507 BATCH_OPS(htab_lru_percpu), 2592 2508 .map_btf_id = &htab_map_btf_ids[0],
+1 -1
kernel/bpf/local_storage.c
··· 364 364 return -EINVAL; 365 365 } 366 366 367 - static int cgroup_storage_check_btf(const struct bpf_map *map, 367 + static int cgroup_storage_check_btf(struct bpf_map *map, 368 368 const struct btf *btf, 369 369 const struct btf_type *key_type, 370 370 const struct btf_type *value_type)
+1 -1
kernel/bpf/lpm_trie.c
··· 751 751 return err; 752 752 } 753 753 754 - static int trie_check_btf(const struct bpf_map *map, 754 + static int trie_check_btf(struct bpf_map *map, 755 755 const struct btf *btf, 756 756 const struct btf_type *key_type, 757 757 const struct btf_type *value_type)
+47 -11
kernel/bpf/memalloc.c
··· 102 102 int percpu_size; 103 103 bool draining; 104 104 struct bpf_mem_cache *tgt; 105 + void (*dtor)(void *obj, void *ctx); 106 + void *dtor_ctx; 105 107 106 108 /* list of objects to be freed after RCU GP */ 107 109 struct llist_head free_by_rcu; ··· 262 260 kfree(obj); 263 261 } 264 262 265 - static int free_all(struct llist_node *llnode, bool percpu) 263 + static int free_all(struct bpf_mem_cache *c, struct llist_node *llnode, bool percpu) 266 264 { 267 265 struct llist_node *pos, *t; 268 266 int cnt = 0; 269 267 270 268 llist_for_each_safe(pos, t, llnode) { 269 + if (c->dtor) 270 + c->dtor((void *)pos + LLIST_NODE_SZ, c->dtor_ctx); 271 271 free_one(pos, percpu); 272 272 cnt++; 273 273 } ··· 280 276 { 281 277 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace); 282 278 283 - free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size); 279 + free_all(c, llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size); 284 280 atomic_set(&c->call_rcu_ttrace_in_progress, 0); 285 281 } 286 282 ··· 312 308 if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) { 313 309 if (unlikely(READ_ONCE(c->draining))) { 314 310 llnode = llist_del_all(&c->free_by_rcu_ttrace); 315 - free_all(llnode, !!c->percpu_size); 311 + free_all(c, llnode, !!c->percpu_size); 316 312 } 317 313 return; 318 314 } ··· 421 417 dec_active(c, &flags); 422 418 423 419 if (unlikely(READ_ONCE(c->draining))) { 424 - free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size); 420 + free_all(c, llist_del_all(&c->waiting_for_gp), !!c->percpu_size); 425 421 atomic_set(&c->call_rcu_in_progress, 0); 426 422 } else { 427 423 call_rcu_hurry(&c->rcu, __free_by_rcu); ··· 639 635 * Except for waiting_for_gp_ttrace list, there are no concurrent operations 640 636 * on these lists, so it is safe to use __llist_del_all(). 641 637 */ 642 - free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu); 643 - free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu); 644 - free_all(__llist_del_all(&c->free_llist), percpu); 645 - free_all(__llist_del_all(&c->free_llist_extra), percpu); 646 - free_all(__llist_del_all(&c->free_by_rcu), percpu); 647 - free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu); 648 - free_all(llist_del_all(&c->waiting_for_gp), percpu); 638 + free_all(c, llist_del_all(&c->free_by_rcu_ttrace), percpu); 639 + free_all(c, llist_del_all(&c->waiting_for_gp_ttrace), percpu); 640 + free_all(c, __llist_del_all(&c->free_llist), percpu); 641 + free_all(c, __llist_del_all(&c->free_llist_extra), percpu); 642 + free_all(c, __llist_del_all(&c->free_by_rcu), percpu); 643 + free_all(c, __llist_del_all(&c->free_llist_extra_rcu), percpu); 644 + free_all(c, llist_del_all(&c->waiting_for_gp), percpu); 649 645 } 650 646 651 647 static void check_mem_cache(struct bpf_mem_cache *c) ··· 684 680 685 681 static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma) 686 682 { 683 + /* We can free dtor ctx only once all callbacks are done using it. */ 684 + if (ma->dtor_ctx_free) 685 + ma->dtor_ctx_free(ma->dtor_ctx); 687 686 check_leaked_objs(ma); 688 687 free_percpu(ma->cache); 689 688 free_percpu(ma->caches);
+1 -1
kernel/bpf/syscall.c
··· 1234 1234 } 1235 1235 EXPORT_SYMBOL_GPL(bpf_obj_name_cpy); 1236 1236 1237 - int map_check_no_btf(const struct bpf_map *map, 1237 + int map_check_no_btf(struct bpf_map *map, 1238 1238 const struct btf *btf, 1239 1239 const struct btf_type *key_type, 1240 1240 const struct btf_type *value_type)
+56
kernel/bpf/tnum.c
··· 269 269 { 270 270 return TNUM(swab64(a.value), swab64(a.mask)); 271 271 } 272 + 273 + /* Given tnum t, and a number z such that tmin <= z < tmax, where tmin 274 + * is the smallest member of t (= t.value) and tmax is the largest 275 + * member of t (= t.value | t.mask), returns the smallest member of t 276 + * larger than z. 277 + * 278 + * For example, 279 + * t = x11100x0 280 + * z = 11110001 (241) 281 + * result = 11110010 (242) 282 + * 283 + * Note: if this function is called with z >= tmax, it just returns 284 + * early with tmax; if this function is called with z < tmin, the 285 + * algorithm already returns tmin. 286 + */ 287 + u64 tnum_step(struct tnum t, u64 z) 288 + { 289 + u64 tmax, j, p, q, r, s, v, u, w, res; 290 + u8 k; 291 + 292 + tmax = t.value | t.mask; 293 + 294 + /* if z >= largest member of t, return largest member of t */ 295 + if (z >= tmax) 296 + return tmax; 297 + 298 + /* if z < smallest member of t, return smallest member of t */ 299 + if (z < t.value) 300 + return t.value; 301 + 302 + /* keep t's known bits, and match all unknown bits to z */ 303 + j = t.value | (z & t.mask); 304 + 305 + if (j > z) { 306 + p = ~z & t.value & ~t.mask; 307 + k = fls64(p); /* k is the most-significant 0-to-1 flip */ 308 + q = U64_MAX << k; 309 + r = q & z; /* positions > k matched to z */ 310 + s = ~q & t.value; /* positions <= k matched to t.value */ 311 + v = r | s; 312 + res = v; 313 + } else { 314 + p = z & ~t.value & ~t.mask; 315 + k = fls64(p); /* k is the most-significant 1-to-0 flip */ 316 + q = U64_MAX << k; 317 + r = q & t.mask & z; /* unknown positions > k, matched to z */ 318 + s = q & ~t.mask; /* known positions > k, set to 1 */ 319 + v = r | s; 320 + /* add 1 to unknown positions > k to make value greater than z */ 321 + u = v + (1ULL << k); 322 + /* extract bits in unknown positions > k from u, rest from t.value */ 323 + w = (u & t.mask) | t.value; 324 + res = w; 325 + } 326 + return res; 327 + }
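A quick sanity check of tnum_step() against the example in its own comment; TNUM() is the constructor macro from tnum.h:

static void tnum_step_check(void)
{
	/* t = x11100x0: known-one bits 0x70, unknown bits 0x82; the members
	 * of t are {0x70, 0x72, 0xF0, 0xF2} = {112, 114, 240, 242}. */
	struct tnum t = TNUM(0x70, 0x82);

	WARN_ON(tnum_step(t, 241) != 242);	/* 11110001 -> 11110010 */
	WARN_ON(tnum_step(t, 114) != 240);	/* z a member -> next member */
}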
+30
kernel/bpf/verifier.c
··· 2379 2379 2380 2380 static void __update_reg64_bounds(struct bpf_reg_state *reg) 2381 2381 { 2382 + u64 tnum_next, tmax; 2383 + bool umin_in_tnum; 2384 + 2382 2385 /* min signed is max(sign bit) | min(other bits) */ 2383 2386 reg->smin_value = max_t(s64, reg->smin_value, 2384 2387 reg->var_off.value | (reg->var_off.mask & S64_MIN)); ··· 2391 2388 reg->umin_value = max(reg->umin_value, reg->var_off.value); 2392 2389 reg->umax_value = min(reg->umax_value, 2393 2390 reg->var_off.value | reg->var_off.mask); 2391 + 2392 + /* Check if u64 and tnum overlap in a single value */ 2393 + tnum_next = tnum_step(reg->var_off, reg->umin_value); 2394 + umin_in_tnum = (reg->umin_value & ~reg->var_off.mask) == reg->var_off.value; 2395 + tmax = reg->var_off.value | reg->var_off.mask; 2396 + if (umin_in_tnum && tnum_next > reg->umax_value) { 2397 + /* The u64 range and the tnum only overlap in umin. 2398 + * u64: ---[xxxxxx]----- 2399 + * tnum: --xx----------x- 2400 + */ 2401 + ___mark_reg_known(reg, reg->umin_value); 2402 + } else if (!umin_in_tnum && tnum_next == tmax) { 2403 + /* The u64 range and the tnum only overlap in the maximum value 2404 + * represented by the tnum, called tmax. 2405 + * u64: ---[xxxxxx]----- 2406 + * tnum: xx-----x-------- 2407 + */ 2408 + ___mark_reg_known(reg, tmax); 2409 + } else if (!umin_in_tnum && tnum_next <= reg->umax_value && 2410 + tnum_step(reg->var_off, tnum_next) > reg->umax_value) { 2411 + /* The u64 range and the tnum only overlap in between umin 2412 + * (excluded) and umax. 2413 + * u64: ---[xxxxxx]----- 2414 + * tnum: xx----x-------x- 2415 + */ 2416 + ___mark_reg_known(reg, tnum_next); 2417 + } 2394 2418 } 2395 2419 2396 2420 static void __update_reg_bounds(struct bpf_reg_state *reg)
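A concrete instance of the third collapse case, under an assumed register state (not taken from the patch):

/* var_off = TNUM(0, 6) tracks {0, 2, 4, 6}; suppose umin = 3, umax = 5:
 *
 *   umin_in_tnum:  (3 & ~6) != 0                -> umin not in the tnum
 *   tnum_next:     tnum_step(var_off, 3) == 4   -> inside [3, 5]
 *   one step more: tnum_step(var_off, 4) == 6   -> above umax
 *
 * Only 4 is feasible, so the register is marked known with value 4. */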
-1
kernel/configs/debug.config
··· 29 29 # CONFIG_UBSAN_ALIGNMENT is not set 30 30 # CONFIG_UBSAN_DIV_ZERO is not set 31 31 # CONFIG_UBSAN_TRAP is not set 32 - # CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set 33 32 CONFIG_DEBUG_FS=y 34 33 CONFIG_DEBUG_FS_ALLOW_ALL=y 35 34 CONFIG_DEBUG_IRQFLAGS=y
+1 -1
kernel/dma/direct.h
··· 85 85 86 86 if (is_swiotlb_force_bounce(dev)) { 87 87 if (attrs & DMA_ATTR_MMIO) 88 - goto err_overflow; 88 + return DMA_MAPPING_ERROR; 89 89 90 90 return swiotlb_map(dev, phys, size, dir, attrs); 91 91 }
+64 -23
kernel/events/core.c
··· 4138 4138 if (*perf_event_fasync(event)) 4139 4139 event->pending_kill = POLL_ERR; 4140 4140 4141 - perf_event_wakeup(event); 4141 + event->pending_wakeup = 1; 4142 + irq_work_queue(&event->pending_irq); 4142 4143 } else { 4143 4144 struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu); 4144 4145 ··· 7465 7464 ret = perf_mmap_aux(vma, event, nr_pages); 7466 7465 if (ret) 7467 7466 return ret; 7467 + 7468 + /* 7469 + * Since pinned accounting is per vm we cannot allow fork() to copy our 7470 + * vma. 7471 + */ 7472 + vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP); 7473 + vma->vm_ops = &perf_mmap_vmops; 7474 + 7475 + mapped = get_mapped(event, event_mapped); 7476 + if (mapped) 7477 + mapped(event, vma->vm_mm); 7478 + 7479 + /* 7480 + * Try to map it into the page table. On fail, invoke 7481 + * perf_mmap_close() to undo the above, as the callsite expects 7482 + * full cleanup in this case and therefore does not invoke 7483 + * vmops::close(). 7484 + */ 7485 + ret = map_range(event->rb, vma); 7486 + if (ret) 7487 + perf_mmap_close(vma); 7468 7488 } 7469 - 7470 - /* 7471 - * Since pinned accounting is per vm we cannot allow fork() to copy our 7472 - * vma. 7473 - */ 7474 - vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP); 7475 - vma->vm_ops = &perf_mmap_vmops; 7476 - 7477 - mapped = get_mapped(event, event_mapped); 7478 - if (mapped) 7479 - mapped(event, vma->vm_mm); 7480 - 7481 - /* 7482 - * Try to map it into the page table. On fail, invoke 7483 - * perf_mmap_close() to undo the above, as the callsite expects 7484 - * full cleanup in this case and therefore does not invoke 7485 - * vmops::close(). 7486 - */ 7487 - ret = map_range(event->rb, vma); 7488 - if (ret) 7489 - perf_mmap_close(vma); 7490 7489 7491 7490 return ret; 7492 7491 } ··· 10777 10776 struct perf_sample_data *data, 10778 10777 struct pt_regs *regs) 10779 10778 { 10779 + /* 10780 + * Entry point from hardware PMI, interrupts should be disabled here. 10781 + * This serializes us against perf_event_remove_from_context() in 10782 + * things like perf_event_release_kernel(). 10783 + */ 10784 + lockdep_assert_irqs_disabled(); 10785 + 10780 10786 return __perf_event_overflow(event, 1, data, regs); 10781 10787 } 10782 10788 ··· 10860 10852 { 10861 10853 struct hw_perf_event *hwc = &event->hw; 10862 10854 10855 + /* 10856 + * This is: 10857 + * - software preempt 10858 + * - tracepoint preempt 10859 + * - tp_target_task irq (ctx->lock) 10860 + * - uprobes preempt/irq 10861 + * - kprobes preempt/irq 10862 + * - hw_breakpoint irq 10863 + * 10864 + * Any of these are sufficient to hold off RCU and thus ensure @event 10865 + * exists. 10866 + */ 10867 + lockdep_assert_preemption_disabled(); 10863 10868 local64_add(nr, &event->count); 10864 10869 10865 10870 if (!regs) 10866 10871 return; 10867 10872 10868 10873 if (!is_sampling_event(event)) 10874 + return; 10875 + 10876 + /* 10877 + * Serialize against event_function_call() IPIs like normal overflow 10878 + * event handling. Specifically, must not allow 10879 + * perf_event_release_kernel() -> perf_remove_from_context() to make 10880 + * progress and 'release' the event from under us.
10881 + */ 10882 + guard(irqsave)(); 10883 + if (event->state != PERF_EVENT_STATE_ACTIVE) 10869 10884 return; 10870 10885 10871 10886 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { ··· 11389 11358 struct perf_sample_data data; 11390 11359 struct perf_event *event; 11391 11360 11361 + /* 11362 + * Per being a tracepoint, this runs with preemption disabled. 11363 + */ 11364 + lockdep_assert_preemption_disabled(); 11365 + 11392 11366 struct perf_raw_record raw = { 11393 11367 .frag = { 11394 11368 .size = entry_size, ··· 11725 11689 { 11726 11690 struct perf_sample_data sample; 11727 11691 struct pt_regs *regs = data; 11692 + 11693 + /* 11694 + * Exception context, will have interrupts disabled. 11695 + */ 11696 + lockdep_assert_irqs_disabled(); 11728 11697 11729 11698 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 11730 11699 ··· 12195 12154 12196 12155 if (regs && !perf_exclude_event(event, regs)) { 12197 12156 if (!(event->attr.exclude_idle && is_idle_task(current))) 12198 - if (__perf_event_overflow(event, 1, &data, regs)) 12157 + if (perf_event_overflow(event, &data, regs)) 12199 12158 ret = HRTIMER_NORESTART; 12200 12159 } 12201 12160
+1 -1
kernel/fork.c
··· 3085 3085 return 0; 3086 3086 3087 3087 /* don't need lock here; in the worst case we'll do useless copy */ 3088 - if (fs->users == 1) 3088 + if (!(unshare_flags & CLONE_NEWNS) && fs->users == 1) 3089 3089 return 0; 3090 3090 3091 3091 *new_fsp = copy_fs_struct(fs);
+1 -1
kernel/kcsan/kcsan_test.c
··· 168 168 if (!report_available()) 169 169 return false; 170 170 171 - expect = kmalloc_obj(observed.lines); 171 + expect = (typeof(expect))kmalloc_obj(observed.lines); 172 172 if (WARN_ON(!expect)) 173 173 return false; 174 174
+25 -16
kernel/liveupdate/luo_file.c
··· 134 134 * state that is not preserved. Set by the handler's .preserve() 135 135 * callback, and must be freed in the handler's .unpreserve() 136 136 * callback. 137 - * @retrieved: A flag indicating whether a user/kernel in the new kernel has 137 + * @retrieve_status: Status code indicating whether a user/kernel in the new kernel has 138 138 * successfully called retrieve() on this file. This prevents 139 - * multiple retrieval attempts. 139 + * multiple retrieval attempts. A value of 0 means a retrieve() 140 + * has not been attempted, a positive value means the retrieve() 141 + * was successful, and a negative value means the retrieve() 142 + * failed, and the value is the error code of the call. 140 143 * @mutex: A mutex that protects the fields of this specific instance 141 144 * (e.g., @retrieved, @file), ensuring that operations like 142 145 * retrieving or finishing a file are atomic. ··· 164 161 struct file *file; 165 162 u64 serialized_data; 166 163 void *private_data; 167 - bool retrieved; 164 + int retrieve_status; 168 165 struct mutex mutex; 169 166 struct list_head list; 170 167 u64 token; ··· 301 298 luo_file->file = file; 302 299 luo_file->fh = fh; 303 300 luo_file->token = token; 304 - luo_file->retrieved = false; 305 301 mutex_init(&luo_file->mutex); 306 302 307 303 args.handler = fh; ··· 579 577 return -ENOENT; 580 578 581 579 guard(mutex)(&luo_file->mutex); 582 - if (luo_file->retrieved) { 580 + if (luo_file->retrieve_status < 0) { 581 + /* Retrieve was attempted and it failed. Return the error code. */ 582 + return luo_file->retrieve_status; 583 + } 584 + 585 + if (luo_file->retrieve_status > 0) { 583 586 /* 584 587 * Someone is asking for this file again, so get a reference 585 588 * for them. ··· 597 590 args.handler = luo_file->fh; 598 591 args.serialized_data = luo_file->serialized_data; 599 592 err = luo_file->fh->ops->retrieve(&args); 600 - if (!err) { 601 - luo_file->file = args.file; 602 - 603 - /* Get reference so we can keep this file in LUO until finish */ 604 - get_file(luo_file->file); 605 - *filep = luo_file->file; 606 - luo_file->retrieved = true; 593 + if (err) { 594 + /* Keep the error code for later use. */ 595 + luo_file->retrieve_status = err; 596 + return err; 607 597 } 608 598 609 - return err; 599 + luo_file->file = args.file; 600 + /* Get reference so we can keep this file in LUO until finish */ 601 + get_file(luo_file->file); 602 + *filep = luo_file->file; 603 + luo_file->retrieve_status = 1; 604 + 605 + return 0; 610 606 } 611 607 612 608 static int luo_file_can_finish_one(struct luo_file_set *file_set, ··· 625 615 args.handler = luo_file->fh; 626 616 args.file = luo_file->file; 627 617 args.serialized_data = luo_file->serialized_data; 628 - args.retrieved = luo_file->retrieved; 618 + args.retrieve_status = luo_file->retrieve_status; 629 619 can_finish = luo_file->fh->ops->can_finish(&args); 630 620 } 631 621 ··· 642 632 args.handler = luo_file->fh; 643 633 args.file = luo_file->file; 644 634 args.serialized_data = luo_file->serialized_data; 645 - args.retrieved = luo_file->retrieved; 635 + args.retrieve_status = luo_file->retrieve_status; 646 636 647 637 luo_file->fh->ops->finish(&args); 648 638 luo_flb_file_finish(luo_file->fh); ··· 798 788 luo_file->file = NULL; 799 789 luo_file->serialized_data = file_ser[i].data; 800 790 luo_file->token = file_ser[i].token; 801 - luo_file->retrieved = false; 802 791 mutex_init(&luo_file->mutex); 803 792 list_add_tail(&luo_file->list, &file_set->files_list); 804 793 }
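A hedged sketch of a handler callback consuming the tri-state field; the callback name is illustrative:

/* retrieve_status: 0 = retrieve() never attempted, > 0 = retrieved
 * successfully, < 0 = the -errno of the failed retrieve() call. */
static bool demo_can_finish(struct liveupdate_file_op_args *args)
{
	return args->retrieve_status >= 0;
}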
+5 -3
kernel/rseq.c
··· 80 80 #include <linux/syscalls.h> 81 81 #include <linux/uaccess.h> 82 82 #include <linux/types.h> 83 + #include <linux/rseq.h> 83 84 #include <asm/ptrace.h> 84 85 85 86 #define CREATE_TRACE_POINTS ··· 450 449 * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq 451 450 * size, the required alignment is the original struct rseq alignment. 452 451 * 453 - * In order to be valid, rseq_len is either the original rseq size, or 454 - * large enough to contain all supported fields, as communicated to 452 + * The rseq_len is required to be greater than or equal to the original rseq 453 + * size. In order to be valid, rseq_len is either the original rseq size, 454 + * or large enough to contain all supported fields, as communicated to 455 455 * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE. 456 456 */ 457 457 if (rseq_len < ORIG_RSEQ_SIZE || 458 458 (rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) || 459 - (rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) || 459 + (rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, rseq_alloc_align()) || 460 460 rseq_len < offsetof(struct rseq, end)))) 461 461 return -EINVAL; 462 462 if (!access_ok(rseq, rseq_len))
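Editor's note: the validity check now reads as: a registration is either exactly the original rseq size (and must then be aligned to that size), or an extended one, which must satisfy the architecture's allocation alignment (rseq_alloc_align()) and be large enough to cover every feature field. A standalone sketch of the same predicate; the size, alignment, and offset constants below are made-up stand-ins, not the kernel's values:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

enum { ORIG_RSEQ_SIZE  = 32 };   /* assumed original struct rseq size */
enum { RSEQ_ALLOC_ALIGN = 32 };  /* stand-in for rseq_alloc_align() */
enum { RSEQ_END_OFFSET  = 40 };  /* stand-in for offsetof(struct rseq, end) */

static bool rseq_args_valid(uintptr_t rseq, uint32_t rseq_len)
{
    if (rseq_len < ORIG_RSEQ_SIZE)
        return false;
    if (rseq_len == ORIG_RSEQ_SIZE)     /* legacy registration */
        return IS_ALIGNED(rseq, ORIG_RSEQ_SIZE);
    /* extended registration: stricter alignment, must cover all fields */
    return IS_ALIGNED(rseq, RSEQ_ALLOC_ALIGN) && rseq_len >= RSEQ_END_OFFSET;
}

int main(void)
{
    printf("%d\n", rseq_args_valid(0x1000, 32));  /* 1: legacy, aligned */
    printf("%d\n", rseq_args_valid(0x1010, 32));  /* 0: legacy, misaligned */
    printf("%d\n", rseq_args_valid(0x1000, 20));  /* 0: shorter than original */
    printf("%d\n", rseq_args_valid(0x1000, 64));  /* 1: extended, aligned, big enough */
    return 0;
}
```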
+1
kernel/sched/core.c
··· 6830 6830 /* SCX must consult the BPF scheduler to tell if rq is empty */ 6831 6831 if (!rq->nr_running && !scx_enabled()) { 6832 6832 next = prev; 6833 + rq->next_class = &idle_sched_class; 6833 6834 goto picked; 6834 6835 } 6835 6836 } else if (!preempt && prev_state) {
+2 -2
kernel/sched/ext.c
··· 2460 2460 /* see kick_cpus_irq_workfn() */ 2461 2461 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); 2462 2462 2463 - rq->next_class = &ext_sched_class; 2463 + rq_modified_begin(rq, &ext_sched_class); 2464 2464 2465 2465 rq_unpin_lock(rq, rf); 2466 2466 balance_one(rq, prev); ··· 2475 2475 * If @force_scx is true, always try to pick a SCHED_EXT task, 2476 2476 * regardless of any higher-priority sched classes activity. 2477 2477 */ 2478 - if (!force_scx && sched_class_above(rq->next_class, &ext_sched_class)) 2478 + if (!force_scx && rq_modified_above(rq, &ext_sched_class)) 2479 2479 return RETRY_TASK; 2480 2480 2481 2481 keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
+112 -38
kernel/sched/fair.c
··· 589 589 return vruntime_cmp(a->deadline, "<", b->deadline); 590 590 } 591 591 592 + /* 593 + * Per avg_vruntime() below, cfs_rq::zero_vruntime is only slightly stale 594 + * and this value should be no more than two lag bounds. Which puts it in the 595 + * general order of: 596 + * 597 + * (slice + TICK_NSEC) << NICE_0_LOAD_SHIFT 598 + * 599 + * which is around 44 bits in size (on 64bit); that is 20 for 600 + * NICE_0_LOAD_SHIFT, another 20 for NSEC_PER_MSEC and then a handful for 601 + * however many msec the actual slice+tick ends up being. 602 + * 603 + * (disregarding the actual divide-by-weight part makes for the worst case 604 + * weight of 2, which nicely cancels vs the fuzz in zero_vruntime not actually 605 + * being the zero-lag point). 606 + */ 592 607 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) 593 608 { 594 609 return vruntime_op(se->vruntime, "-", cfs_rq->zero_vruntime); ··· 691 676 } 692 677 693 678 static inline 694 - void sum_w_vruntime_update(struct cfs_rq *cfs_rq, s64 delta) 679 + void update_zero_vruntime(struct cfs_rq *cfs_rq, s64 delta) 695 680 { 696 681 /* 697 - * v' = v + d ==> sum_w_vruntime' = sum_runtime - d*sum_weight 682 + * v' = v + d ==> sum_w_vruntime' = sum_w_vruntime - d*sum_weight 698 683 */ 699 684 cfs_rq->sum_w_vruntime -= cfs_rq->sum_weight * delta; 685 + cfs_rq->zero_vruntime += delta; 700 686 } 701 687 702 688 /* 703 - * Specifically: avg_runtime() + 0 must result in entity_eligible() := true 689 + * Specifically: avg_vruntime() + 0 must result in entity_eligible() := true 704 690 * For this to be so, the result of this function must have a left bias. 691 + * 692 + * Called in: 693 + * - place_entity() -- before enqueue 694 + * - update_entity_lag() -- before dequeue 695 + * - entity_tick() 696 + * 697 + * This means it is one entry 'behind' but that puts it close enough to where 698 + * the bound on entity_key() is at most two lag bounds. 705 699 */ 706 700 u64 avg_vruntime(struct cfs_rq *cfs_rq) 707 701 { 708 702 struct sched_entity *curr = cfs_rq->curr; 709 - s64 avg = cfs_rq->sum_w_vruntime; 710 - long load = cfs_rq->sum_weight; 703 + long weight = cfs_rq->sum_weight; 704 + s64 delta = 0; 711 705 712 - if (curr && curr->on_rq) { 713 - unsigned long weight = scale_load_down(curr->load.weight); 706 + if (curr && !curr->on_rq) 707 + curr = NULL; 714 708 715 - avg += entity_key(cfs_rq, curr) * weight; 716 - load += weight; 717 - } 709 + if (weight) { 710 + s64 runtime = cfs_rq->sum_w_vruntime; 718 711 719 - if (load) { 712 + if (curr) { 713 + unsigned long w = scale_load_down(curr->load.weight); 714 + 715 + runtime += entity_key(cfs_rq, curr) * w; 716 + weight += w; 717 + } 718 + 720 719 /* sign flips effective floor / ceiling */ 721 - if (avg < 0) 722 - avg -= (load - 1); 723 - avg = div_s64(avg, load); 720 + if (runtime < 0) 721 + runtime -= (weight - 1); 722 + 723 + delta = div_s64(runtime, weight); 724 + } else if (curr) { 725 + /* 726 + * When there is but one element, it is the average.
727 + */ 728 + delta = curr->vruntime - cfs_rq->zero_vruntime; 724 729 } 725 730 726 - return cfs_rq->zero_vruntime + avg; 731 + update_zero_vruntime(cfs_rq, delta); 732 + 733 + return cfs_rq->zero_vruntime; 727 734 } 735 + 736 + static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq); 728 737 729 738 /* 730 739 * lag_i = S - s_i = w_i * (V - v_i) ··· 763 724 * EEVDF gives the following limit for a steady state system: 764 725 * 765 726 * -r_max < lag < max(r_max, q) 766 - * 767 - * XXX could add max_slice to the augmented data to track this. 768 727 */ 769 728 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) 770 729 { 730 + u64 max_slice = cfs_rq_max_slice(cfs_rq) + TICK_NSEC; 771 731 s64 vlag, limit; 772 732 773 733 WARN_ON_ONCE(!se->on_rq); 774 734 775 735 vlag = avg_vruntime(cfs_rq) - se->vruntime; 776 - limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se); 736 + limit = calc_delta_fair(max_slice, se); 777 737 778 738 se->vlag = clamp(vlag, -limit, limit); 779 739 } ··· 815 777 return vruntime_eligible(cfs_rq, se->vruntime); 816 778 } 817 779 818 - static void update_zero_vruntime(struct cfs_rq *cfs_rq) 819 - { 820 - u64 vruntime = avg_vruntime(cfs_rq); 821 - s64 delta = vruntime_op(vruntime, "-", cfs_rq->zero_vruntime); 822 - 823 - sum_w_vruntime_update(cfs_rq, delta); 824 - 825 - cfs_rq->zero_vruntime = vruntime; 826 - } 827 - 828 780 static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq) 829 781 { 830 782 struct sched_entity *root = __pick_root_entity(cfs_rq); ··· 828 800 min_slice = min(min_slice, root->min_slice); 829 801 830 802 return min_slice; 803 + } 804 + 805 + static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq) 806 + { 807 + struct sched_entity *root = __pick_root_entity(cfs_rq); 808 + struct sched_entity *curr = cfs_rq->curr; 809 + u64 max_slice = 0ULL; 810 + 811 + if (curr && curr->on_rq) 812 + max_slice = curr->slice; 813 + 814 + if (root) 815 + max_slice = max(max_slice, root->max_slice); 816 + 817 + return max_slice; 831 818 } 832 819 833 820 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b) ··· 869 826 } 870 827 } 871 828 829 + static inline void __max_slice_update(struct sched_entity *se, struct rb_node *node) 830 + { 831 + if (node) { 832 + struct sched_entity *rse = __node_2_se(node); 833 + if (rse->max_slice > se->max_slice) 834 + se->max_slice = rse->max_slice; 835 + } 836 + } 837 + 872 838 /* 873 839 * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime) 874 840 */ ··· 885 833 { 886 834 u64 old_min_vruntime = se->min_vruntime; 887 835 u64 old_min_slice = se->min_slice; 836 + u64 old_max_slice = se->max_slice; 888 837 struct rb_node *node = &se->run_node; 889 838 890 839 se->min_vruntime = se->vruntime; ··· 896 843 __min_slice_update(se, node->rb_right); 897 844 __min_slice_update(se, node->rb_left); 898 845 846 + se->max_slice = se->slice; 847 + __max_slice_update(se, node->rb_right); 848 + __max_slice_update(se, node->rb_left); 849 + 899 850 return se->min_vruntime == old_min_vruntime && 900 - se->min_slice == old_min_slice; 851 + se->min_slice == old_min_slice && 852 + se->max_slice == old_max_slice; 901 853 } 902 854 903 855 RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity, ··· 914 856 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 915 857 { 916 858 sum_w_vruntime_add(cfs_rq, se); 917 - update_zero_vruntime(cfs_rq); 918 859 se->min_vruntime = se->vruntime; 919 860 se->min_slice = se->slice; 920 861 
rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, ··· 925 868 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, 926 869 &min_vruntime_cb); 927 870 sum_w_vruntime_sub(cfs_rq, se); 928 - update_zero_vruntime(cfs_rq); 929 871 } 930 872 931 873 struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq) ··· 3846 3790 unsigned long weight) 3847 3791 { 3848 3792 bool curr = cfs_rq->curr == se; 3793 + bool rel_vprot = false; 3794 + u64 vprot; 3849 3795 3850 3796 if (se->on_rq) { 3851 3797 /* commit outstanding execution time */ ··· 3855 3797 update_entity_lag(cfs_rq, se); 3856 3798 se->deadline -= se->vruntime; 3857 3799 se->rel_deadline = 1; 3800 + if (curr && protect_slice(se)) { 3801 + vprot = se->vprot - se->vruntime; 3802 + rel_vprot = true; 3803 + } 3804 + 3858 3805 cfs_rq->nr_queued--; 3859 3806 if (!curr) 3860 3807 __dequeue_entity(cfs_rq, se); ··· 3875 3812 if (se->rel_deadline) 3876 3813 se->deadline = div_s64(se->deadline * se->load.weight, weight); 3877 3814 3815 + if (rel_vprot) 3816 + vprot = div_s64(vprot * se->load.weight, weight); 3817 + 3878 3818 update_load_set(&se->load, weight); 3879 3819 3880 3820 do { ··· 3889 3823 enqueue_load_avg(cfs_rq, se); 3890 3824 if (se->on_rq) { 3891 3825 place_entity(cfs_rq, se, 0); 3826 + if (rel_vprot) 3827 + se->vprot = se->vruntime + vprot; 3892 3828 update_load_add(&cfs_rq->load, se->load.weight); 3893 3829 if (!curr) 3894 3830 __enqueue_entity(cfs_rq, se); ··· 5488 5420 } 5489 5421 5490 5422 static void 5491 - set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 5423 + set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool first) 5492 5424 { 5493 5425 clear_buddies(cfs_rq, se); 5494 5426 ··· 5503 5435 __dequeue_entity(cfs_rq, se); 5504 5436 update_load_avg(cfs_rq, se, UPDATE_TG); 5505 5437 5506 - set_protect_slice(cfs_rq, se); 5438 + if (first) 5439 + set_protect_slice(cfs_rq, se); 5507 5440 } 5508 5441 5509 5442 update_stats_curr_start(cfs_rq, se); ··· 5592 5523 */ 5593 5524 update_load_avg(cfs_rq, curr, UPDATE_TG); 5594 5525 update_cfs_group(curr); 5526 + 5527 + /* 5528 + * Pulls along cfs_rq::zero_vruntime. 
5529 + */ 5530 + avg_vruntime(cfs_rq); 5595 5531 5596 5532 #ifdef CONFIG_SCHED_HRTICK 5597 5533 /* ··· 9022 8948 pse = parent_entity(pse); 9023 8949 } 9024 8950 if (se_depth >= pse_depth) { 9025 - set_next_entity(cfs_rq_of(se), se); 8951 + set_next_entity(cfs_rq_of(se), se, true); 9026 8952 se = parent_entity(se); 9027 8953 } 9028 8954 } 9029 8955 9030 8956 put_prev_entity(cfs_rq, pse); 9031 - set_next_entity(cfs_rq, se); 8957 + set_next_entity(cfs_rq, se, true); 9032 8958 9033 8959 __set_next_task_fair(rq, p, true); 9034 8960 } ··· 12982 12908 t0 = sched_clock_cpu(this_cpu); 12983 12909 __sched_balance_update_blocked_averages(this_rq); 12984 12910 12985 - this_rq->next_class = &fair_sched_class; 12911 + rq_modified_begin(this_rq, &fair_sched_class); 12986 12912 raw_spin_rq_unlock(this_rq); 12987 12913 12988 12914 for_each_domain(this_cpu, sd) { ··· 13049 12975 pulled_task = 1; 13050 12976 13051 12977 /* If a higher prio class was modified, restart the pick */ 13052 - if (sched_class_above(this_rq->next_class, &fair_sched_class)) 12978 + if (rq_modified_above(this_rq, &fair_sched_class)) 13053 12979 pulled_task = -1; 13054 12980 13055 12981 out: ··· 13642 13568 for_each_sched_entity(se) { 13643 13569 struct cfs_rq *cfs_rq = cfs_rq_of(se); 13644 13570 13645 - set_next_entity(cfs_rq, se); 13571 + set_next_entity(cfs_rq, se, first); 13646 13572 /* ensure bandwidth has been allocated on our new cfs_rq */ 13647 13573 account_cfs_rq_runtime(cfs_rq, 0); 13648 13574 }
+11
kernel/sched/sched.h
··· 2748 2748 2749 2749 #define sched_class_above(_a, _b) ((_a) < (_b)) 2750 2750 2751 + static inline void rq_modified_begin(struct rq *rq, const struct sched_class *class) 2752 + { 2753 + if (sched_class_above(rq->next_class, class)) 2754 + rq->next_class = class; 2755 + } 2756 + 2757 + static inline bool rq_modified_above(struct rq *rq, const struct sched_class *class) 2758 + { 2759 + return sched_class_above(rq->next_class, class); 2760 + } 2761 + 2751 2762 static inline bool sched_stop_runnable(struct rq *rq) 2752 2763 { 2753 2764 return rq->stop && task_on_rq_queued(rq->stop);
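Editor's note: these helpers turn rq->next_class into a watermark of the highest-priority class that may have changed while the rq lock was dropped; sched_class_above() is a plain pointer comparison because the sched classes are placed in priority order at link time, so "above" means "lower address". rq_modified_begin() lowers the watermark to the calling class before a lock-dropping balance pass, and rq_modified_above() afterwards tells that class whether something above it was touched in the meantime. A toy model with an array standing in for the link-time layout; the raising side is modeled directly here (in the kernel it presumably happens on the enqueue/wakeup path, which this hunk does not show):

```c
#include <stdbool.h>
#include <stdio.h>

struct sched_class { const char *name; };

/* Priority order mimics the kernel's link-time section layout:
 * lower index (lower address) means higher priority. */
static const struct sched_class classes[] = {
    { "stop" }, { "dl" }, { "rt" }, { "fair" }, { "idle" },
};
#define sched_class_above(a, b) ((a) < (b))

struct rq { const struct sched_class *next_class; };

static void rq_modified_begin(struct rq *rq, const struct sched_class *class)
{
    if (sched_class_above(rq->next_class, class))
        rq->next_class = class;    /* lower the watermark to us */
}

static bool rq_modified_above(struct rq *rq, const struct sched_class *class)
{
    return sched_class_above(rq->next_class, class);
}

int main(void)
{
    struct rq rq = { .next_class = &classes[0] };   /* pessimistic start */

    rq_modified_begin(&rq, &classes[3]);            /* fair lowers the watermark */
    printf("watermark: %s\n", rq.next_class->name); /* "fair" */

    /* lock dropped; an RT wakeup raises the watermark (modeled directly) */
    rq.next_class = &classes[2];

    /* back under the lock: something above fair changed, restart the pick */
    printf("restart fair pick: %d\n", rq_modified_above(&rq, &classes[3])); /* 1 */
    return 0;
}
```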
+7 -12
kernel/time/time.c
··· 365 365 } 366 366 #endif 367 367 368 + #if HZ > MSEC_PER_SEC || (MSEC_PER_SEC % HZ) 368 369 /** 369 370 * jiffies_to_msecs - Convert jiffies to milliseconds 370 371 * @j: jiffies value 371 - * 372 - * Avoid unnecessary multiplications/divisions in the 373 - * two most common HZ cases. 374 372 * 375 373 * Return: milliseconds value 376 374 */ 377 375 unsigned int jiffies_to_msecs(const unsigned long j) 378 376 { 379 - #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) 380 - return (MSEC_PER_SEC / HZ) * j; 381 - #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) 377 + #if HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) 382 378 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); 383 379 #else 384 380 # if BITS_PER_LONG == 32 ··· 386 390 #endif 387 391 } 388 392 EXPORT_SYMBOL(jiffies_to_msecs); 393 + #endif 389 394 395 + #if (USEC_PER_SEC % HZ) 390 396 /** 391 397 * jiffies_to_usecs - Convert jiffies to microseconds 392 398 * @j: jiffies value ··· 403 405 */ 404 406 BUILD_BUG_ON(HZ > USEC_PER_SEC); 405 407 406 - #if !(USEC_PER_SEC % HZ) 407 - return (USEC_PER_SEC / HZ) * j; 408 - #else 409 - # if BITS_PER_LONG == 32 408 + #if BITS_PER_LONG == 32 410 409 return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; 411 - # else 410 + #else 412 411 return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; 413 - # endif 414 412 #endif 415 413 } 416 414 EXPORT_SYMBOL(jiffies_to_usecs); 415 + #endif 417 416 418 417 /** 419 418 * mktime64 - Converts date to seconds.
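Editor's note: with this restructuring, the trivial HZ cases no longer produce an out-of-line function at all: when MSEC_PER_SEC (or USEC_PER_SEC) is an exact multiple of HZ, the conversion is a single multiply, so the C definitions are only compiled for the awkward HZ values (presumably the exact-multiple case is provided inline from a header, which this hunk does not show). A sketch of the three arithmetic regimes, with HZ as a runtime parameter purely for illustration; the kernel's fallback actually uses precomputed reciprocal constants rather than a live divide:

```c
#include <stdio.h>

#define MSEC_PER_SEC 1000UL

/* The kernel selects exactly one branch at compile time from HZ;
 * here hz is an argument so all three cases can be exercised. */
static unsigned long jiffies_to_msecs_demo(unsigned long j, unsigned long hz)
{
    if (hz <= MSEC_PER_SEC && MSEC_PER_SEC % hz == 0)
        return (MSEC_PER_SEC / hz) * j;                  /* e.g. HZ=100,250,1000 */
    if (hz > MSEC_PER_SEC && hz % MSEC_PER_SEC == 0)
        return (j + hz / MSEC_PER_SEC - 1) / (hz / MSEC_PER_SEC); /* round up */
    /* fallback: full multiply/divide (kernel: reciprocal constants) */
    return j * MSEC_PER_SEC / hz;
}

int main(void)
{
    printf("%lu\n", jiffies_to_msecs_demo(250, 250));  /* 1000 ms */
    printf("%lu\n", jiffies_to_msecs_demo(3, 2000));   /* 2 ms, rounded up */
    printf("%lu\n", jiffies_to_msecs_demo(30, 300));   /* 100 ms */
    return 0;
}
```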
+3 -1
kernel/trace/bpf_trace.c
··· 2454 2454 struct seq_file *seq) 2455 2455 { 2456 2456 struct bpf_kprobe_multi_link *kmulti_link; 2457 + bool has_cookies; 2457 2458 2458 2459 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 2460 + has_cookies = !!kmulti_link->cookies; 2459 2461 2460 2462 seq_printf(seq, 2461 2463 "kprobe_cnt:\t%u\n" ··· 2469 2467 for (int i = 0; i < kmulti_link->cnt; i++) { 2470 2468 seq_printf(seq, 2471 2469 "%llu\t %pS\n", 2472 - kmulti_link->cookies[i], 2470 + has_cookies ? kmulti_link->cookies[i] : 0, 2473 2471 (void *)kmulti_link->addrs[i]); 2474 2472 } 2475 2473 }
+3 -29
lib/Kconfig.debug
··· 630 630 631 631 config WARN_CONTEXT_ANALYSIS 632 632 bool "Compiler context-analysis warnings" 633 - depends on CC_IS_CLANG && CLANG_VERSION >= 220000 633 + depends on CC_IS_CLANG && CLANG_VERSION >= 220100 634 634 # Branch profiling re-defines "if", which messes with the compiler's 635 635 # ability to analyze __cond_acquires(..), resulting in false positives. 636 636 depends on !TRACE_BRANCH_PROFILING ··· 641 641 and releasing user-definable "context locks". 642 642 643 643 Clang's name of the feature is "Thread Safety Analysis". Requires 644 - Clang 22 or later. 644 + Clang 22.1.0 or later. 645 645 646 646 Produces warnings by default. Select CONFIG_WERROR if you wish to 647 647 turn these warnings into errors. ··· 760 760 761 761 config DEBUG_OBJECTS 762 762 bool "Debug object operations" 763 + depends on PREEMPT_COUNT || !DEFERRED_STRUCT_PAGE_INIT 763 764 depends on DEBUG_KERNEL 764 765 help 765 766 If you say Y here, additional code will be inserted into the ··· 1766 1765 every process, showing its current stack trace. 1767 1766 It is also used by various kernel debugging features that require 1768 1767 stack trace generation. 1769 - 1770 - config WARN_ALL_UNSEEDED_RANDOM 1771 - bool "Warn for all uses of unseeded randomness" 1772 - default n 1773 - help 1774 - Some parts of the kernel contain bugs relating to their use of 1775 - cryptographically secure random numbers before it's actually possible 1776 - to generate those numbers securely. This setting ensures that these 1777 - flaws don't go unnoticed, by enabling a message, should this ever 1778 - occur. This will allow people with obscure setups to know when things 1779 - are going wrong, so that they might contact developers about fixing 1780 - it. 1781 - 1782 - Unfortunately, on some models of some architectures getting 1783 - a fully seeded CRNG is extremely difficult, and so this can 1784 - result in dmesg getting spammed for a surprisingly long 1785 - time. This is really bad from a security perspective, and 1786 - so architecture maintainers really need to do what they can 1787 - to get the CRNG seeded sooner after the system is booted. 1788 - However, since users cannot do anything actionable to 1789 - address this, by default this option is disabled. 1790 - 1791 - Say Y here if you want to receive warnings for all uses of 1792 - unseeded randomness. This will be of use primarily for 1793 - those developers interested in improving the security of 1794 - Linux kernels running on their architecture (or 1795 - subarchitecture). 1796 1768 1797 1769 config DEBUG_KOBJECT 1798 1770 bool "kobject debugging"
+18 -1
lib/debugobjects.c
··· 398 398 399 399 atomic_inc(&cpus_allocating); 400 400 while (pool_should_refill(&pool_global)) { 401 + gfp_t gfp = __GFP_HIGH | __GFP_NOWARN; 401 402 HLIST_HEAD(head); 402 403 403 - if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN)) 404 + /* 405 + * Allow reclaim only in preemptible context and during 406 + * early boot. If not preemptible, the caller might hold 407 + * locks causing a deadlock in the allocator. 408 + * 409 + * If the reclaim flag is not set during early boot then 410 + * allocations, which happen before deferred page 411 + * initialization has completed, will fail. 412 + * 413 + * In preemptible context the flag is harmless and not a 414 + * performance issue as that's usually invoked from slow 415 + * path initialization context. 416 + */ 417 + if (preemptible() || system_state < SYSTEM_SCHEDULING) 418 + gfp |= __GFP_KSWAPD_RECLAIM; 419 + 420 + if (!kmem_alloc_batch(&head, obj_cache, gfp)) 404 421 break; 405 422 406 423 guard(raw_spinlock_irqsave)(&pool_lock);
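Editor's note: the refill path previously always allocated with atomic-style flags; it now opts into kswapd-assisted reclaim exactly when that is safe, per the comment: in preemptible context (no caller-held locks to deadlock on) or before the scheduler starts (where deferred struct-page init would otherwise make the allocation fail). A compact model of the flag selection; the bit values are placeholders, not the real GFP bits:

```c
#include <stdbool.h>
#include <stdio.h>

#define GFP_HIGH           0x1u  /* placeholder values, not real GFP flags */
#define GFP_NOWARN         0x2u
#define GFP_KSWAPD_RECLAIM 0x4u

static unsigned int refill_gfp(bool preemptible, bool before_scheduling)
{
    unsigned int gfp = GFP_HIGH | GFP_NOWARN;

    /* Reclaim only when it cannot deadlock on caller-held locks, or
     * when early boot requires it for the allocation to succeed. */
    if (preemptible || before_scheduling)
        gfp |= GFP_KSWAPD_RECLAIM;
    return gfp;
}

int main(void)
{
    printf("atomic ctx: %#x\n", refill_gfp(false, false)); /* 0x3 */
    printf("early boot: %#x\n", refill_gfp(false, true));  /* 0x7 */
    return 0;
}
```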
+3
mm/damon/core.c
··· 1252 1252 { 1253 1253 int err; 1254 1254 1255 + if (!is_power_of_2(src->min_region_sz)) 1256 + return -EINVAL; 1257 + 1255 1258 err = damon_commit_schemes(dst, src); 1256 1259 if (err) 1257 1260 return err;
+3
mm/huge_memory.c
··· 94 94 95 95 inode = file_inode(vma->vm_file); 96 96 97 + if (IS_ANON_FILE(inode)) 98 + return false; 99 + 97 100 return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); 98 101 } 99 102
+23 -6
mm/kfence/core.c
··· 13 13 #include <linux/hash.h> 14 14 #include <linux/irq_work.h> 15 15 #include <linux/jhash.h> 16 + #include <linux/kasan-enabled.h> 16 17 #include <linux/kcsan-checks.h> 17 18 #include <linux/kfence.h> 18 19 #include <linux/kmemleak.h> ··· 918 917 return; 919 918 920 919 /* 920 + * If KASAN hardware tags are enabled, disable KFENCE, because it 921 + * does not support MTE yet. 922 + */ 923 + if (kasan_hw_tags_enabled()) { 924 + pr_info("disabled as KASAN HW tags are enabled\n"); 925 + if (__kfence_pool) { 926 + memblock_free(__kfence_pool, KFENCE_POOL_SIZE); 927 + __kfence_pool = NULL; 928 + } 929 + kfence_sample_interval = 0; 930 + return; 931 + } 932 + 933 + /* 921 934 * If the pool has already been initialized by arch, there is no need to 922 935 * re-allocate the memory pool. 923 936 */ ··· 1004 989 #ifdef CONFIG_CONTIG_ALLOC 1005 990 struct page *pages; 1006 991 1007 - pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node, 1008 - NULL); 992 + pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL | __GFP_SKIP_KASAN, 993 + first_online_node, NULL); 1009 994 if (!pages) 1010 995 return -ENOMEM; 1011 996 1012 997 __kfence_pool = page_to_virt(pages); 1013 - pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node, 1014 - NULL); 998 + pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL | __GFP_SKIP_KASAN, 999 + first_online_node, NULL); 1015 1000 if (pages) 1016 1001 kfence_metadata_init = page_to_virt(pages); 1017 1002 #else ··· 1021 1006 return -EINVAL; 1022 1007 } 1023 1008 1024 - __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); 1009 + __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, 1010 + GFP_KERNEL | __GFP_SKIP_KASAN); 1025 1011 if (!__kfence_pool) 1026 1012 return -ENOMEM; 1027 1013 1028 - kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL); 1014 + kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, 1015 + GFP_KERNEL | __GFP_SKIP_KASAN); 1029 1016 #endif 1030 1017 1031 1018 if (!kfence_metadata_init)
+6 -1
mm/memfd_luo.c
··· 326 326 struct memfd_luo_folio_ser *folios_ser; 327 327 struct memfd_luo_ser *ser; 328 328 329 - if (args->retrieved) 329 + /* 330 + * If retrieve was successful, nothing to do. If it failed, retrieve() 331 + * already cleaned up everything it could. So nothing to do there 332 + * either. Only need to clean up when retrieve was not called. 333 + */ 334 + if (args->retrieve_status) 330 335 return; 331 336 332 337 ser = phys_to_virt(args->serialized_data);
+5 -1
mm/mm_init.c
··· 1896 1896 for_each_node(nid) { 1897 1897 pg_data_t *pgdat; 1898 1898 1899 - if (!node_online(nid)) 1899 + /* 1900 + * If an architecture has not allocated node data for 1901 + * this node, presume the node is memoryless or offline. 1902 + */ 1903 + if (!NODE_DATA(nid)) 1900 1904 alloc_offline_node_data(nid); 1901 1905 1902 1906 pgdat = NODE_DATA(nid);
+2 -1
mm/page_alloc.c
··· 6928 6928 { 6929 6929 const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 6930 6930 const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | 6931 - __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO; 6931 + __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO | 6932 + __GFP_SKIP_KASAN; 6932 6933 const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 6933 6934 6934 6935 /*
+2 -2
mm/slab.h
··· 290 290 291 291 /* Determine object index from a given position */ 292 292 static inline unsigned int __obj_to_index(const struct kmem_cache *cache, 293 - void *addr, void *obj) 293 + void *addr, const void *obj) 294 294 { 295 295 return reciprocal_divide(kasan_reset_tag(obj) - addr, 296 296 cache->reciprocal_size); 297 297 } 298 298 299 299 static inline unsigned int obj_to_index(const struct kmem_cache *cache, 300 - const struct slab *slab, void *obj) 300 + const struct slab *slab, const void *obj) 301 301 { 302 302 if (is_kfence_address(obj)) 303 303 return 0;
+35 -16
mm/slub.c
··· 2041 2041 2042 2042 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG 2043 2043 2044 - static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) 2044 + static inline void mark_obj_codetag_empty(const void *obj) 2045 2045 { 2046 - struct slab *obj_exts_slab; 2046 + struct slab *obj_slab; 2047 2047 unsigned long slab_exts; 2048 2048 2049 - obj_exts_slab = virt_to_slab(obj_exts); 2050 - slab_exts = slab_obj_exts(obj_exts_slab); 2049 + obj_slab = virt_to_slab(obj); 2050 + slab_exts = slab_obj_exts(obj_slab); 2051 2051 if (slab_exts) { 2052 2052 get_slab_obj_exts(slab_exts); 2053 - unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, 2054 - obj_exts_slab, obj_exts); 2055 - struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab, 2053 + unsigned int offs = obj_to_index(obj_slab->slab_cache, 2054 + obj_slab, obj); 2055 + struct slabobj_ext *ext = slab_obj_ext(obj_slab, 2056 2056 slab_exts, offs); 2057 2057 2058 2058 if (unlikely(is_codetag_empty(&ext->ref))) { ··· 2090 2090 2091 2091 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 2092 2092 2093 - static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} 2093 + static inline void mark_obj_codetag_empty(const void *obj) {} 2094 2094 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; } 2095 2095 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 2096 2096 struct slabobj_ext *vec, unsigned int objects) {} ··· 2196 2196 retry: 2197 2197 old_exts = READ_ONCE(slab->obj_exts); 2198 2198 handle_failed_objexts_alloc(old_exts, vec, objects); 2199 - slab_set_stride(slab, sizeof(struct slabobj_ext)); 2200 2199 2201 2200 if (new_slab) { 2202 2201 /* ··· 2210 2211 * assign slabobj_exts in parallel. In this case the existing 2211 2212 * objcg vector should be reused. 2212 2213 */ 2213 - mark_objexts_empty(vec); 2214 + mark_obj_codetag_empty(vec); 2214 2215 if (unlikely(!allow_spin)) 2215 2216 kfree_nolock(vec); 2216 2217 else ··· 2253 2254 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that 2254 2255 * the extension for obj_exts is expected to be NULL. 2255 2256 */ 2256 - mark_objexts_empty(obj_exts); 2257 + mark_obj_codetag_empty(obj_exts); 2257 2258 if (allow_spin) 2258 2259 kfree(obj_exts); 2259 2260 else ··· 2271 2272 void *addr; 2272 2273 unsigned long obj_exts; 2273 2274 2275 + /* Initialize stride early to avoid memory ordering issues */ 2276 + slab_set_stride(slab, sizeof(struct slabobj_ext)); 2277 + 2274 2278 if (!need_slab_obj_exts(s)) 2275 2279 return; 2276 2280 ··· 2290 2288 obj_exts |= MEMCG_DATA_OBJEXTS; 2291 2289 #endif 2292 2290 slab->obj_exts = obj_exts; 2293 - slab_set_stride(slab, sizeof(struct slabobj_ext)); 2294 2291 } else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) { 2295 2292 unsigned int offset = obj_exts_offset_in_object(s); 2296 2293 ··· 2312 2311 } 2313 2312 2314 2313 #else /* CONFIG_SLAB_OBJ_EXT */ 2314 + 2315 + static inline void mark_obj_codetag_empty(const void *obj) 2316 + { 2317 + } 2315 2318 2316 2319 static inline void init_slab_obj_exts(struct slab *slab) 2317 2320 { ··· 2788 2783 2789 2784 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf) 2790 2785 { 2786 + /* 2787 + * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its 2788 + * corresponding extension is NULL and alloc_tag_sub() will throw a 2789 + * warning, therefore replace NULL with CODETAG_EMPTY to indicate 2790 + * that the extension for this sheaf is expected to be NULL. 
2791 + */ 2792 + if (s->flags & SLAB_KMALLOC) 2793 + mark_obj_codetag_empty(sheaf); 2794 + 2791 2795 kfree(sheaf); 2792 2796 2793 2797 stat(s, SHEAF_FREE); ··· 2836 2822 if (!sheaf) 2837 2823 return NULL; 2838 2824 2839 - if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) { 2825 + if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) { 2840 2826 free_empty_sheaf(s, sheaf); 2841 2827 return NULL; 2842 2828 } ··· 4589 4575 return NULL; 4590 4576 4591 4577 if (empty) { 4592 - if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) { 4578 + if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) { 4593 4579 full = empty; 4594 4580 } else { 4595 4581 /* ··· 4904 4890 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s, 4905 4891 struct slab_sheaf *sheaf, gfp_t gfp) 4906 4892 { 4907 - int ret = 0; 4893 + gfp_t gfp_nomemalloc; 4894 + int ret; 4908 4895 4909 - ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC); 4896 + gfp_nomemalloc = gfp | __GFP_NOMEMALLOC; 4897 + if (gfp_pfmemalloc_allowed(gfp)) 4898 + gfp_nomemalloc |= __GFP_NOWARN; 4899 + 4900 + ret = refill_sheaf(s, sheaf, gfp_nomemalloc); 4910 4901 4911 4902 if (likely(!ret || !gfp_pfmemalloc_allowed(gfp))) 4912 4903 return ret;
+1
net/bluetooth/hci_sock.c
··· 2166 2166 mgmt_cleanup(sk); 2167 2167 skb_queue_purge(&sk->sk_receive_queue); 2168 2168 skb_queue_purge(&sk->sk_write_queue); 2169 + skb_queue_purge(&sk->sk_error_queue); 2169 2170 } 2170 2171 2171 2172 static const struct proto_ops hci_sock_ops = {
+1 -1
net/bluetooth/hci_sync.c
··· 4592 4592 { 4593 4593 int err; 4594 4594 4595 - if (iso_capable(hdev)) { 4595 + if (cis_capable(hdev)) { 4596 4596 /* Connected Isochronous Channels (Host Support) */ 4597 4597 err = hci_le_set_host_feature_sync(hdev, 32, 4598 4598 (iso_enabled(hdev) ? 0x01 :
+1
net/bluetooth/iso.c
··· 746 746 747 747 skb_queue_purge(&sk->sk_receive_queue); 748 748 skb_queue_purge(&sk->sk_write_queue); 749 + skb_queue_purge(&sk->sk_error_queue); 749 750 } 750 751 751 752 static void iso_sock_cleanup_listen(struct sock *parent)
+74 -29
net/bluetooth/l2cap_core.c
··· 4916 4916 goto response_unlock; 4917 4917 } 4918 4918 4919 + /* Check if Key Size is sufficient for the security level */ 4920 + if (!l2cap_check_enc_key_size(conn->hcon, pchan)) { 4921 + result = L2CAP_CR_LE_BAD_KEY_SIZE; 4922 + chan = NULL; 4923 + goto response_unlock; 4924 + } 4925 + 4919 4926 /* Check for valid dynamic CID range */ 4920 4927 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { 4921 4928 result = L2CAP_CR_LE_INVALID_SCID; ··· 5058 5051 struct l2cap_chan *chan, *pchan; 5059 5052 u16 mtu, mps; 5060 5053 __le16 psm; 5061 - u8 result, len = 0; 5054 + u8 result, rsp_len = 0; 5062 5055 int i, num_scid; 5063 5056 bool defer = false; 5064 5057 5065 5058 if (!enable_ecred) 5066 5059 return -EINVAL; 5060 + 5061 + memset(pdu, 0, sizeof(*pdu)); 5067 5062 5068 5063 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { 5069 5064 result = L2CAP_CR_LE_INVALID_PARAMS; ··· 5074 5065 5075 5066 cmd_len -= sizeof(*req); 5076 5067 num_scid = cmd_len / sizeof(u16); 5068 + 5069 + /* Always respond with the same number of scids as in the request */ 5070 + rsp_len = cmd_len; 5077 5071 5078 5072 if (num_scid > L2CAP_ECRED_MAX_CID) { 5079 5073 result = L2CAP_CR_LE_INVALID_PARAMS; ··· 5087 5075 mps = __le16_to_cpu(req->mps); 5088 5076 5089 5077 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) { 5090 - result = L2CAP_CR_LE_UNACCEPT_PARAMS; 5078 + result = L2CAP_CR_LE_INVALID_PARAMS; 5091 5079 goto response; 5092 5080 } 5093 5081 ··· 5107 5095 5108 5096 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); 5109 5097 5110 5098 - memset(pdu, 0, sizeof(*pdu)); 5111 - 5112 5098 /* Check if we have socket listening on psm */ 5113 5099 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, 5114 5100 &conn->hcon->dst, LE_LINK); ··· 5119 5109 5120 5110 if (!smp_sufficient_security(conn->hcon, pchan->sec_level, 5121 5111 SMP_ALLOW_STK)) { 5122 - result = L2CAP_CR_LE_AUTHENTICATION; 5112 + result = pchan->sec_level == BT_SECURITY_MEDIUM ? 5113 + L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION; 5114 + goto unlock; 5115 + } 5116 + 5117 + /* Check if the listening channel has set an output MTU then the 5118 + * requested MTU shall be greater than or equal to that value.
5119 + */ 5120 + if (pchan->omtu && mtu < pchan->omtu) { 5121 + result = L2CAP_CR_LE_UNACCEPT_PARAMS; 5123 5122 goto unlock; 5124 5123 } 5125 5124 ··· 5140 5121 BT_DBG("scid[%d] 0x%4.4x", i, scid); 5141 5122 5142 5123 pdu->dcid[i] = 0x0000; 5143 - len += sizeof(*pdu->dcid); 5144 5124 5145 5125 /* Check for valid dynamic CID range */ 5146 5126 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { ··· 5206 5188 return 0; 5207 5189 5208 5190 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP, 5209 - sizeof(*pdu) + len, pdu); 5191 + sizeof(*pdu) + rsp_len, pdu); 5210 5192 5211 5193 return 0; 5212 5194 } ··· 5328 5310 struct l2cap_ecred_reconf_req *req = (void *) data; 5329 5311 struct l2cap_ecred_reconf_rsp rsp; 5330 5312 u16 mtu, mps, result; 5331 - struct l2cap_chan *chan; 5313 + struct l2cap_chan *chan[L2CAP_ECRED_MAX_CID] = {}; 5332 5314 int i, num_scid; 5333 5315 5334 5316 if (!enable_ecred) 5335 5317 return -EINVAL; 5336 5318 5337 - if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) { 5338 - result = L2CAP_CR_LE_INVALID_PARAMS; 5319 + if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { 5320 + result = L2CAP_RECONF_INVALID_CID; 5339 5321 goto respond; 5340 5322 } 5341 5323 ··· 5345 5327 BT_DBG("mtu %u mps %u", mtu, mps); 5346 5328 5347 5329 if (mtu < L2CAP_ECRED_MIN_MTU) { 5348 - result = L2CAP_RECONF_INVALID_MTU; 5330 + result = L2CAP_RECONF_INVALID_PARAMS; 5349 5331 goto respond; 5350 5332 } 5351 5333 5352 5334 if (mps < L2CAP_ECRED_MIN_MPS) { 5353 - result = L2CAP_RECONF_INVALID_MPS; 5335 + result = L2CAP_RECONF_INVALID_PARAMS; 5354 5336 goto respond; 5355 5337 } 5356 5338 5357 5339 cmd_len -= sizeof(*req); 5358 5340 num_scid = cmd_len / sizeof(u16); 5341 + 5342 + if (num_scid > L2CAP_ECRED_MAX_CID) { 5343 + result = L2CAP_RECONF_INVALID_PARAMS; 5344 + goto respond; 5345 + } 5346 + 5359 5347 result = L2CAP_RECONF_SUCCESS; 5360 5348 5349 + /* Check if each SCID, MTU and MPS are valid */ 5361 5350 for (i = 0; i < num_scid; i++) { 5362 5351 u16 scid; 5363 5352 5364 5353 scid = __le16_to_cpu(req->scid[i]); 5365 - if (!scid) 5366 - return -EPROTO; 5367 - 5368 - chan = __l2cap_get_chan_by_dcid(conn, scid); 5369 - if (!chan) 5370 - continue; 5371 - 5372 - /* If the MTU value is decreased for any of the included 5373 - * channels, then the receiver shall disconnect all 5374 - * included channels. 5375 - */ 5376 - if (chan->omtu > mtu) { 5377 - BT_ERR("chan %p decreased MTU %u -> %u", chan, 5378 - chan->omtu, mtu); 5379 - result = L2CAP_RECONF_INVALID_MTU; 5354 + if (!scid) { 5355 + result = L2CAP_RECONF_INVALID_CID; 5356 + goto respond; 5380 5357 } 5381 5358 5382 - chan->omtu = mtu; 5383 - chan->remote_mps = mps; 5359 + chan[i] = __l2cap_get_chan_by_dcid(conn, scid); 5360 + if (!chan[i]) { 5361 + result = L2CAP_RECONF_INVALID_CID; 5362 + goto respond; 5363 + } 5364 + 5365 + /* The MTU field shall be greater than or equal to the greatest 5366 + * current MTU size of these channels. 5367 + */ 5368 + if (chan[i]->omtu > mtu) { 5369 + BT_ERR("chan %p decreased MTU %u -> %u", chan[i], 5370 + chan[i]->omtu, mtu); 5371 + result = L2CAP_RECONF_INVALID_MTU; 5372 + goto respond; 5373 + } 5374 + 5375 + /* If more than one channel is being configured, the MPS field 5376 + * shall be greater than or equal to the current MPS size of 5377 + * each of these channels. If only one channel is being 5378 + * configured, the MPS field may be less than the current MPS 5379 + * of that channel. 
5380 + */ 5381 + if (chan[i]->remote_mps >= mps && i) { 5382 + BT_ERR("chan %p decreased MPS %u -> %u", chan[i], 5383 + chan[i]->remote_mps, mps); 5384 + result = L2CAP_RECONF_INVALID_MPS; 5385 + goto respond; 5386 + } 5387 + } 5388 + 5389 + /* Commit the new MTU and MPS values after checking they are valid */ 5390 + for (i = 0; i < num_scid; i++) { 5391 + chan[i]->omtu = mtu; 5392 + chan[i]->remote_mps = mps; 5384 5393 } 5385 5394 5386 5395 respond:
+12 -4
net/bluetooth/l2cap_sock.c
··· 1029 1029 break; 1030 1030 } 1031 1031 1032 - /* Setting is not supported as it's the remote side that 1033 - * decides this. 1034 - */ 1035 - err = -EPERM; 1032 + /* Only allow setting output MTU when not connected */ 1033 + if (sk->sk_state == BT_CONNECTED) { 1034 + err = -EISCONN; 1035 + break; 1036 + } 1037 + 1038 + err = copy_safe_from_sockptr(&mtu, sizeof(mtu), optval, optlen); 1039 + if (err) 1040 + break; 1041 + 1042 + chan->omtu = mtu; 1036 1043 break; 1037 1044 1038 1045 case BT_RCVMTU: ··· 1823 1816 1824 1817 skb_queue_purge(&sk->sk_receive_queue); 1825 1818 skb_queue_purge(&sk->sk_write_queue); 1819 + skb_queue_purge(&sk->sk_error_queue); 1826 1820 } 1827 1821 1828 1822 static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
+1
net/bluetooth/sco.c
··· 470 470 471 471 skb_queue_purge(&sk->sk_receive_queue); 472 472 skb_queue_purge(&sk->sk_write_queue); 473 + skb_queue_purge(&sk->sk_error_queue); 473 474 } 474 475 475 476 static void sco_sock_cleanup_listen(struct sock *parent)
+23 -12
net/core/dev.c
··· 4822 4822 * to -1 or to their cpu id, but not to our id. 4823 4823 */ 4824 4824 if (READ_ONCE(txq->xmit_lock_owner) != cpu) { 4825 + bool is_list = false; 4826 + 4825 4827 if (dev_xmit_recursion()) 4826 4828 goto recursion_alert; 4827 4829 ··· 4834 4832 HARD_TX_LOCK(dev, txq, cpu); 4835 4833 4836 4834 if (!netif_xmit_stopped(txq)) { 4835 + is_list = !!skb->next; 4836 + 4837 4837 dev_xmit_recursion_inc(); 4838 4838 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 4839 4839 dev_xmit_recursion_dec(); 4840 - if (dev_xmit_complete(rc)) { 4841 - HARD_TX_UNLOCK(dev, txq); 4842 - goto out; 4843 - } 4840 + 4841 + /* GSO segments a single SKB into 4842 + * a list of frames. TCP expects error 4843 + * to mean none of the data was sent. 4844 + */ 4845 + if (is_list) 4846 + rc = NETDEV_TX_OK; 4844 4847 } 4845 4848 HARD_TX_UNLOCK(dev, txq); 4849 + if (!skb) /* xmit completed */ 4850 + goto out; 4851 + 4846 4852 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 4847 4853 dev->name); 4854 + /* NETDEV_TX_BUSY or queue was stopped */ 4855 + if (!is_list) 4856 + rc = -ENETDOWN; 4848 4857 } else { 4849 4858 /* Recursion is detected! It is possible, 4850 4859 * unfortunately ··· 4863 4850 recursion_alert: 4864 4851 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4865 4852 dev->name); 4853 + rc = -ENETDOWN; 4866 4854 } 4867 4855 } 4868 4856 4869 - rc = -ENETDOWN; 4870 4857 rcu_read_unlock_bh(); 4871 4858 4872 4859 dev_core_stats_tx_dropped_inc(dev); ··· 5005 4992 5006 4993 static struct rps_dev_flow * 5007 4994 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 5008 - struct rps_dev_flow *rflow, u16 next_cpu, u32 hash, 5009 - u32 flow_id) 4995 + struct rps_dev_flow *rflow, u16 next_cpu, u32 hash) 5010 4996 { 5011 4997 if (next_cpu < nr_cpu_ids) { 5012 4998 u32 head; ··· 5016 5004 struct rps_dev_flow *tmp_rflow; 5017 5005 unsigned int tmp_cpu; 5018 5006 u16 rxq_index; 5007 + u32 flow_id; 5019 5008 int rc; 5020 5009 5021 5010 /* Should we steer this flow to a different hardware queue? */ ··· 5032 5019 if (!flow_table) 5033 5020 goto out; 5034 5021 5022 + flow_id = rfs_slot(hash, flow_table); 5035 5023 tmp_rflow = &flow_table->flows[flow_id]; 5036 5024 tmp_cpu = READ_ONCE(tmp_rflow->cpu); 5037 5025 ··· 5080 5066 struct rps_dev_flow_table *flow_table; 5081 5067 struct rps_map *map; 5082 5068 int cpu = -1; 5083 - u32 flow_id; 5084 5069 u32 tcpu; 5085 5070 u32 hash; 5086 5071 ··· 5126 5113 /* OK, now we know there is a match, 5127 5114 * we can look at the local (per receive queue) flow table 5128 5115 */ 5129 - flow_id = rfs_slot(hash, flow_table); 5130 - rflow = &flow_table->flows[flow_id]; 5116 + rflow = &flow_table->flows[rfs_slot(hash, flow_table)]; 5131 5117 tcpu = rflow->cpu; 5132 5118 5133 5119 /* ··· 5145 5133 ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) - 5146 5134 rflow->last_qtail)) >= 0)) { 5147 5135 tcpu = next_cpu; 5148 - rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash, 5149 - flow_id); 5136 + rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash); 5150 5137 } 5151 5138 5152 5139 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
+18 -5
net/core/skbuff.c
··· 5590 5590 5591 5591 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 5592 5592 { 5593 - bool ret; 5593 + struct socket *sock; 5594 + struct file *file; 5595 + bool ret = false; 5594 5596 5595 5597 if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data))) 5596 5598 return true; 5597 5599 5598 - read_lock_bh(&sk->sk_callback_lock); 5599 - ret = sk->sk_socket && sk->sk_socket->file && 5600 - file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 5601 - read_unlock_bh(&sk->sk_callback_lock); 5600 + /* The sk pointer remains valid as long as the skb is. The sk_socket and 5601 + * file pointer may become NULL if the socket is closed. Both structures 5602 + * (including file->cred) are RCU freed which means they can be accessed 5603 + * within a RCU read section. 5604 + */ 5605 + rcu_read_lock(); 5606 + sock = READ_ONCE(sk->sk_socket); 5607 + if (!sock) 5608 + goto out; 5609 + file = READ_ONCE(sock->file); 5610 + if (!file) 5611 + goto out; 5612 + ret = file_ns_capable(file, &init_user_ns, CAP_NET_RAW); 5613 + out: 5614 + rcu_read_unlock(); 5602 5615 return ret; 5603 5616 } 5604 5617
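Editor's note: the capability check drops read_lock_bh() in favor of an RCU read section with READ_ONCE() snapshots of each pointer in the sk->sk_socket->file chain, NULL-checking every hop. The sketch below reproduces only the snapshot-and-check shape using C11 atomics; it deliberately does not model RCU's deferred freeing, which is what actually keeps the snapshots safe to dereference in the kernel:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct file   { bool cap_net_raw; };
struct socket { _Atomic(struct file *) file; };
struct sock   { _Atomic(struct socket *) sk_socket; };

/* Lock-free read of sk->sk_socket->file: take one snapshot of each
 * pointer and NULL-check it, instead of holding a lock across the walk. */
static bool may_tx_timestamp(struct sock *sk)
{
    struct socket *sock = atomic_load(&sk->sk_socket);
    if (!sock)
        return false;
    struct file *file = atomic_load(&sock->file);
    if (!file)
        return false;
    return file->cap_net_raw;
}

int main(void)
{
    struct file f = { .cap_net_raw = true };
    struct socket s;
    struct sock sk;

    atomic_init(&s.file, &f);
    atomic_init(&sk.sk_socket, &s);
    printf("%d\n", may_tx_timestamp(&sk));               /* 1 */

    atomic_store(&sk.sk_socket, (struct socket *)NULL);  /* socket closed */
    printf("%d\n", may_tx_timestamp(&sk));               /* 0 */
    return 0;
}
```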
+1 -1
net/ipv4/syncookies.c
··· 203 203 bool own_req; 204 204 205 205 child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst, 206 - NULL, &own_req); 206 + NULL, &own_req, NULL); 207 207 if (child) { 208 208 refcount_set(&req->rsk_refcnt, 1); 209 209 sock_rps_save_rxhash(child, skb);
+1 -1
net/ipv4/tcp_fastopen.c
··· 333 333 bool own_req; 334 334 335 335 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, 336 - NULL, &own_req); 336 + NULL, &own_req, NULL); 337 337 if (!child) 338 338 return NULL; 339 339
+14 -4
net/ipv4/tcp_input.c
··· 4858 4858 */ 4859 4859 4860 4860 static enum skb_drop_reason tcp_sequence(const struct sock *sk, 4861 - u32 seq, u32 end_seq) 4861 + u32 seq, u32 end_seq, 4862 + const struct tcphdr *th) 4862 4863 { 4863 4864 const struct tcp_sock *tp = tcp_sk(sk); 4865 + u32 seq_limit; 4864 4866 4865 4867 if (before(end_seq, tp->rcv_wup)) 4866 4868 return SKB_DROP_REASON_TCP_OLD_SEQUENCE; 4867 4869 4868 - if (after(end_seq, tp->rcv_nxt + tcp_receive_window(tp))) { 4869 - if (after(seq, tp->rcv_nxt + tcp_receive_window(tp))) 4870 + seq_limit = tp->rcv_nxt + tcp_receive_window(tp); 4871 + if (unlikely(after(end_seq, seq_limit))) { 4872 + /* Some stacks are known to handle FIN incorrectly; allow the 4873 + * FIN to extend beyond the window and check it in detail later. 4874 + */ 4875 + if (!after(end_seq - th->fin, seq_limit)) 4876 + return SKB_NOT_DROPPED_YET; 4877 + 4878 + if (after(seq, seq_limit)) 4870 4879 return SKB_DROP_REASON_TCP_INVALID_SEQUENCE; 4871 4880 4872 4881 /* Only accept this packet if receive queue is empty. */ ··· 6388 6379 6389 6380 step1: 6390 6381 /* Step 1: check sequence number */ 6391 - reason = tcp_sequence(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 6382 + reason = tcp_sequence(sk, TCP_SKB_CB(skb)->seq, 6383 + TCP_SKB_CB(skb)->end_seq, th); 6392 6384 if (reason) { 6393 6385 /* RFC793, page 37: "In all states except SYN-SENT, all reset 6394 6386 * (RST) segments are validated by checking their SEQ-fields."
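Editor's note: TCP sequence arithmetic is done modulo 2^32, so the window check relies on wraparound-safe comparisons, and the fix above then relaxes it by exactly one sequence number: a segment whose payload fits the window but whose FIN flag occupies the slot just past seq_limit is no longer dropped outright. A self-contained sketch of both pieces (the numeric values are arbitrary):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe sequence comparisons, as in the kernel's before()/after() */
static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool after(uint32_t a, uint32_t b)  { return before(b, a); }

/* Relaxed window check: if only the FIN's sequence slot overflows the
 * window, strip it and re-check instead of rejecting the segment. */
static bool seq_ok(uint32_t end_seq, uint32_t seq_limit, bool fin)
{
    if (!after(end_seq, seq_limit))
        return true;
    return !after(end_seq - fin, seq_limit);
}

int main(void)
{
    uint32_t limit = 1000;

    printf("%d\n", seq_ok(1000, limit, false)); /* 1: exactly at the edge */
    printf("%d\n", seq_ok(1001, limit, false)); /* 0: data beyond the window */
    printf("%d\n", seq_ok(1001, limit, true));  /* 1: only the FIN overflows */
    return 0;
}
```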
+7 -1
net/ipv4/tcp_ipv4.c
··· 1705 1705 struct request_sock *req, 1706 1706 struct dst_entry *dst, 1707 1707 struct request_sock *req_unhash, 1708 - bool *own_req) 1708 + bool *own_req, 1709 + void (*opt_child_init)(struct sock *newsk, 1710 + const struct sock *sk)) 1709 1711 { 1710 1712 struct inet_request_sock *ireq; 1711 1713 bool found_dup_sk = false; ··· 1759 1757 } 1760 1758 sk_setup_caps(newsk, dst); 1761 1759 1760 + #if IS_ENABLED(CONFIG_IPV6) 1761 + if (opt_child_init) 1762 + opt_child_init(newsk, sk); 1763 + #endif 1762 1764 tcp_ca_openreq_child(newsk, dst); 1763 1765 1764 1766 tcp_sync_mss(newsk, dst4_mtu(dst));
+1 -1
net/ipv4/tcp_minisocks.c
··· 925 925 * socket is created, wait for troubles. 926 926 */ 927 927 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, 928 - req, &own_req); 928 + req, &own_req, NULL); 929 929 if (!child) 930 930 goto listen_overflow; 931 931
+1 -2
net/ipv4/udplite.c
··· 20 20 /* Designate sk as UDP-Lite socket */ 21 21 static int udplite_sk_init(struct sock *sk) 22 22 { 23 - udp_init_sock(sk); 24 23 pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, " 25 24 "please contact the netdev mailing list\n"); 26 - return 0; 25 + return udp_init_sock(sk); 27 26 } 28 27 29 28 static int udplite_rcv(struct sk_buff *skb)
+42 -56
net/ipv6/tcp_ipv6.c
··· 1312 1312 sizeof(struct inet6_skb_parm)); 1313 1313 } 1314 1314 1315 + /* Called from tcp_v4_syn_recv_sock() for v6_mapped children. */ 1316 + static void tcp_v6_mapped_child_init(struct sock *newsk, const struct sock *sk) 1317 + { 1318 + struct inet_sock *newinet = inet_sk(newsk); 1319 + struct ipv6_pinfo *newnp; 1320 + 1321 + newinet->pinet6 = newnp = tcp_inet6_sk(newsk); 1322 + newinet->ipv6_fl_list = NULL; 1323 + 1324 + memcpy(newnp, tcp_inet6_sk(sk), sizeof(struct ipv6_pinfo)); 1325 + 1326 + newnp->saddr = newsk->sk_v6_rcv_saddr; 1327 + 1328 + inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; 1329 + if (sk_is_mptcp(newsk)) 1330 + mptcpv6_handle_mapped(newsk, true); 1331 + newsk->sk_backlog_rcv = tcp_v4_do_rcv; 1332 + #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) 1333 + tcp_sk(newsk)->af_specific = &tcp_sock_ipv6_mapped_specific; 1334 + #endif 1335 + 1336 + newnp->ipv6_mc_list = NULL; 1337 + newnp->ipv6_ac_list = NULL; 1338 + newnp->pktoptions = NULL; 1339 + newnp->opt = NULL; 1340 + 1341 + /* tcp_v4_syn_recv_sock() has initialized newinet->mc_{index,ttl} */ 1342 + newnp->mcast_oif = newinet->mc_index; 1343 + newnp->mcast_hops = newinet->mc_ttl; 1344 + 1345 + newnp->rcv_flowinfo = 0; 1346 + if (inet6_test_bit(REPFLOW, sk)) 1347 + newnp->flow_label = 0; 1348 + } 1349 + 1315 1350 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, 1316 1351 struct request_sock *req, 1317 1352 struct dst_entry *dst, 1318 1353 struct request_sock *req_unhash, 1319 - bool *own_req) 1354 + bool *own_req, 1355 + void (*opt_child_init)(struct sock *newsk, 1356 + const struct sock *sk)) 1320 1357 { 1321 1358 const struct ipv6_pinfo *np = tcp_inet6_sk(sk); 1322 1359 struct inet_request_sock *ireq; ··· 1369 1332 #endif 1370 1333 struct flowi6 fl6; 1371 1334 1372 - if (skb->protocol == htons(ETH_P_IP)) { 1373 - /* 1374 - * v6 mapped 1375 - */ 1376 - 1377 - newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst, 1378 - req_unhash, own_req); 1379 - 1380 - if (!newsk) 1381 - return NULL; 1382 - 1383 - newinet = inet_sk(newsk); 1384 - newinet->pinet6 = tcp_inet6_sk(newsk); 1385 - newinet->ipv6_fl_list = NULL; 1386 - 1387 - newnp = tcp_inet6_sk(newsk); 1388 - newtp = tcp_sk(newsk); 1389 - 1390 - memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 1391 - 1392 - newnp->saddr = newsk->sk_v6_rcv_saddr; 1393 - 1394 - inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; 1395 - if (sk_is_mptcp(newsk)) 1396 - mptcpv6_handle_mapped(newsk, true); 1397 - newsk->sk_backlog_rcv = tcp_v4_do_rcv; 1398 - #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) 1399 - newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1400 - #endif 1401 - 1402 - newnp->ipv6_mc_list = NULL; 1403 - newnp->ipv6_ac_list = NULL; 1404 - newnp->pktoptions = NULL; 1405 - newnp->opt = NULL; 1406 - newnp->mcast_oif = inet_iif(skb); 1407 - newnp->mcast_hops = ip_hdr(skb)->ttl; 1408 - newnp->rcv_flowinfo = 0; 1409 - if (inet6_test_bit(REPFLOW, sk)) 1410 - newnp->flow_label = 0; 1411 - 1412 - /* 1413 - * No need to charge this sock to the relevant IPv6 refcnt debug socks count 1414 - * here, tcp_create_openreq_child now does this for us, see the comment in 1415 - * that function for the gory details. -acme 1416 - */ 1417 - 1418 - /* It is tricky place. Until this moment IPv4 tcp 1419 - worked with IPv6 icsk.icsk_af_ops. 1420 - Sync it now. 
1421 - */ 1422 - tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); 1423 - 1424 - return newsk; 1425 - } 1426 - 1335 + if (skb->protocol == htons(ETH_P_IP)) 1336 + return tcp_v4_syn_recv_sock(sk, skb, req, dst, 1337 + req_unhash, own_req, 1338 + tcp_v6_mapped_child_init); 1427 1339 ireq = inet_rsk(req); 1428 1340 1429 1341 if (sk_acceptq_is_full(sk))
+1 -2
net/ipv6/udplite.c
··· 16 16 17 17 static int udplitev6_sk_init(struct sock *sk) 18 18 { 19 - udpv6_init_sock(sk); 20 19 pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, " 21 20 "please contact the netdev mailing list\n"); 22 - return 0; 21 + return udpv6_init_sock(sk); 23 22 } 24 23 25 24 static int udplitev6_rcv(struct sk_buff *skb)
+5 -2
net/ipv6/xfrm6_policy.c
··· 57 57 struct dst_entry *dst; 58 58 struct net_device *dev; 59 59 struct inet6_dev *idev; 60 + int err; 60 61 61 62 dst = xfrm6_dst_lookup(params); 62 63 if (IS_ERR(dst)) ··· 69 68 return -EHOSTUNREACH; 70 69 } 71 70 dev = idev->dev; 72 - ipv6_dev_get_saddr(dev_net(dev), dev, &params->daddr->in6, 0, 73 - &saddr->in6); 71 + err = ipv6_dev_get_saddr(dev_net(dev), dev, &params->daddr->in6, 0, 72 + &saddr->in6); 74 73 dst_release(dst); 74 + if (err) 75 + return -EHOSTUNREACH; 75 76 return 0; 76 77 } 77 78
+19 -2
net/kcm/kcmsock.c
··· 628 628 skb = txm->frag_skb; 629 629 } 630 630 631 - if (WARN_ON(!skb_shinfo(skb)->nr_frags) || 631 + if (WARN_ON_ONCE(!skb_shinfo(skb)->nr_frags) || 632 632 WARN_ON_ONCE(!skb_frag_page(&skb_shinfo(skb)->frags[0]))) { 633 633 ret = -EINVAL; 634 634 goto out; ··· 749 749 { 750 750 struct sock *sk = sock->sk; 751 751 struct kcm_sock *kcm = kcm_sk(sk); 752 - struct sk_buff *skb = NULL, *head = NULL; 752 + struct sk_buff *skb = NULL, *head = NULL, *frag_prev = NULL; 753 753 size_t copy, copied = 0; 754 754 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 755 755 int eor = (sock->type == SOCK_DGRAM) ? ··· 824 824 else 825 825 skb->next = tskb; 826 826 827 + frag_prev = skb; 827 828 skb = tskb; 828 829 skb->ip_summed = CHECKSUM_UNNECESSARY; 829 830 continue; ··· 933 932 934 933 out_error: 935 934 kcm_push(kcm); 935 + 936 + /* When MAX_SKB_FRAGS was reached, a new skb was allocated and 937 + * linked into the frag_list before data copy. If the copy 938 + * subsequently failed, this skb has zero frags. Remove it from 939 + * the frag_list to prevent kcm_write_msgs from later hitting 940 + * WARN_ON(!skb_shinfo(skb)->nr_frags). 941 + */ 942 + if (frag_prev && !skb_shinfo(skb)->nr_frags) { 943 + if (head == frag_prev) 944 + skb_shinfo(head)->frag_list = NULL; 945 + else 946 + frag_prev->next = NULL; 947 + kfree_skb(skb); 948 + /* Update skb as it may be saved in partial_message via goto */ 949 + skb = frag_prev; 950 + } 936 951 937 952 if (sock->type == SOCK_SEQPACKET) { 938 953 /* Wrote some bytes before encountering an
+2
net/mac80211/link.c
··· 281 281 struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]; 282 282 struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS]; 283 283 bool use_deflink = old_links == 0; /* set for error case */ 284 + bool non_sta = sdata->vif.type != NL80211_IFTYPE_STATION; 284 285 285 286 lockdep_assert_wiphy(sdata->local->hw.wiphy); 286 287 ··· 338 337 link = links[link_id]; 339 338 ieee80211_link_init(sdata, link_id, &link->data, &link->conf); 340 339 ieee80211_link_setup(&link->data); 340 + ieee80211_set_wmm_default(&link->data, true, non_sta); 341 341 } 342 342 343 343 if (new_links == 0)
+3
net/mac80211/mesh.c
··· 1635 1635 if (!mesh_matches_local(sdata, elems)) 1636 1636 goto free; 1637 1637 1638 + if (!elems->mesh_chansw_params_ie) 1639 + goto free; 1640 + 1638 1641 ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl; 1639 1642 if (!--ifmsh->chsw_ttl) 1640 1643 fwd_csa = false;
+3
net/mac80211/mlme.c
··· 7085 7085 control = le16_to_cpu(prof->control); 7086 7086 link_id = control & IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID; 7087 7087 7088 + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) 7089 + continue; 7090 + 7088 7091 removed_links |= BIT(link_id); 7089 7092 7090 7093 /* the MAC address should not be included, but handle it */
+4 -2
net/mptcp/subflow.c
··· 808 808 struct request_sock *req, 809 809 struct dst_entry *dst, 810 810 struct request_sock *req_unhash, 811 - bool *own_req) 811 + bool *own_req, 812 + void (*opt_child_init)(struct sock *newsk, 813 + const struct sock *sk)) 812 814 { 813 815 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk); 814 816 struct mptcp_subflow_request_sock *subflow_req; ··· 857 855 858 856 create_child: 859 857 child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst, 860 - req_unhash, own_req); 858 + req_unhash, own_req, opt_child_init); 861 859 862 860 if (child && *own_req) { 863 861 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
+1 -1
net/netfilter/nf_conntrack_h323_asn1.c
··· 796 796 797 797 if (ext || (son->attr & OPEN)) { 798 798 BYTE_ALIGN(bs); 799 - if (nf_h323_error_boundary(bs, len, 0)) 799 + if (nf_h323_error_boundary(bs, 2, 0)) 800 800 return H323_ERROR_BOUND; 801 801 len = get_len(bs); 802 802 if (nf_h323_error_boundary(bs, len, 0))
+38 -1
net/psp/psp_main.c
··· 166 166 { 167 167 struct udphdr *uh = udp_hdr(skb); 168 168 struct psphdr *psph = (struct psphdr *)(uh + 1); 169 + const struct sock *sk = skb->sk; 169 170 170 171 uh->dest = htons(PSP_DEFAULT_UDP_PORT); 171 - uh->source = udp_flow_src_port(net, skb, 0, 0, false); 172 + 173 + /* A bit of theory: Selection of the source port. 174 + * 175 + * We need some entropy, so that multiple flows use different 176 + * source ports for better RSS spreading at the receiver. 177 + * 178 + * We also need that all packets belonging to one TCP flow 179 + * use the same source port through their duration, 180 + * so that all these packets land in the same receive queue. 181 + * 182 + * udp_flow_src_port() is using sk_txhash, inherited from 183 + * skb_set_hash_from_sk() call in __tcp_transmit_skb(). 184 + * This field is subject to reshuffling, thanks to 185 + * sk_rethink_txhash() calls in various TCP functions. 186 + * 187 + * Instead, use sk->sk_hash which is constant through 188 + * the whole flow duration. 189 + */ 190 + if (likely(sk)) { 191 + u32 hash = sk->sk_hash; 192 + int min, max; 193 + 194 + /* These operations are cheap, no need to cache the result 195 + * in another socket field. 196 + */ 197 + inet_get_local_port_range(net, &min, &max); 198 + /* Since this is being sent on the wire obfuscate hash a bit 199 + * to minimize possibility that any useful information to an 200 + * attacker is leaked. Only upper 16 bits are relevant in the 201 + * computation for 16 bit port value because we use a 202 + * reciprocal divide. 203 + */ 204 + hash ^= hash << 16; 205 + uh->source = htons((((u64)hash * (max - min)) >> 32) + min); 206 + } else { 207 + uh->source = udp_flow_src_port(net, skb, 0, 0, false); 208 + } 172 209 uh->check = 0; 173 210 uh->len = htons(udp_len); 174 211
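Editor's note: the port derivation itself is a reciprocal scale: because the multiply-and-shift only looks at the upper bits of the 32-bit hash, the hash is first folded onto itself (hash ^= hash << 16) so low-order entropy reaches them, per the comment in the hunk. A standalone version of that arithmetic; the min/max values below are just a typical local port range, not anything the patch mandates:

```c
#include <stdint.h>
#include <stdio.h>

/* Map a stable flow hash into [min, max) with a reciprocal scale. */
static uint16_t src_port_from_hash(uint32_t hash, uint32_t min, uint32_t max)
{
    hash ^= hash << 16;  /* mix low bits into the relevant upper bits */
    return (uint16_t)((((uint64_t)hash * (max - min)) >> 32) + min);
}

int main(void)
{
    uint32_t min = 32768, max = 60999;   /* a typical ip_local_port_range */

    /* A fixed sk_hash always yields the same port, so every segment of
     * the flow lands in the same receive queue at the peer. */
    printf("%u\n", (unsigned)src_port_from_hash(0xdeadbeef, min, max));
    printf("%u\n", (unsigned)src_port_from_hash(0xdeadbeef, min, max));
    printf("%u\n", (unsigned)src_port_from_hash(0x12345678, min, max));
    return 0;
}
```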
+3
net/rds/connection.c
··· 455 455 rcu_read_unlock(); 456 456 } 457 457 458 + /* we do not hold the socket lock here but it is safe because 459 + * fan-out is disabled when calling conn_slots_available() 460 + */ 458 461 if (conn->c_trans->conn_slots_available) 459 462 conn->c_trans->conn_slots_available(conn, false); 460 463 }
+4 -22
net/rds/tcp_listen.c
··· 59 59 static int 60 60 rds_tcp_get_peer_sport(struct socket *sock) 61 61 { 62 - union { 63 - struct sockaddr_storage storage; 64 - struct sockaddr addr; 65 - struct sockaddr_in sin; 66 - struct sockaddr_in6 sin6; 67 - } saddr; 68 - int sport; 62 + struct sock *sk = sock->sk; 69 63 70 - if (kernel_getpeername(sock, &saddr.addr) >= 0) { 71 - switch (saddr.addr.sa_family) { 72 - case AF_INET: 73 - sport = ntohs(saddr.sin.sin_port); 74 - break; 75 - case AF_INET6: 76 - sport = ntohs(saddr.sin6.sin6_port); 77 - break; 78 - default: 79 - sport = -1; 80 - } 81 - } else { 82 - sport = -1; 83 - } 64 + if (!sk) 65 + return -1; 84 66 85 - return sport; 67 + return ntohs(READ_ONCE(inet_sk(sk)->inet_dport)); 86 68 } 87 69 88 70 /* rds_tcp_accept_one_path(): if accepting on cp_index > 0, make sure the
+4 -2
net/smc/af_smc.c
··· 124 124 struct request_sock *req, 125 125 struct dst_entry *dst, 126 126 struct request_sock *req_unhash, 127 - bool *own_req) 127 + bool *own_req, 128 + void (*opt_child_init)(struct sock *newsk, 129 + const struct sock *sk)) 128 130 { 129 131 struct smc_sock *smc; 130 132 struct sock *child; ··· 144 142 145 143 /* passthrough to original syn recv sock fct */ 146 144 child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash, 147 - own_req); 145 + own_req, opt_child_init); 148 146 /* child must not inherit smc or its ops */ 149 147 if (child) { 150 148 rcu_assign_sk_user_data(child, NULL);
+1 -1
net/socket.c
··· 674 674 iput(SOCK_INODE(sock)); 675 675 return; 676 676 } 677 - sock->file = NULL; 677 + WRITE_ONCE(sock->file, NULL); 678 678 } 679 679 680 680 /**
+4 -2
net/tipc/name_table.c
··· 348 348 349 349 /* Return if the publication already exists */ 350 350 list_for_each_entry(_p, &sr->all_publ, all_publ) { 351 - if (_p->key == key && (!_p->sk.node || _p->sk.node == node)) { 351 + if (_p->key == key && _p->sk.ref == p->sk.ref && 352 + (!_p->sk.node || _p->sk.node == node)) { 352 353 pr_debug("Failed to bind duplicate %u,%u,%u/%u:%u/%u\n", 353 354 p->sr.type, p->sr.lower, p->sr.upper, 354 355 node, p->sk.ref, key); ··· 389 388 u32 node = sk->node; 390 389 391 390 list_for_each_entry(p, &r->all_publ, all_publ) { 392 - if (p->key != key || (node && node != p->sk.node)) 391 + if (p->key != key || p->sk.ref != sk->ref || 392 + (node && node != p->sk.node)) 393 393 continue; 394 394 list_del(&p->all_publ); 395 395 list_del(&p->local_publ);
+1 -1
net/tls/tls_sw.c
··· 2533 2533 2534 2534 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask); 2535 2535 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask); 2536 - cancel_delayed_work_sync(&ctx->tx_work.work); 2536 + disable_delayed_work_sync(&ctx->tx_work.work); 2537 2537 } 2538 2538 2539 2539 void tls_sw_release_resources_tx(struct sock *sk)
+12 -7
net/vmw_vsock/af_vsock.c
··· 90 90 * 91 91 * - /proc/sys/net/vsock/ns_mode (read-only) reports the current namespace's 92 92 * mode, which is set at namespace creation and immutable thereafter. 93 - * - /proc/sys/net/vsock/child_ns_mode (writable) controls what mode future 93 + * - /proc/sys/net/vsock/child_ns_mode (write-once) controls what mode future 94 94 * child namespaces will inherit when created. The initial value matches 95 95 * the namespace's own ns_mode. 96 96 * 97 97 * Changing child_ns_mode only affects newly created namespaces, not the 98 98 * current namespace or existing children. A "local" namespace cannot set 99 - * child_ns_mode to "global". At namespace creation, ns_mode is inherited 100 - * from the parent's child_ns_mode. 99 + * child_ns_mode to "global". child_ns_mode is write-once, so that it may be 100 + * configured and locked down by a namespace manager. Writing a different 101 + * value after the first write returns -EBUSY. At namespace creation, ns_mode 102 + * is inherited from the parent's child_ns_mode. 101 103 * 102 - * The init_net mode is "global" and cannot be modified. 104 + * The init_net mode is "global" and cannot be modified. The init_net 105 + * child_ns_mode is also write-once, so an init process (e.g. systemd) can 106 + * set it to "local" to ensure all new namespaces inherit local mode. 103 107 * 104 108 * The modes affect the allocation and accessibility of CIDs as follows: 105 109 * ··· 2829 2825 if (write) 2830 2826 return -EPERM; 2831 2827 2832 - net = current->nsproxy->net_ns; 2828 + net = container_of(table->data, struct net, vsock.mode); 2833 2829 2834 2830 return __vsock_net_mode_string(table, write, buffer, lenp, ppos, 2835 2831 vsock_net_mode(net), NULL); ··· 2842 2838 struct net *net; 2843 2839 int ret; 2844 2840 2845 - net = current->nsproxy->net_ns; 2841 + net = container_of(table->data, struct net, vsock.child_ns_mode); 2846 2842 2847 2843 ret = __vsock_net_mode_string(table, write, buffer, lenp, ppos, 2848 2844 vsock_net_child_mode(net), &new_mode); ··· 2857 2853 new_mode == VSOCK_NET_MODE_GLOBAL) 2858 2854 return -EPERM; 2859 2855 2860 - vsock_net_set_child_mode(net, new_mode); 2856 + if (!vsock_net_set_child_mode(net, new_mode)) 2857 + return -EBUSY; 2861 2858 } 2862 2859 2863 2860 return 0;
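A hedged userspace demonstration of the write-once behaviour described in the comment above; it assumes a kernel with these vsock namespace modes and must run as root in the init namespace (whose child_ns_mode starts out matching its "global" ns_mode):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* reopen per write so each write starts at offset 0 */
static int write_mode(const char *mode)
{
	int fd = open("/proc/sys/net/vsock/child_ns_mode", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return -errno;
	if (write(fd, mode, strlen(mode)) < 0)
		ret = -errno;
	close(fd);
	return ret;
}

int main(void)
{
	printf("first write:     %d\n", write_mode("local\n"));	/* 0: locks the value */
	printf("same value:      %d\n", write_mode("local\n"));	/* 0: still accepted */
	printf("different value: %d\n", write_mode("global\n"));	/* -EBUSY (-16) */
	return 0;
}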
+1
net/wireless/core.c
··· 1211 1211 /* this has nothing to do now but make sure it's gone */ 1212 1212 cancel_work_sync(&rdev->wiphy_work); 1213 1213 1214 + cancel_work_sync(&rdev->rfkill_block); 1214 1215 cancel_work_sync(&rdev->conn_work); 1215 1216 flush_work(&rdev->event_work); 1216 1217 cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
+2 -2
net/wireless/radiotap.c
··· 239 239 default: 240 240 if (!iterator->current_namespace || 241 241 iterator->_arg_index >= iterator->current_namespace->n_bits) { 242 - if (iterator->current_namespace == &radiotap_ns) 243 - return -ENOENT; 244 242 align = 0; 245 243 } else { 246 244 align = iterator->current_namespace->align_size[iterator->_arg_index].align; 247 245 size = iterator->current_namespace->align_size[iterator->_arg_index].size; 248 246 } 249 247 if (!align) { 248 + if (iterator->current_namespace == &radiotap_ns) 249 + return -ENOENT; 250 250 /* skip all subsequent data */ 251 251 iterator->_arg = iterator->_next_ns_data; 252 252 /* give up on this namespace */
+1 -1
net/wireless/wext-compat.c
··· 683 683 684 684 idx = erq->flags & IW_ENCODE_INDEX; 685 685 if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) { 686 - if (idx < 4 || idx > 5) { 686 + if (idx < 5 || idx > 6) { 687 687 idx = wdev->wext.default_mgmt_key; 688 688 if (idx < 0) 689 689 return -EINVAL;
+1 -1
net/xfrm/espintcp.c
··· 536 536 sk->sk_prot = &tcp_prot; 537 537 barrier(); 538 538 539 - cancel_work_sync(&ctx->work); 539 + disable_work_sync(&ctx->work); 540 540 strp_done(&ctx->strp); 541 541 542 542 skb_queue_purge(&ctx->out_queue);
+11 -1
net/xfrm/xfrm_device.c
··· 544 544 return NOTIFY_DONE; 545 545 } 546 546 547 + static int xfrm_dev_unregister(struct net_device *dev) 548 + { 549 + xfrm_dev_state_flush(dev_net(dev), dev, true); 550 + xfrm_dev_policy_flush(dev_net(dev), dev, true); 551 + 552 + return NOTIFY_DONE; 553 + } 554 + 547 555 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 548 556 { 549 557 struct net_device *dev = netdev_notifier_info_to_dev(ptr); ··· 564 556 return xfrm_api_check(dev); 565 557 566 558 case NETDEV_DOWN: 567 - case NETDEV_UNREGISTER: 568 559 return xfrm_dev_down(dev); 560 + 561 + case NETDEV_UNREGISTER: 562 + return xfrm_dev_unregister(dev); 569 563 } 570 564 return NOTIFY_DONE; 571 565 }
+9 -2
net/xfrm/xfrm_policy.c
··· 3801 3801 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 3802 3802 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 3803 3803 struct xfrm_tmpl **tpp = tp; 3804 + int i, k = 0; 3804 3805 int ti = 0; 3805 - int i, k; 3806 3806 3807 3807 sp = skb_sec_path(skb); 3808 3808 if (!sp) ··· 3828 3828 tpp = stp; 3829 3829 } 3830 3830 3831 + if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET && sp == &dummy) 3832 + /* This policy template was already checked by HW 3833 + * and secpath was removed in __xfrm_policy_check2. 3834 + */ 3835 + goto out; 3836 + 3831 3837 /* For each tunnel xfrm, find the first matching tmpl. 3832 3838 * For each tmpl before that, find corresponding xfrm. 3833 3839 * Order is _important_. Later we will implement ··· 3843 3837 * verified to allow them to be skipped in future policy 3844 3838 * checks (e.g. nested tunnels). 3845 3839 */ 3846 - for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 3840 + for (i = xfrm_nr - 1; i >= 0; i--) { 3847 3841 k = xfrm_policy_ok(tpp[i], sp, k, family, if_id); 3848 3842 if (k < 0) { 3849 3843 if (k < -1) ··· 3859 3853 goto reject; 3860 3854 } 3861 3855 3856 + out: 3862 3857 xfrm_pols_put(pols, npols); 3863 3858 sp->verified_cnt = k; 3864 3859
+89 -42
rust/kernel/io.rs
··· 139 139 140 140 /// Internal helper macros used to invoke C MMIO read functions. 141 141 /// 142 - /// This macro is intended to be used by higher-level MMIO access macros (define_read) and provides 143 - /// a unified expansion for infallible vs. fallible read semantics. It emits a direct call into the 144 - /// corresponding C helper and performs the required cast to the Rust return type. 142 + /// This macro is intended to be used by higher-level MMIO access macros (io_define_read) and 143 + /// provides a unified expansion for infallible vs. fallible read semantics. It emits a direct call 144 + /// into the corresponding C helper and performs the required cast to the Rust return type. 145 145 /// 146 146 /// # Parameters 147 147 /// ··· 166 166 167 167 /// Internal helper macros used to invoke C MMIO write functions. 168 168 /// 169 - /// This macro is intended to be used by higher-level MMIO access macros (define_write) and provides 170 - /// a unified expansion for infallible vs. fallible write semantics. It emits a direct call into the 171 - /// corresponding C helper and performs the required cast to the Rust return type. 169 + /// This macro is intended to be used by higher-level MMIO access macros (io_define_write) and 170 + /// provides a unified expansion for infallible vs. fallible write semantics. It emits a direct call 171 + /// into the corresponding C helper and performs the required cast to the Rust return type. 172 172 /// 173 173 /// # Parameters 174 174 /// ··· 193 193 }}; 194 194 } 195 195 196 - macro_rules! define_read { 196 + /// Generates an accessor method for reading from an I/O backend. 197 + /// 198 + /// This macro reduces boilerplate by automatically generating either compile-time bounds-checked 199 + /// (infallible) or runtime bounds-checked (fallible) read methods. It abstracts the address 200 + /// calculation and bounds checking, and delegates the actual I/O read operation to a specified 201 + /// helper macro, making it generic over different I/O backends. 202 + /// 203 + /// # Parameters 204 + /// 205 + /// * `infallible` / `fallible` - Determines the bounds-checking strategy. `infallible` relies on 206 + /// `IoKnownSize` for compile-time checks and returns the value directly. `fallible` performs 207 + /// runtime checks against `maxsize()` and returns a `Result<T>`. 208 + /// * `$(#[$attr:meta])*` - Optional attributes to apply to the generated method (e.g., 209 + /// `#[cfg(CONFIG_64BIT)]` or inline directives). 210 + /// * `$vis:vis` - The visibility of the generated method (e.g., `pub`). 211 + /// * `$name:ident` / `$try_name:ident` - The name of the generated method (e.g., `read32`, 212 + /// `try_read8`). 213 + /// * `$call_macro:ident` - The backend-specific helper macro used to emit the actual I/O call 214 + /// (e.g., `call_mmio_read`). 215 + /// * `$c_fn:ident` - The backend-specific C function or identifier to be passed into the 216 + /// `$call_macro`. 217 + /// * `$type_name:ty` - The Rust type of the value being read (e.g., `u8`, `u32`). 218 + #[macro_export] 219 + macro_rules! io_define_read { 197 220 (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) -> 198 221 $type_name:ty) => { 199 222 /// Read IO data from a given offset known at compile time. ··· 249 226 } 250 227 }; 251 228 } 252 - pub(crate) use define_read; 229 + pub use io_define_read; 253 230 254 - macro_rules! define_write { 231 + /// Generates an accessor method for writing to an I/O backend. 
232 + /// 233 + /// This macro reduces boilerplate by automatically generating either compile-time bounds-checked 234 + /// (infallible) or runtime bounds-checked (fallible) write methods. It abstracts the address 235 + /// calculation and bounds checking, and delegates the actual I/O write operation to a specified 236 + /// helper macro, making it generic over different I/O backends. 237 + /// 238 + /// # Parameters 239 + /// 240 + /// * `infallible` / `fallible` - Determines the bounds-checking strategy. `infallible` relies on 241 + /// `IoKnownSize` for compile-time checks and returns `()`. `fallible` performs runtime checks 242 + /// against `maxsize()` and returns a `Result`. 243 + /// * `$(#[$attr:meta])*` - Optional attributes to apply to the generated method (e.g., 244 + /// `#[cfg(CONFIG_64BIT)]` or inline directives). 245 + /// * `$vis:vis` - The visibility of the generated method (e.g., `pub`). 246 + /// * `$name:ident` / `$try_name:ident` - The name of the generated method (e.g., `write32`, 247 + /// `try_write8`). 248 + /// * `$call_macro:ident` - The backend-specific helper macro used to emit the actual I/O call 249 + /// (e.g., `call_mmio_write`). 250 + /// * `$c_fn:ident` - The backend-specific C function or identifier to be passed into the 251 + /// `$call_macro`. 252 + /// * `$type_name:ty` - The Rust type of the value being written (e.g., `u8`, `u32`). Note the use 253 + /// of `<-` before the type to denote a write operation. 254 + #[macro_export] 255 + macro_rules! io_define_write { 255 256 (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) <- 256 257 $type_name:ty) => { 257 258 /// Write IO data from a given offset known at compile time. ··· 306 259 } 307 260 }; 308 261 } 309 - pub(crate) use define_write; 262 + pub use io_define_write; 310 263 311 264 /// Checks whether an access of type `U` at the given `offset` 312 265 /// is valid within this region. 
··· 556 509 self.0.maxsize() 557 510 } 558 511 559 - define_read!(fallible, try_read8, call_mmio_read(readb) -> u8); 560 - define_read!(fallible, try_read16, call_mmio_read(readw) -> u16); 561 - define_read!(fallible, try_read32, call_mmio_read(readl) -> u32); 562 - define_read!( 512 + io_define_read!(fallible, try_read8, call_mmio_read(readb) -> u8); 513 + io_define_read!(fallible, try_read16, call_mmio_read(readw) -> u16); 514 + io_define_read!(fallible, try_read32, call_mmio_read(readl) -> u32); 515 + io_define_read!( 563 516 fallible, 564 517 #[cfg(CONFIG_64BIT)] 565 518 try_read64, 566 519 call_mmio_read(readq) -> u64 567 520 ); 568 521 569 - define_write!(fallible, try_write8, call_mmio_write(writeb) <- u8); 570 - define_write!(fallible, try_write16, call_mmio_write(writew) <- u16); 571 - define_write!(fallible, try_write32, call_mmio_write(writel) <- u32); 572 - define_write!( 522 + io_define_write!(fallible, try_write8, call_mmio_write(writeb) <- u8); 523 + io_define_write!(fallible, try_write16, call_mmio_write(writew) <- u16); 524 + io_define_write!(fallible, try_write32, call_mmio_write(writel) <- u32); 525 + io_define_write!( 573 526 fallible, 574 527 #[cfg(CONFIG_64BIT)] 575 528 try_write64, 576 529 call_mmio_write(writeq) <- u64 577 530 ); 578 531 579 - define_read!(infallible, read8, call_mmio_read(readb) -> u8); 580 - define_read!(infallible, read16, call_mmio_read(readw) -> u16); 581 - define_read!(infallible, read32, call_mmio_read(readl) -> u32); 582 - define_read!( 532 + io_define_read!(infallible, read8, call_mmio_read(readb) -> u8); 533 + io_define_read!(infallible, read16, call_mmio_read(readw) -> u16); 534 + io_define_read!(infallible, read32, call_mmio_read(readl) -> u32); 535 + io_define_read!( 583 536 infallible, 584 537 #[cfg(CONFIG_64BIT)] 585 538 read64, 586 539 call_mmio_read(readq) -> u64 587 540 ); 588 541 589 - define_write!(infallible, write8, call_mmio_write(writeb) <- u8); 590 - define_write!(infallible, write16, call_mmio_write(writew) <- u16); 591 - define_write!(infallible, write32, call_mmio_write(writel) <- u32); 592 - define_write!( 542 + io_define_write!(infallible, write8, call_mmio_write(writeb) <- u8); 543 + io_define_write!(infallible, write16, call_mmio_write(writew) <- u16); 544 + io_define_write!(infallible, write32, call_mmio_write(writel) <- u32); 545 + io_define_write!( 593 546 infallible, 594 547 #[cfg(CONFIG_64BIT)] 595 548 write64, ··· 613 566 unsafe { &*core::ptr::from_ref(raw).cast() } 614 567 } 615 568 616 - define_read!(infallible, pub read8_relaxed, call_mmio_read(readb_relaxed) -> u8); 617 - define_read!(infallible, pub read16_relaxed, call_mmio_read(readw_relaxed) -> u16); 618 - define_read!(infallible, pub read32_relaxed, call_mmio_read(readl_relaxed) -> u32); 619 - define_read!( 569 + io_define_read!(infallible, pub read8_relaxed, call_mmio_read(readb_relaxed) -> u8); 570 + io_define_read!(infallible, pub read16_relaxed, call_mmio_read(readw_relaxed) -> u16); 571 + io_define_read!(infallible, pub read32_relaxed, call_mmio_read(readl_relaxed) -> u32); 572 + io_define_read!( 620 573 infallible, 621 574 #[cfg(CONFIG_64BIT)] 622 575 pub read64_relaxed, 623 576 call_mmio_read(readq_relaxed) -> u64 624 577 ); 625 578 626 - define_read!(fallible, pub try_read8_relaxed, call_mmio_read(readb_relaxed) -> u8); 627 - define_read!(fallible, pub try_read16_relaxed, call_mmio_read(readw_relaxed) -> u16); 628 - define_read!(fallible, pub try_read32_relaxed, call_mmio_read(readl_relaxed) -> u32); 629 - define_read!( 579 + 
io_define_read!(fallible, pub try_read8_relaxed, call_mmio_read(readb_relaxed) -> u8); 580 + io_define_read!(fallible, pub try_read16_relaxed, call_mmio_read(readw_relaxed) -> u16); 581 + io_define_read!(fallible, pub try_read32_relaxed, call_mmio_read(readl_relaxed) -> u32); 582 + io_define_read!( 630 583 fallible, 631 584 #[cfg(CONFIG_64BIT)] 632 585 pub try_read64_relaxed, 633 586 call_mmio_read(readq_relaxed) -> u64 634 587 ); 635 588 636 - define_write!(infallible, pub write8_relaxed, call_mmio_write(writeb_relaxed) <- u8); 637 - define_write!(infallible, pub write16_relaxed, call_mmio_write(writew_relaxed) <- u16); 638 - define_write!(infallible, pub write32_relaxed, call_mmio_write(writel_relaxed) <- u32); 639 - define_write!( 589 + io_define_write!(infallible, pub write8_relaxed, call_mmio_write(writeb_relaxed) <- u8); 590 + io_define_write!(infallible, pub write16_relaxed, call_mmio_write(writew_relaxed) <- u16); 591 + io_define_write!(infallible, pub write32_relaxed, call_mmio_write(writel_relaxed) <- u32); 592 + io_define_write!( 640 593 infallible, 641 594 #[cfg(CONFIG_64BIT)] 642 595 pub write64_relaxed, 643 596 call_mmio_write(writeq_relaxed) <- u64 644 597 ); 645 598 646 - define_write!(fallible, pub try_write8_relaxed, call_mmio_write(writeb_relaxed) <- u8); 647 - define_write!(fallible, pub try_write16_relaxed, call_mmio_write(writew_relaxed) <- u16); 648 - define_write!(fallible, pub try_write32_relaxed, call_mmio_write(writel_relaxed) <- u32); 649 - define_write!( 599 + io_define_write!(fallible, pub try_write8_relaxed, call_mmio_write(writeb_relaxed) <- u8); 600 + io_define_write!(fallible, pub try_write16_relaxed, call_mmio_write(writew_relaxed) <- u16); 601 + io_define_write!(fallible, pub try_write32_relaxed, call_mmio_write(writel_relaxed) <- u32); 602 + io_define_write!( 650 603 fallible, 651 604 #[cfg(CONFIG_64BIT)] 652 605 pub try_write64_relaxed,
+12 -12
rust/kernel/pci/io.rs
··· 8 8 device, 9 9 devres::Devres, 10 10 io::{ 11 - define_read, 12 - define_write, 11 + io_define_read, 12 + io_define_write, 13 13 Io, 14 14 IoCapable, 15 15 IoKnownSize, ··· 88 88 /// Internal helper macros used to invoke C PCI configuration space read functions. 89 89 /// 90 90 /// This macro is intended to be used by higher-level PCI configuration space access macros 91 - /// (define_read) and provides a unified expansion for infallible vs. fallible read semantics. It 91 + /// (io_define_read) and provides a unified expansion for infallible vs. fallible read semantics. It 92 92 /// emits a direct call into the corresponding C helper and performs the required cast to the Rust 93 93 /// return type. 94 94 /// ··· 117 117 /// Internal helper macros used to invoke C PCI configuration space write functions. 118 118 /// 119 119 /// This macro is intended to be used by higher-level PCI configuration space access macros 120 - /// (define_write) and provides a unified expansion for infallible vs. fallible read semantics. It 121 - /// emits a direct call into the corresponding C helper and performs the required cast to the Rust 122 - /// return type. 120 + /// (io_define_write) and provides a unified expansion for infallible vs. fallible write semantics. 121 + /// It emits a direct call into the corresponding C helper and performs the required cast to the 122 + /// Rust return type. 123 123 /// 124 124 /// # Parameters 125 125 /// ··· 163 163 // PCI configuration space does not support fallible operations. 164 164 // The default implementations from the Io trait are not used. 165 165 166 - define_read!(infallible, read8, call_config_read(pci_read_config_byte) -> u8); 167 - define_read!(infallible, read16, call_config_read(pci_read_config_word) -> u16); 168 - define_read!(infallible, read32, call_config_read(pci_read_config_dword) -> u32); 166 + io_define_read!(infallible, read8, call_config_read(pci_read_config_byte) -> u8); 167 + io_define_read!(infallible, read16, call_config_read(pci_read_config_word) -> u16); 168 + io_define_read!(infallible, read32, call_config_read(pci_read_config_dword) -> u32); 169 169 170 - define_write!(infallible, write8, call_config_write(pci_write_config_byte) <- u8); 171 - define_write!(infallible, write16, call_config_write(pci_write_config_word) <- u16); 172 - define_write!(infallible, write32, call_config_write(pci_write_config_dword) <- u32); 170 + io_define_write!(infallible, write8, call_config_write(pci_write_config_byte) <- u8); 171 + io_define_write!(infallible, write16, call_config_write(pci_write_config_word) <- u16); 172 + io_define_write!(infallible, write32, call_config_write(pci_write_config_dword) <- u32); 173 173 } 174 174 175 175 impl<'a, S: ConfigSpaceKind> IoKnownSize for ConfigSpace<'a, S> {
+413
sound/soc/amd/acp/amd-acp63-acpi-match.c
··· 30 30 .group_id = 1 31 31 }; 32 32 33 + static const struct snd_soc_acpi_endpoint spk_2_endpoint = { 34 + .num = 0, 35 + .aggregated = 1, 36 + .group_position = 2, 37 + .group_id = 1 38 + }; 39 + 40 + static const struct snd_soc_acpi_endpoint spk_3_endpoint = { 41 + .num = 0, 42 + .aggregated = 1, 43 + .group_position = 3, 44 + .group_id = 1 45 + }; 46 + 33 47 static const struct snd_soc_acpi_adr_device rt711_rt1316_group_adr[] = { 34 48 { 35 49 .adr = 0x000030025D071101ull, ··· 117 103 } 118 104 }; 119 105 106 + static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = { 107 + { /* Jack Playback Endpoint */ 108 + .num = 0, 109 + .aggregated = 0, 110 + .group_position = 0, 111 + .group_id = 0, 112 + }, 113 + { /* DMIC Capture Endpoint */ 114 + .num = 1, 115 + .aggregated = 0, 116 + .group_position = 0, 117 + .group_id = 0, 118 + }, 119 + { /* Jack Capture Endpoint */ 120 + .num = 2, 121 + .aggregated = 0, 122 + .group_position = 0, 123 + .group_id = 0, 124 + }, 125 + { /* Speaker Playback Endpoint */ 126 + .num = 3, 127 + .aggregated = 0, 128 + .group_position = 0, 129 + .group_id = 0, 130 + }, 131 + }; 132 + 133 + static const struct snd_soc_acpi_adr_device cs35l56x4_l1u3210_adr[] = { 134 + { 135 + .adr = 0x00013301FA355601ull, 136 + .num_endpoints = 1, 137 + .endpoints = &spk_l_endpoint, 138 + .name_prefix = "AMP1" 139 + }, 140 + { 141 + .adr = 0x00013201FA355601ull, 142 + .num_endpoints = 1, 143 + .endpoints = &spk_r_endpoint, 144 + .name_prefix = "AMP2" 145 + }, 146 + { 147 + .adr = 0x00013101FA355601ull, 148 + .num_endpoints = 1, 149 + .endpoints = &spk_2_endpoint, 150 + .name_prefix = "AMP3" 151 + }, 152 + { 153 + .adr = 0x00013001FA355601ull, 154 + .num_endpoints = 1, 155 + .endpoints = &spk_3_endpoint, 156 + .name_prefix = "AMP4" 157 + }, 158 + }; 159 + 160 + static const struct snd_soc_acpi_adr_device cs35l63x2_l0u01_adr[] = { 161 + { 162 + .adr = 0x00003001FA356301ull, 163 + .num_endpoints = 1, 164 + .endpoints = &spk_l_endpoint, 165 + .name_prefix = "AMP1" 166 + }, 167 + { 168 + .adr = 0x00003101FA356301ull, 169 + .num_endpoints = 1, 170 + .endpoints = &spk_r_endpoint, 171 + .name_prefix = "AMP2" 172 + }, 173 + }; 174 + 175 + static const struct snd_soc_acpi_adr_device cs35l63x2_l1u01_adr[] = { 176 + { 177 + .adr = 0x00013001FA356301ull, 178 + .num_endpoints = 1, 179 + .endpoints = &spk_l_endpoint, 180 + .name_prefix = "AMP1" 181 + }, 182 + { 183 + .adr = 0x00013101FA356301ull, 184 + .num_endpoints = 1, 185 + .endpoints = &spk_r_endpoint, 186 + .name_prefix = "AMP2" 187 + }, 188 + }; 189 + 190 + static const struct snd_soc_acpi_adr_device cs35l63x2_l1u13_adr[] = { 191 + { 192 + .adr = 0x00013101FA356301ull, 193 + .num_endpoints = 1, 194 + .endpoints = &spk_l_endpoint, 195 + .name_prefix = "AMP1" 196 + }, 197 + { 198 + .adr = 0x00013301FA356301ull, 199 + .num_endpoints = 1, 200 + .endpoints = &spk_r_endpoint, 201 + .name_prefix = "AMP2" 202 + }, 203 + }; 204 + 205 + static const struct snd_soc_acpi_adr_device cs35l63x4_l0u0246_adr[] = { 206 + { 207 + .adr = 0x00003001FA356301ull, 208 + .num_endpoints = 1, 209 + .endpoints = &spk_l_endpoint, 210 + .name_prefix = "AMP1" 211 + }, 212 + { 213 + .adr = 0x00003201FA356301ull, 214 + .num_endpoints = 1, 215 + .endpoints = &spk_r_endpoint, 216 + .name_prefix = "AMP2" 217 + }, 218 + { 219 + .adr = 0x00003401FA356301ull, 220 + .num_endpoints = 1, 221 + .endpoints = &spk_2_endpoint, 222 + .name_prefix = "AMP3" 223 + }, 224 + { 225 + .adr = 0x00003601FA356301ull, 226 + .num_endpoints = 1, 227 + .endpoints = &spk_3_endpoint, 228 + 
.name_prefix = "AMP4" 229 + }, 230 + }; 231 + 232 + static const struct snd_soc_acpi_adr_device cs42l43_l0u0_adr[] = { 233 + { 234 + .adr = 0x00003001FA424301ull, 235 + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), 236 + .endpoints = cs42l43_endpoints, 237 + .name_prefix = "cs42l43" 238 + } 239 + }; 240 + 241 + static const struct snd_soc_acpi_adr_device cs42l43_l0u1_adr[] = { 242 + { 243 + .adr = 0x00003101FA424301ull, 244 + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), 245 + .endpoints = cs42l43_endpoints, 246 + .name_prefix = "cs42l43" 247 + } 248 + }; 249 + 250 + static const struct snd_soc_acpi_adr_device cs42l43b_l0u1_adr[] = { 251 + { 252 + .adr = 0x00003101FA2A3B01ull, 253 + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), 254 + .endpoints = cs42l43_endpoints, 255 + .name_prefix = "cs42l43" 256 + } 257 + }; 258 + 259 + static const struct snd_soc_acpi_adr_device cs42l43_l1u0_cs35l56x4_l1u0123_adr[] = { 260 + { 261 + .adr = 0x00013001FA424301ull, 262 + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), 263 + .endpoints = cs42l43_endpoints, 264 + .name_prefix = "cs42l43" 265 + }, 266 + { 267 + .adr = 0x00013001FA355601ull, 268 + .num_endpoints = 1, 269 + .endpoints = &spk_l_endpoint, 270 + .name_prefix = "AMP1" 271 + }, 272 + { 273 + .adr = 0x00013101FA355601ull, 274 + .num_endpoints = 1, 275 + .endpoints = &spk_r_endpoint, 276 + .name_prefix = "AMP2" 277 + }, 278 + { 279 + .adr = 0x00013201FA355601ull, 280 + .num_endpoints = 1, 281 + .endpoints = &spk_2_endpoint, 282 + .name_prefix = "AMP3" 283 + }, 284 + { 285 + .adr = 0x00013301FA355601ull, 286 + .num_endpoints = 1, 287 + .endpoints = &spk_3_endpoint, 288 + .name_prefix = "AMP4" 289 + }, 290 + }; 291 + 292 + static const struct snd_soc_acpi_adr_device cs42l45_l0u0_adr[] = { 293 + { 294 + .adr = 0x00003001FA424501ull, 295 + /* Re-use endpoints, but cs42l45 has no speaker */ 296 + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints) - 1, 297 + .endpoints = cs42l43_endpoints, 298 + .name_prefix = "cs42l45" 299 + } 300 + }; 301 + 302 + static const struct snd_soc_acpi_adr_device cs42l45_l1u0_adr[] = { 303 + { 304 + .adr = 0x00013001FA424501ull, 305 + /* Re-use endpoints, but cs42l45 has no speaker */ 306 + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints) - 1, 307 + .endpoints = cs42l43_endpoints, 308 + .name_prefix = "cs42l45" 309 + } 310 + }; 311 + 312 + static const struct snd_soc_acpi_link_adr acp63_cs35l56x4_l1u3210[] = { 313 + { 314 + .mask = BIT(1), 315 + .num_adr = ARRAY_SIZE(cs35l56x4_l1u3210_adr), 316 + .adr_d = cs35l56x4_l1u3210_adr, 317 + }, 318 + {} 319 + }; 320 + 321 + static const struct snd_soc_acpi_link_adr acp63_cs35l63x4_l0u0246[] = { 322 + { 323 + .mask = BIT(0), 324 + .num_adr = ARRAY_SIZE(cs35l63x4_l0u0246_adr), 325 + .adr_d = cs35l63x4_l0u0246_adr, 326 + }, 327 + {} 328 + }; 329 + 330 + static const struct snd_soc_acpi_link_adr acp63_cs42l43_l0u1[] = { 331 + { 332 + .mask = BIT(0), 333 + .num_adr = ARRAY_SIZE(cs42l43_l0u1_adr), 334 + .adr_d = cs42l43_l0u1_adr, 335 + }, 336 + {} 337 + }; 338 + 339 + static const struct snd_soc_acpi_link_adr acp63_cs42l43b_l0u1[] = { 340 + { 341 + .mask = BIT(0), 342 + .num_adr = ARRAY_SIZE(cs42l43b_l0u1_adr), 343 + .adr_d = cs42l43b_l0u1_adr, 344 + }, 345 + {} 346 + }; 347 + 348 + static const struct snd_soc_acpi_link_adr acp63_cs42l43_l0u0_cs35l56x4_l1u3210[] = { 349 + { 350 + .mask = BIT(0), 351 + .num_adr = ARRAY_SIZE(cs42l43_l0u0_adr), 352 + .adr_d = cs42l43_l0u0_adr, 353 + }, 354 + { 355 + .mask = BIT(1), 356 + .num_adr = ARRAY_SIZE(cs35l56x4_l1u3210_adr), 357 + .adr_d = 
cs35l56x4_l1u3210_adr, 358 + }, 359 + {} 360 + }; 361 + 362 + static const struct snd_soc_acpi_link_adr acp63_cs42l43_l1u0_cs35l56x4_l1u0123[] = { 363 + { 364 + .mask = BIT(1), 365 + .num_adr = ARRAY_SIZE(cs42l43_l1u0_cs35l56x4_l1u0123_adr), 366 + .adr_d = cs42l43_l1u0_cs35l56x4_l1u0123_adr, 367 + }, 368 + {} 369 + }; 370 + 371 + static const struct snd_soc_acpi_link_adr acp63_cs42l45_l0u0[] = { 372 + { 373 + .mask = BIT(0), 374 + .num_adr = ARRAY_SIZE(cs42l45_l0u0_adr), 375 + .adr_d = cs42l45_l0u0_adr, 376 + }, 377 + {} 378 + }; 379 + 380 + static const struct snd_soc_acpi_link_adr acp63_cs42l45_l0u0_cs35l63x2_l1u01[] = { 381 + { 382 + .mask = BIT(0), 383 + .num_adr = ARRAY_SIZE(cs42l45_l0u0_adr), 384 + .adr_d = cs42l45_l0u0_adr, 385 + }, 386 + { 387 + .mask = BIT(1), 388 + .num_adr = ARRAY_SIZE(cs35l63x2_l1u01_adr), 389 + .adr_d = cs35l63x2_l1u01_adr, 390 + }, 391 + {} 392 + }; 393 + 394 + static const struct snd_soc_acpi_link_adr acp63_cs42l45_l0u0_cs35l63x2_l1u13[] = { 395 + { 396 + .mask = BIT(0), 397 + .num_adr = ARRAY_SIZE(cs42l45_l0u0_adr), 398 + .adr_d = cs42l45_l0u0_adr, 399 + }, 400 + { 401 + .mask = BIT(1), 402 + .num_adr = ARRAY_SIZE(cs35l63x2_l1u13_adr), 403 + .adr_d = cs35l63x2_l1u13_adr, 404 + }, 405 + {} 406 + }; 407 + 408 + static const struct snd_soc_acpi_link_adr acp63_cs42l45_l1u0[] = { 409 + { 410 + .mask = BIT(1), 411 + .num_adr = ARRAY_SIZE(cs42l45_l1u0_adr), 412 + .adr_d = cs42l45_l1u0_adr, 413 + }, 414 + {} 415 + }; 416 + 417 + static const struct snd_soc_acpi_link_adr acp63_cs42l45_l1u0_cs35l63x2_l0u01[] = { 418 + { 419 + .mask = BIT(1), 420 + .num_adr = ARRAY_SIZE(cs42l45_l1u0_adr), 421 + .adr_d = cs42l45_l1u0_adr, 422 + }, 423 + { 424 + .mask = BIT(0), 425 + .num_adr = ARRAY_SIZE(cs35l63x2_l0u01_adr), 426 + .adr_d = cs35l63x2_l0u01_adr, 427 + }, 428 + {} 429 + }; 430 + 431 + static const struct snd_soc_acpi_link_adr acp63_cs42l45_l1u0_cs35l63x4_l0u0246[] = { 432 + { 433 + .mask = BIT(1), 434 + .num_adr = ARRAY_SIZE(cs42l45_l1u0_adr), 435 + .adr_d = cs42l45_l1u0_adr, 436 + }, 437 + { 438 + .mask = BIT(0), 439 + .num_adr = ARRAY_SIZE(cs35l63x4_l0u0246_adr), 440 + .adr_d = cs35l63x4_l0u0246_adr, 441 + }, 442 + {} 443 + }; 444 + 120 445 static const struct snd_soc_acpi_link_adr acp63_rt722_only[] = { 121 446 { 122 447 .mask = BIT(0), ··· 486 133 { 487 134 .link_mask = BIT(0) | BIT(1), 488 135 .links = acp63_4_in_1_sdca, 136 + .drv_name = "amd_sdw", 137 + }, 138 + { 139 + .link_mask = BIT(0) | BIT(1), 140 + .links = acp63_cs42l43_l0u0_cs35l56x4_l1u3210, 141 + .drv_name = "amd_sdw", 142 + }, 143 + { 144 + .link_mask = BIT(0) | BIT(1), 145 + .links = acp63_cs42l45_l1u0_cs35l63x4_l0u0246, 146 + .drv_name = "amd_sdw", 147 + }, 148 + { 149 + .link_mask = BIT(0) | BIT(1), 150 + .links = acp63_cs42l45_l0u0_cs35l63x2_l1u01, 151 + .drv_name = "amd_sdw", 152 + }, 153 + { 154 + .link_mask = BIT(0) | BIT(1), 155 + .links = acp63_cs42l45_l0u0_cs35l63x2_l1u13, 156 + .drv_name = "amd_sdw", 157 + }, 158 + { 159 + .link_mask = BIT(0) | BIT(1), 160 + .links = acp63_cs42l45_l1u0_cs35l63x2_l0u01, 161 + .drv_name = "amd_sdw", 162 + }, 163 + { 164 + .link_mask = BIT(1), 165 + .links = acp63_cs42l43_l1u0_cs35l56x4_l1u0123, 166 + .drv_name = "amd_sdw", 167 + }, 168 + { 169 + .link_mask = BIT(1), 170 + .links = acp63_cs35l56x4_l1u3210, 171 + .drv_name = "amd_sdw", 172 + }, 173 + { 174 + .link_mask = BIT(0), 175 + .links = acp63_cs35l63x4_l0u0246, 176 + .drv_name = "amd_sdw", 177 + }, 178 + { 179 + .link_mask = BIT(0), 180 + .links = acp63_cs42l43_l0u1, 181 + .drv_name = "amd_sdw", 182 + }, 
183 + { 184 + .link_mask = BIT(0), 185 + .links = acp63_cs42l43b_l0u1, 186 + .drv_name = "amd_sdw", 187 + }, 188 + { 189 + .link_mask = BIT(0), 190 + .links = acp63_cs42l45_l0u0, 191 + .drv_name = "amd_sdw", 192 + }, 193 + { 194 + .link_mask = BIT(1), 195 + .links = acp63_cs42l45_l1u0, 489 196 .drv_name = "amd_sdw", 490 197 }, 491 198 {},
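The _adr constants and the l<link>u<unique-id> suffixes in the array names above follow the 64-bit SoundWire _ADR encoding. A small decode sketch; the field positions mirror the SDW_*_MASK definitions in include/linux/soundwire/sdw.h, so take this as illustrative rather than authoritative:

#include <stdio.h>
#include <stdint.h>

/* extract bits [hi:lo] of a 64-bit _ADR */
#define FIELD(adr, hi, lo) \
	((unsigned int)(((adr) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1)))

int main(void)
{
	uint64_t adr = 0x00013301FA355601ull;	/* first cs35l56x4_l1u3210 entry */

	printf("link %u, unique id %u, mfg 0x%04x, part 0x%04x, class %u\n",
	       FIELD(adr, 51, 48),	/* link 1   -> the "l1" in the name */
	       FIELD(adr, 43, 40),	/* unique 3 -> first digit of "u3210" */
	       FIELD(adr, 39, 24),	/* 0x01FA: Cirrus Logic */
	       FIELD(adr, 23, 8),	/* 0x3556: CS35L56 */
	       FIELD(adr, 7, 0));
	return 0;
}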
+7
sound/soc/amd/yc/acp6x-mach.c
··· 710 710 DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK BM1503CDA"), 711 711 } 712 712 }, 713 + { 714 + .driver_data = &acp6x_card, 715 + .matches = { 716 + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), 717 + DMI_MATCH(DMI_BOARD_NAME, "PM1503CDA"), 718 + } 719 + }, 713 720 {} 714 721 }; 715 722
+15 -1
sound/soc/codecs/cs35l56-shared.c
··· 26 26 27 27 #include "cs35l56.h" 28 28 29 - static const struct reg_sequence cs35l56_patch[] = { 29 + static const struct reg_sequence cs35l56_asp_patch[] = { 30 30 /* 31 31 * Firmware can change these to non-defaults to satisfy SDCA. 32 32 * Ensure that they are at known defaults. ··· 43 43 { CS35L56_ASP1TX2_INPUT, 0x00000000 }, 44 44 { CS35L56_ASP1TX3_INPUT, 0x00000000 }, 45 45 { CS35L56_ASP1TX4_INPUT, 0x00000000 }, 46 + }; 47 + 48 + int cs35l56_set_asp_patch(struct cs35l56_base *cs35l56_base) 49 + { 50 + return regmap_register_patch(cs35l56_base->regmap, cs35l56_asp_patch, 51 + ARRAY_SIZE(cs35l56_asp_patch)); 52 + } 53 + EXPORT_SYMBOL_NS_GPL(cs35l56_set_asp_patch, "SND_SOC_CS35L56_SHARED"); 54 + 55 + static const struct reg_sequence cs35l56_patch[] = { 56 + /* 57 + * Firmware can change these to non-defaults to satisfy SDCA. 58 + * Ensure that they are at known defaults. 59 + */ 46 60 { CS35L56_SWIRE_DP3_CH1_INPUT, 0x00000018 }, 47 61 { CS35L56_SWIRE_DP3_CH2_INPUT, 0x00000019 }, 48 62 { CS35L56_SWIRE_DP3_CH3_INPUT, 0x00000029 },
+10 -2
sound/soc/codecs/cs35l56.c
··· 348 348 return wm_adsp_event(w, kcontrol, event); 349 349 } 350 350 351 + static int cs35l56_asp_dai_probe(struct snd_soc_dai *codec_dai) 352 + { 353 + struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(codec_dai->component); 354 + 355 + return cs35l56_set_asp_patch(&cs35l56->base); 356 + } 357 + 351 358 static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) 352 359 { 353 360 struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(codec_dai->component); ··· 559 552 } 560 553 561 554 static const struct snd_soc_dai_ops cs35l56_ops = { 555 + .probe = cs35l56_asp_dai_probe, 562 556 .set_fmt = cs35l56_asp_dai_set_fmt, 563 557 .set_tdm_slot = cs35l56_asp_dai_set_tdm_slot, 564 558 .hw_params = cs35l56_asp_dai_hw_params, ··· 1625 1617 if (num_pulls < 0) 1626 1618 return num_pulls; 1627 1619 1628 - if (num_pulls != num_gpios) { 1620 + if (num_pulls && (num_pulls != num_gpios)) { 1629 1621 dev_warn(cs35l56->base.dev, "%s count(%d) != %s count(%d)\n", 1630 - pull_name, num_pulls, gpio_name, num_gpios); 1622 + pull_name, num_pulls, gpio_name, num_gpios); 1631 1623 } 1632 1624 1633 1625 ret = cs35l56_check_and_save_onchip_spkid_gpios(&cs35l56->base,
+3 -2
sound/soc/codecs/rt1320-sdw.c
··· 2629 2629 struct sdw_port_config port_config; 2630 2630 struct sdw_port_config dmic_port_config[2]; 2631 2631 struct sdw_stream_runtime *sdw_stream; 2632 - int retval; 2632 + int retval, num_channels; 2633 2633 unsigned int sampling_rate; 2634 2634 2635 2635 dev_dbg(dai->dev, "%s %s", __func__, dai->name); ··· 2661 2661 dmic_port_config[1].num = 10; 2662 2662 break; 2663 2663 case RT1321_DEV_ID: 2664 - dmic_port_config[0].ch_mask = BIT(0) | BIT(1); 2664 + num_channels = params_channels(params); 2665 + dmic_port_config[0].ch_mask = GENMASK(num_channels - 1, 0); 2665 2666 dmic_port_config[0].num = 8; 2666 2667 break; 2667 2668 default:
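GENMASK(num_channels - 1, 0) builds a contiguous low mask, so the DMIC channel mask now follows the stream's channel count instead of being hard-wired to two channels. A tiny sketch with a local GENMASK equivalent (the kernel's lives in include/linux/bits.h):

#include <stdio.h>

#define GENMASK(h, l)	((~0u >> (31 - (h))) & ~((1u << (l)) - 1))

int main(void)
{
	/* 1..4 capture channels -> ch_mask 0x1, 0x3, 0x7, 0xf */
	for (int ch = 1; ch <= 4; ch++)
		printf("%d channels -> ch_mask 0x%x\n", ch, GENMASK(ch - 1, 0));
	return 0;
}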
+10 -4
sound/soc/fsl/fsl_easrc.c
··· 52 52 struct soc_mreg_control *mc = 53 53 (struct soc_mreg_control *)kcontrol->private_value; 54 54 unsigned int regval = ucontrol->value.integer.value[0]; 55 + int ret; 56 + 57 + ret = (easrc_priv->bps_iec958[mc->regbase] != regval); 55 58 56 59 easrc_priv->bps_iec958[mc->regbase] = regval; 57 60 58 - return 0; 61 + return ret; 59 62 } 60 63 61 64 static int fsl_easrc_iec958_get_bits(struct snd_kcontrol *kcontrol, ··· 96 93 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); 97 94 struct soc_mreg_control *mc = 98 95 (struct soc_mreg_control *)kcontrol->private_value; 96 + struct fsl_asrc *easrc = snd_soc_component_get_drvdata(component); 99 97 unsigned int regval = ucontrol->value.integer.value[0]; 98 + bool changed; 100 99 int ret; 101 100 102 - ret = snd_soc_component_write(component, mc->regbase, regval); 103 - if (ret < 0) 101 + ret = regmap_update_bits_check(easrc->regmap, mc->regbase, 102 + GENMASK(31, 0), regval, &changed); 103 + if (ret != 0) 104 104 return ret; 105 105 106 - return 0; 106 + return changed; 107 107 } 108 108 109 109 #define SOC_SINGLE_REG_RW(xname, xreg) \
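Both hunks adopt the ALSA kcontrol contract for put() callbacks: return 1 when the stored value actually changed (so the core emits a notification event), 0 when nothing changed, and a negative errno on failure. A stand-alone illustration of that contract; demo_put() is a hypothetical stand-in, not a real driver callback:

#include <stdio.h>

static unsigned int stored_bits;

/* returns 1 on change, 0 if unchanged -- mirrors the kcontrol convention */
static int demo_put(unsigned int new_val)
{
	int changed = (stored_bits != new_val);

	stored_bits = new_val;
	return changed;
}

int main(void)
{
	printf("%d\n", demo_put(24));	/* 1: value changed, event emitted */
	printf("%d\n", demo_put(24));	/* 0: unchanged, no event */
	return 0;
}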
+8
sound/soc/intel/boards/sof_sdw.c
··· 763 763 }, 764 764 .driver_data = (void *)(SOC_SDW_CODEC_SPKR), 765 765 }, 766 + { 767 + .callback = sof_sdw_quirk_cb, 768 + .matches = { 769 + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), 770 + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0CCD") 771 + }, 772 + .driver_data = (void *)(SOC_SDW_CODEC_SPKR), 773 + }, 766 774 /* Pantherlake devices*/ 767 775 { 768 776 .callback = sof_sdw_quirk_cb,
+4 -1
sound/soc/sdca/sdca_functions.c
··· 1156 1156 if (!terminal->is_dataport) { 1157 1157 const char *type_name = sdca_find_terminal_name(terminal->type); 1158 1158 1159 - if (type_name) 1159 + if (type_name) { 1160 1160 entity->label = devm_kasprintf(dev, GFP_KERNEL, "%s %s", 1161 1161 entity->label, type_name); 1162 + if (!entity->label) 1163 + return -ENOMEM; 1164 + } 1162 1165 } 1163 1166 1164 1167 ret = fwnode_property_read_u32(entity_node,
+5 -2
tools/bpf/resolve_btfids/Makefile
··· 65 65 LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null) 66 66 LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf) 67 67 68 + ZLIB_LIBS := $(shell $(HOSTPKG_CONFIG) zlib --libs 2>/dev/null || echo -lz) 69 + ZSTD_LIBS := $(shell $(HOSTPKG_CONFIG) libzstd --libs 2>/dev/null || echo -lzstd) 70 + 68 71 HOSTCFLAGS_resolve_btfids += -g \ 69 72 -I$(srctree)/tools/include \ 70 73 -I$(srctree)/tools/include/uapi \ ··· 76 73 $(LIBELF_FLAGS) \ 77 74 -Wall -Werror 78 75 79 - LIBS = $(LIBELF_LIBS) -lz 76 + LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS) $(ZSTD_LIBS) 80 77 81 78 export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR 82 79 include $(srctree)/tools/build/Makefile.include ··· 86 83 87 84 $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) 88 85 $(call msg,LINK,$@) 89 - $(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS) 86 + $(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) $(EXTRA_LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS) 90 87 91 88 clean_objects := $(wildcard $(OUTPUT)/*.o \ 92 89 $(OUTPUT)/.*.o.cmd \
+54 -27
tools/bpf/resolve_btfids/main.c
··· 226 226 } 227 227 228 228 static struct btf_id *__btf_id__add(struct rb_root *root, 229 - char *name, 229 + const char *name, 230 230 enum btf_id_kind kind, 231 231 bool unique) 232 232 { ··· 250 250 id = zalloc(sizeof(*id)); 251 251 if (id) { 252 252 pr_debug("adding symbol %s\n", name); 253 - id->name = name; 253 + id->name = strdup(name); 254 + if (!id->name) { 255 + free(id); 256 + return NULL; 257 + } 254 258 id->kind = kind; 255 259 rb_link_node(&id->rb_node, parent, p); 256 260 rb_insert_color(&id->rb_node, root); ··· 262 258 return id; 263 259 } 264 260 265 - static inline struct btf_id *btf_id__add(struct rb_root *root, char *name, enum btf_id_kind kind) 261 + static inline struct btf_id *btf_id__add(struct rb_root *root, 262 + const char *name, 263 + enum btf_id_kind kind) 266 264 { 267 265 return __btf_id__add(root, name, kind, false); 268 266 } 269 267 270 - static inline struct btf_id *btf_id__add_unique(struct rb_root *root, char *name, enum btf_id_kind kind) 268 + static inline struct btf_id *btf_id__add_unique(struct rb_root *root, 269 + const char *name, 270 + enum btf_id_kind kind) 271 271 { 272 272 return __btf_id__add(root, name, kind, true); 273 273 } 274 274 275 - static char *get_id(const char *prefix_end) 275 + static int get_id(const char *prefix_end, char *buf, size_t buf_sz) 276 276 { 277 277 /* 278 278 * __BTF_ID__func__vfs_truncate__0 ··· 285 277 */ 286 278 int len = strlen(prefix_end); 287 279 int pos = sizeof("__") - 1; 288 - char *p, *id; 280 + char *p; 289 281 290 282 if (pos >= len) 291 - return NULL; 283 + return -1; 292 284 293 - id = strdup(prefix_end + pos); 294 - if (id) { 295 - /* 296 - * __BTF_ID__func__vfs_truncate__0 297 - * id = ^ 298 - * 299 - * cut the unique id part 300 - */ 301 - p = strrchr(id, '_'); 302 - p--; 303 - if (*p != '_') { 304 - free(id); 305 - return NULL; 306 - } 307 - *p = '\0'; 308 - } 309 - return id; 285 + if (len - pos >= buf_sz) 286 + return -1; 287 + 288 + strcpy(buf, prefix_end + pos); 289 + /* 290 + * __BTF_ID__func__vfs_truncate__0 291 + * buf = ^ 292 + * 293 + * cut the unique id part 294 + */ 295 + p = strrchr(buf, '_'); 296 + p--; 297 + if (*p != '_') 298 + return -1; 299 + *p = '\0'; 300 + 301 + return 0; 310 302 } 311 303 312 304 static struct btf_id *add_set(struct object *obj, char *name, enum btf_id_kind kind) ··· 343 335 344 336 static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size) 345 337 { 346 - char *id; 338 + char id[KSYM_NAME_LEN]; 347 339 348 - id = get_id(name + size); 349 - if (!id) { 340 + if (get_id(name + size, id, sizeof(id))) { 350 341 pr_err("FAILED to parse symbol name: %s\n", name); 351 342 return NULL; 352 343 } 353 344 354 345 return btf_id__add(root, id, BTF_ID_KIND_SYM); 346 + } 347 + 348 + static void btf_id__free_all(struct rb_root *root) 349 + { 350 + struct rb_node *next; 351 + struct btf_id *id; 352 + 353 + next = rb_first(root); 354 + while (next) { 355 + id = rb_entry(next, struct btf_id, rb_node); 356 + next = rb_next(&id->rb_node); 357 + rb_erase(&id->rb_node, root); 358 + free(id->name); 359 + free(id); 360 + } 355 361 } 356 362 357 363 static void bswap_32_data(void *data, u32 nr_bytes) ··· 1569 1547 out: 1570 1548 btf__free(obj.base_btf); 1571 1549 btf__free(obj.btf); 1550 + btf_id__free_all(&obj.structs); 1551 + btf_id__free_all(&obj.unions); 1552 + btf_id__free_all(&obj.typedefs); 1553 + btf_id__free_all(&obj.funcs); 1554 + btf_id__free_all(&obj.sets); 1572 1555 if (obj.efile.elf) { 1573 1556 elf_end(obj.efile.elf); 1574 1557 close(obj.efile.fd);
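The reworked get_id() parses into a caller-provided buffer instead of returning a strdup()ed string, which lets btf_id__free_all() own every name through the single strdup() in __btf_id__add(). A runnable copy of the parsing logic applied to a sample symbol; the local KSYM_NAME_LEN value is only for the demo:

#include <stdio.h>
#include <string.h>

#define KSYM_NAME_LEN 512

static int get_id(const char *prefix_end, char *buf, size_t buf_sz)
{
	int len = strlen(prefix_end);
	int pos = sizeof("__") - 1;
	char *p;

	if (pos >= len)
		return -1;
	if (len - pos >= buf_sz)
		return -1;

	strcpy(buf, prefix_end + pos);
	p = strrchr(buf, '_');		/* cut the trailing "__<seq>" part */
	p--;
	if (*p != '_')
		return -1;
	*p = '\0';
	return 0;
}

int main(void)
{
	char id[KSYM_NAME_LEN];

	/* prefix_end points past "__BTF_ID__func" in the full symbol name */
	if (!get_id("__vfs_truncate__0", id, sizeof(id)))
		printf("%s\n", id);	/* prints "vfs_truncate" */
	return 0;
}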
+4
tools/include/linux/args.h
··· 22 22 #define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) 23 23 24 24 /* Concatenate two parameters, but allow them to be expanded beforehand. */ 25 + #ifndef __CONCAT 25 26 #define __CONCAT(a, b) a ## b 27 + #endif 28 + #ifndef CONCATENATE 26 29 #define CONCATENATE(a, b) __CONCAT(a, b) 30 + #endif 27 31 28 32 #endif /* _LINUX_ARGS_H */
+76
tools/testing/selftests/arm64/signal/testcases/gcs_prot_none_fault.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2026 ARM Limited 4 + */ 5 + 6 + #include <errno.h> 7 + #include <signal.h> 8 + #include <unistd.h> 9 + 10 + #include <sys/mman.h> 11 + #include <sys/prctl.h> 12 + 13 + #include "test_signals_utils.h" 14 + #include "testcases.h" 15 + 16 + static uint64_t *gcs_page; 17 + static bool post_mprotect; 18 + 19 + #ifndef __NR_map_shadow_stack 20 + #define __NR_map_shadow_stack 453 21 + #endif 22 + 23 + static bool alloc_gcs(struct tdescr *td) 24 + { 25 + long page_size = sysconf(_SC_PAGE_SIZE); 26 + 27 + gcs_page = (void *)syscall(__NR_map_shadow_stack, 0, 28 + page_size, 0); 29 + if (gcs_page == MAP_FAILED) { 30 + fprintf(stderr, "Failed to map %ld byte GCS: %d\n", 31 + page_size, errno); 32 + return false; 33 + } 34 + 35 + return true; 36 + } 37 + 38 + static int gcs_prot_none_fault_trigger(struct tdescr *td) 39 + { 40 + /* Verify that the page is readable (ie, not completely unmapped) */ 41 + fprintf(stderr, "Read value 0x%lx\n", gcs_page[0]); 42 + 43 + if (mprotect(gcs_page, sysconf(_SC_PAGE_SIZE), PROT_NONE) != 0) { 44 + fprintf(stderr, "mprotect(PROT_NONE) failed: %d\n", errno); 45 + return 0; 46 + } 47 + post_mprotect = true; 48 + 49 + /* This should trigger a fault if PROT_NONE is honoured for the GCS page */ 50 + fprintf(stderr, "Read value after mprotect(PROT_NONE) 0x%lx\n", gcs_page[0]); 51 + return 0; 52 + } 53 + 54 + static int gcs_prot_none_fault_signal(struct tdescr *td, siginfo_t *si, 55 + ucontext_t *uc) 56 + { 57 + ASSERT_GOOD_CONTEXT(uc); 58 + 59 + /* A fault before mprotect(PROT_NONE) is unexpected. */ 60 + if (!post_mprotect) 61 + return 0; 62 + 63 + return 1; 64 + } 65 + 66 + struct tdescr tde = { 67 + .name = "GCS PROT_NONE fault", 68 + .descr = "Read from GCS after mprotect(PROT_NONE) segfaults", 69 + .feats_required = FEAT_GCS, 70 + .timeout = 3, 71 + .sig_ok = SIGSEGV, 72 + .sanity_disabled = true, 73 + .init = alloc_gcs, 74 + .trigger = gcs_prot_none_fault_trigger, 75 + .run = gcs_prot_none_fault_signal, 76 + };
+3
tools/testing/selftests/bpf/DENYLIST.asan
··· 1 + *arena* 2 + task_local_data 3 + uprobe_multi_test
+9 -4
tools/testing/selftests/bpf/Makefile
··· 27 27 endif 28 28 29 29 BPF_GCC ?= $(shell command -v bpf-gcc;) 30 + ifdef ASAN 31 + SAN_CFLAGS ?= -fsanitize=address -fno-omit-frame-pointer 32 + else 30 33 SAN_CFLAGS ?= 34 + endif 31 35 SAN_LDFLAGS ?= $(SAN_CFLAGS) 32 36 RELEASE ?= 33 37 OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0) ··· 330 326 $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool 331 327 $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ 332 328 ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \ 333 - EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \ 334 - EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \ 329 + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \ 330 + EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ 335 331 OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ 336 332 LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ 337 333 LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \ ··· 342 338 $(BPFOBJ) | $(BUILD_DIR)/bpftool 343 339 $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ 344 340 ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \ 345 - EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \ 346 - EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \ 341 + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \ 342 + EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ 347 343 OUTPUT=$(BUILD_DIR)/bpftool/ \ 348 344 LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \ 349 345 LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \ ··· 408 404 $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ 409 405 CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \ 410 406 LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \ 407 + EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ 411 408 OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ) 412 409 413 410 # Get Clang's default includes on this system, as opposed to those seen by
+8 -6
tools/testing/selftests/bpf/benchs/bench_trigger.c
··· 230 230 static void attach_ksyms_all(struct bpf_program *empty, bool kretprobe) 231 231 { 232 232 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); 233 - char **syms = NULL; 234 - size_t cnt = 0; 233 + struct bpf_link *link = NULL; 234 + struct ksyms *ksyms = NULL; 235 235 236 236 /* Some recursive functions will be skipped in 237 237 * bpf_get_ksyms -> skip_entry, as they can introduce sufficient ··· 241 241 * So, don't run the kprobe-multi-all and kretprobe-multi-all on 242 242 * a debug kernel. 243 243 */ 244 - if (bpf_get_ksyms(&syms, &cnt, true)) { 244 + if (bpf_get_ksyms(&ksyms, true)) { 245 245 fprintf(stderr, "failed to get ksyms\n"); 246 246 exit(1); 247 247 } 248 248 249 - opts.syms = (const char **) syms; 250 - opts.cnt = cnt; 249 + opts.syms = (const char **)ksyms->filtered_syms; 250 + opts.cnt = ksyms->filtered_cnt; 251 251 opts.retprobe = kretprobe; 252 252 /* attach empty to all the kernel functions except bpf_get_numa_node_id. */ 253 - if (!bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts)) { 253 + link = bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts); 254 + free_kallsyms_local(ksyms); 255 + if (!link) { 254 256 fprintf(stderr, "failed to attach bpf_program__attach_kprobe_multi_opts to all\n"); 255 257 exit(1); 256 258 }
+32 -13
tools/testing/selftests/bpf/bpf_util.h
··· 8 8 #include <errno.h> 9 9 #include <syscall.h> 10 10 #include <bpf/libbpf.h> /* libbpf_num_possible_cpus */ 11 + #include <linux/args.h> 11 12 12 13 static inline unsigned int bpf_num_possible_cpus(void) 13 14 { ··· 22 21 return possible_cpus; 23 22 } 24 23 25 - /* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst 26 - * is zero-terminated string no matter what (unless sz == 0, in which case 27 - * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs 28 - * in what is returned. Given this is internal helper, it's trivial to extend 29 - * this, when necessary. Use this instead of strncpy inside libbpf source code. 24 + /* 25 + * Simplified strscpy() implementation. The kernel one is in lib/string.c 30 26 */ 31 - static inline void bpf_strlcpy(char *dst, const char *src, size_t sz) 27 + static inline ssize_t sized_strscpy(char *dest, const char *src, size_t count) 32 28 { 33 - size_t i; 29 + long res = 0; 34 30 35 - if (sz == 0) 36 - return; 31 + if (count == 0) 32 + return -E2BIG; 37 33 38 - sz--; 39 - for (i = 0; i < sz && src[i]; i++) 40 - dst[i] = src[i]; 41 - dst[i] = '\0'; 34 + while (count > 1) { 35 + char c; 36 + 37 + c = src[res]; 38 + dest[res] = c; 39 + if (!c) 40 + return res; 41 + res++; 42 + count--; 43 + } 44 + 45 + /* Force NUL-termination. */ 46 + dest[res] = '\0'; 47 + 48 + /* Return E2BIG if the source didn't stop */ 49 + return src[res] ? -E2BIG : res; 42 50 } 51 + 52 + #define __strscpy0(dst, src, ...) \ 53 + sized_strscpy(dst, src, sizeof(dst)) 54 + #define __strscpy1(dst, src, size) \ 55 + sized_strscpy(dst, src, size) 56 + 57 + #undef strscpy /* Redefine the placeholder from tools/include/linux/string.h */ 58 + #define strscpy(dst, src, ...) \ 59 + CONCATENATE(__strscpy, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__) 43 60 44 61 #define __bpf_percpu_val_align __attribute__((__aligned__(8))) 45 62
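The new strscpy() here accepts either two or three arguments: COUNT_ARGS() (from tools/include/linux/args.h, whose guards appear earlier in this series) counts the extra arguments and CONCATENATE() pastes the result onto __strscpy, selecting __strscpy0 or __strscpy1. Note the two-argument form only works when dst is a true array, since it relies on sizeof(dst). A compilable sketch of the same dispatch, with a simplified sized_strscpy() and the counter trimmed to three slots:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define __COUNT_ARGS(_0, _1, _2, _n, X...) _n
#define COUNT_ARGS(X...)	__COUNT_ARGS(, ##X, 2, 1, 0)
#define __CONCAT(a, b)		a ## b
#define CONCATENATE(a, b)	__CONCAT(a, b)

/* simplified stand-in: same contract as the helper above */
static long sized_strscpy(char *dst, const char *src, size_t count)
{
	size_t n = strlen(src);

	if (!count)
		return -E2BIG;
	if (n >= count) {		/* truncate, NUL-terminate, report E2BIG */
		memcpy(dst, src, count - 1);
		dst[count - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, n + 1);
	return n;
}

#define __strscpy0(dst, src, ...)	sized_strscpy(dst, src, sizeof(dst))
#define __strscpy1(dst, src, size)	sized_strscpy(dst, src, size)
#define strscpy(dst, src, ...) \
	CONCATENATE(__strscpy, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__)

int main(void)
{
	char buf[8];

	printf("%ld\n", strscpy(buf, "hi"));		/* 2 args: uses sizeof(buf) */
	printf("%ld\n", strscpy(buf, "hello!", 4));	/* 3 args: explicit bound */
	printf("%s\n", buf);				/* "hel" */
	return 0;
}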
+19 -6
tools/testing/selftests/bpf/bpftool_helpers.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #include "bpftool_helpers.h" 3 2 #include <unistd.h> 4 3 #include <string.h> 5 4 #include <stdbool.h> 5 + 6 + #include "bpf_util.h" 7 + #include "bpftool_helpers.h" 6 8 7 9 #define BPFTOOL_PATH_MAX_LEN 64 8 10 #define BPFTOOL_FULL_CMD_MAX_LEN 512 9 11 10 12 #define BPFTOOL_DEFAULT_PATH "tools/sbin/bpftool" 11 13 12 - static int detect_bpftool_path(char *buffer) 14 + static int detect_bpftool_path(char *buffer, size_t size) 13 15 { 14 16 char tmp[BPFTOOL_PATH_MAX_LEN]; 17 + const char *env_path; 18 + 19 + /* First, check if BPFTOOL environment variable is set */ 20 + env_path = getenv("BPFTOOL"); 21 + if (env_path && access(env_path, X_OK) == 0) { 22 + strscpy(buffer, env_path, size); 23 + return 0; 24 + } else if (env_path) { 25 + fprintf(stderr, "bpftool '%s' doesn't exist or is not executable\n", env_path); 26 + return 1; 27 + } 15 28 16 29 /* Check default bpftool location (will work if we are running the 17 30 * default flavor of test_progs) 18 31 */ 19 32 snprintf(tmp, BPFTOOL_PATH_MAX_LEN, "./%s", BPFTOOL_DEFAULT_PATH); 20 33 if (access(tmp, X_OK) == 0) { 21 - strncpy(buffer, tmp, BPFTOOL_PATH_MAX_LEN); 34 + strscpy(buffer, tmp, size); 22 35 return 0; 23 36 } 24 37 ··· 40 27 */ 41 28 snprintf(tmp, BPFTOOL_PATH_MAX_LEN, "../%s", BPFTOOL_DEFAULT_PATH); 42 29 if (access(tmp, X_OK) == 0) { 43 - strncpy(buffer, tmp, BPFTOOL_PATH_MAX_LEN); 30 + strscpy(buffer, tmp, size); 44 31 return 0; 45 32 } 46 33 47 - /* Failed to find bpftool binary */ 34 + fprintf(stderr, "Failed to detect bpftool path, use BPFTOOL env var to override\n"); 48 35 return 1; 49 36 } 50 37 ··· 57 44 int ret; 58 45 59 46 /* Detect and cache bpftool binary location */ 60 - if (bpftool_path[0] == 0 && detect_bpftool_path(bpftool_path)) 47 + if (bpftool_path[0] == 0 && detect_bpftool_path(bpftool_path, sizeof(bpftool_path))) 61 48 return 1; 62 49 63 50 ret = snprintf(command, BPFTOOL_FULL_CMD_MAX_LEN, "%s %s%s",
+1 -1
tools/testing/selftests/bpf/cgroup_helpers.c
··· 86 86 enable[len] = 0; 87 87 close(fd); 88 88 } else { 89 - bpf_strlcpy(enable, controllers, sizeof(enable)); 89 + strscpy(enable, controllers); 90 90 } 91 91 92 92 snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path);
+9 -9
tools/testing/selftests/bpf/jit_disasm_helpers.c
··· 122 122 pc += cnt; 123 123 } 124 124 qsort(labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32); 125 - for (i = 0; i < labels.cnt; ++i) 126 - /* gcc is unable to infer upper bound for labels.cnt and assumes 127 - * it to be U32_MAX. U32_MAX takes 10 decimal digits. 128 - * snprintf below prints into labels.names[*], 129 - * which has space only for two digits and a letter. 130 - * To avoid truncation warning use (i % MAX_LOCAL_LABELS), 131 - * which informs gcc about printed value upper bound. 132 - */ 133 - snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i % MAX_LOCAL_LABELS); 125 + /* gcc is unable to infer upper bound for labels.cnt and 126 + * assumes it to be U32_MAX. U32_MAX takes 10 decimal digits. 127 + * snprintf below prints into labels.names[*], which has space 128 + * only for two digits and a letter. To avoid truncation 129 + * warning use (i < MAX_LOCAL_LABELS), which informs gcc about 130 + * printed value upper bound. 131 + */ 132 + for (i = 0; i < labels.cnt && i < MAX_LOCAL_LABELS; ++i) 133 + snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i); 134 134 135 135 /* now print with labels */ 136 136 labels.print_phase = true;
+2 -3
tools/testing/selftests/bpf/network_helpers.c
··· 432 432 memset(addr, 0, sizeof(*sun)); 433 433 sun->sun_family = family; 434 434 sun->sun_path[0] = 0; 435 - strcpy(sun->sun_path + 1, addr_str); 435 + strscpy(sun->sun_path + 1, addr_str, sizeof(sun->sun_path) - 1); 436 436 if (len) 437 437 *len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(addr_str); 438 438 return 0; ··· 581 581 return -1; 582 582 583 583 ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN); 584 - strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1); 585 - ifr.ifr_name[IFNAMSIZ - 1] = '\0'; 584 + strscpy(ifr.ifr_name, dev_name); 586 585 587 586 err = ioctl(fd, TUNSETIFF, &ifr); 588 587 if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) {
+1 -2
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
··· 346 346 close(finish_pipe[1]); 347 347 348 348 test_data = malloc(sizeof(char) * 10); 349 - strncpy(test_data, "test_data", 10); 350 - test_data[9] = '\0'; 349 + strscpy(test_data, "test_data", 10); 351 350 352 351 test_data_long = malloc(sizeof(char) * 5000); 353 352 for (int i = 0; i < 5000; ++i) {
+1 -1
tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
··· 281 281 dctcp_skel = bpf_dctcp__open(); 282 282 if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel")) 283 283 return; 284 - strcpy(dctcp_skel->rodata->fallback_cc, "cubic"); 284 + strscpy(dctcp_skel->rodata->fallback_cc, "cubic"); 285 285 if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load")) 286 286 goto done; 287 287
+3 -1
tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
··· 202 202 203 203 iter_fd = bpf_iter_create(bpf_link__fd(link)); 204 204 if (!ASSERT_GE(iter_fd, 0, "iter_create")) 205 - goto out; 205 + goto out_link; 206 206 207 207 /* trigger the program run */ 208 208 (void)read(iter_fd, buf, sizeof(buf)); ··· 210 210 ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id"); 211 211 212 212 close(iter_fd); 213 + out_link: 214 + bpf_link__destroy(link); 213 215 out: 214 216 cgrp_ls_sleepable__destroy(skel); 215 217 }
+4 -2
tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
··· 308 308 return -1; 309 309 } 310 310 311 - strncpy(type_str, type, type_sz); 312 - strncpy(field_str, field, field_sz); 311 + memcpy(type_str, type, type_sz); 312 + type_str[type_sz] = '\0'; 313 + memcpy(field_str, field, field_sz); 314 + field_str[field_sz] = '\0'; 313 315 btf_id = btf__find_by_name(btf, type_str); 314 316 if (btf_id < 0) { 315 317 PRINT_FAIL("No BTF info for type %s\n", type_str);
+4 -1
tools/testing/selftests/bpf/prog_tests/dynptr.c
··· 137 137 ); 138 138 139 139 link = bpf_program__attach(prog); 140 - if (!ASSERT_OK_PTR(link, "bpf_program__attach")) 140 + if (!ASSERT_OK_PTR(link, "bpf_program__attach")) { 141 + bpf_object__close(obj); 141 142 goto cleanup; 143 + } 142 144 143 145 err = bpf_prog_test_run_opts(aux_prog_fd, &topts); 144 146 bpf_link__destroy(link); 147 + bpf_object__close(obj); 145 148 146 149 if (!ASSERT_OK(err, "test_run")) 147 150 goto cleanup;
+2 -2
tools/testing/selftests/bpf/prog_tests/fd_array.c
··· 412 412 ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG"); 413 413 414 414 cleanup_fds: 415 - while (i > 0) 416 - Close(extra_fds[--i]); 415 + while (i-- > 0) 416 + Close(extra_fds[i]); 417 417 } 418 418 419 419 void test_fd_array_cnt(void)
+2 -2
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
··· 570 570 }; 571 571 int fd, ret; 572 572 573 - strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); 573 + strscpy(ifr.ifr_name, ifname); 574 574 575 575 fd = open("/dev/net/tun", O_RDWR); 576 576 if (fd < 0) ··· 599 599 struct ifreq ifr = {}; 600 600 int sk, ret; 601 601 602 - strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); 602 + strscpy(ifr.ifr_name, ifname); 603 603 604 604 sk = socket(PF_INET, SOCK_DGRAM, 0); 605 605 if (sk < 0)
+1
tools/testing/selftests/bpf/prog_tests/htab_update.c
··· 61 61 62 62 ASSERT_EQ(skel->bss->update_err, -EDEADLK, "no reentrancy"); 63 63 out: 64 + free(value); 64 65 htab_update__destroy(skel); 65 66 } 66 67
+2 -5
tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
··· 104 104 if (!ASSERT_GE(iter_fd, 0, "iter_create")) 105 105 goto destroy; 106 106 107 - memset(buf, 0, sizeof(buf)); 108 - while (read(iter_fd, buf, sizeof(buf)) > 0) { 109 - /* Read out all contents */ 110 - printf("%s", buf); 111 - } 107 + while (read(iter_fd, buf, sizeof(buf)) > 0) 108 + ; /* Read out all contents */ 112 109 113 110 /* Next reads should return 0 */ 114 111 ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+5 -7
tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
··· 456 456 { 457 457 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); 458 458 struct kprobe_multi_empty *skel = NULL; 459 - char **syms = NULL; 460 - size_t cnt = 0; 459 + struct ksyms *ksyms = NULL; 461 460 462 - if (!ASSERT_OK(bpf_get_ksyms(&syms, &cnt, kernel), "bpf_get_ksyms")) 461 + if (!ASSERT_OK(bpf_get_ksyms(&ksyms, kernel), "bpf_get_ksyms")) 463 462 return; 464 463 465 464 skel = kprobe_multi_empty__open_and_load(); 466 465 if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load")) 467 466 goto cleanup; 468 467 469 - opts.syms = (const char **) syms; 470 - opts.cnt = cnt; 468 + opts.syms = (const char **)ksyms->filtered_syms; 469 + opts.cnt = ksyms->filtered_cnt; 471 470 472 471 do_bench_test(skel, &opts); 473 472 474 473 cleanup: 475 474 kprobe_multi_empty__destroy(skel); 476 - if (syms) 477 - free(syms); 475 + free_kallsyms_local(ksyms); 478 476 } 479 477 480 478 static void test_kprobe_multi_bench_attach_addr(bool kernel)
+1 -1
tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c
··· 117 117 const char *ns1 = NETNS_BASE "1"; 118 118 const char *ns6 = NETNS_BASE "6"; 119 119 struct nstoken *nstoken = NULL; 120 - const char *foobar = "foobar"; 120 + const char foobar[] = "foobar"; 121 121 ssize_t bytes; 122 122 int sfd, cfd; 123 123 char buf[7];
+218
tools/testing/selftests/bpf/prog_tests/map_kptr_race.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */ 3 + #include <test_progs.h> 4 + #include <network_helpers.h> 5 + 6 + #include "map_kptr_race.skel.h" 7 + 8 + static int get_map_id(int map_fd) 9 + { 10 + struct bpf_map_info info = {}; 11 + __u32 len = sizeof(info); 12 + 13 + if (!ASSERT_OK(bpf_map_get_info_by_fd(map_fd, &info, &len), "get_map_info")) 14 + return -1; 15 + return info.id; 16 + } 17 + 18 + static int read_refs(struct map_kptr_race *skel) 19 + { 20 + LIBBPF_OPTS(bpf_test_run_opts, opts); 21 + int ret; 22 + 23 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.count_ref), &opts); 24 + if (!ASSERT_OK(ret, "count_ref run")) 25 + return -1; 26 + if (!ASSERT_OK(opts.retval, "count_ref retval")) 27 + return -1; 28 + return skel->bss->num_of_refs; 29 + } 30 + 31 + static void test_htab_leak(void) 32 + { 33 + LIBBPF_OPTS(bpf_test_run_opts, opts, 34 + .data_in = &pkt_v4, 35 + .data_size_in = sizeof(pkt_v4), 36 + .repeat = 1, 37 + ); 38 + struct map_kptr_race *skel, *watcher; 39 + int ret, map_id; 40 + 41 + skel = map_kptr_race__open_and_load(); 42 + if (!ASSERT_OK_PTR(skel, "open_and_load")) 43 + return; 44 + 45 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_htab_leak), &opts); 46 + if (!ASSERT_OK(ret, "test_htab_leak run")) 47 + goto out_skel; 48 + if (!ASSERT_OK(opts.retval, "test_htab_leak retval")) 49 + goto out_skel; 50 + 51 + map_id = get_map_id(bpf_map__fd(skel->maps.race_hash_map)); 52 + if (!ASSERT_GE(map_id, 0, "map_id")) 53 + goto out_skel; 54 + 55 + watcher = map_kptr_race__open_and_load(); 56 + if (!ASSERT_OK_PTR(watcher, "watcher open_and_load")) 57 + goto out_skel; 58 + 59 + watcher->bss->target_map_id = map_id; 60 + watcher->links.map_put = bpf_program__attach(watcher->progs.map_put); 61 + if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry")) 62 + goto out_watcher; 63 + watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free); 64 + if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit")) 65 + goto out_watcher; 66 + 67 + map_kptr_race__destroy(skel); 68 + skel = NULL; 69 + 70 + kern_sync_rcu(); 71 + 72 + while (!READ_ONCE(watcher->bss->map_freed)) 73 + sched_yield(); 74 + 75 + ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed"); 76 + ASSERT_EQ(read_refs(watcher), 2, "htab refcount"); 77 + 78 + out_watcher: 79 + map_kptr_race__destroy(watcher); 80 + out_skel: 81 + map_kptr_race__destroy(skel); 82 + } 83 + 84 + static void test_percpu_htab_leak(void) 85 + { 86 + LIBBPF_OPTS(bpf_test_run_opts, opts, 87 + .data_in = &pkt_v4, 88 + .data_size_in = sizeof(pkt_v4), 89 + .repeat = 1, 90 + ); 91 + struct map_kptr_race *skel, *watcher; 92 + int ret, map_id; 93 + 94 + skel = map_kptr_race__open(); 95 + if (!ASSERT_OK_PTR(skel, "open")) 96 + return; 97 + 98 + skel->rodata->nr_cpus = libbpf_num_possible_cpus(); 99 + if (skel->rodata->nr_cpus > 16) 100 + skel->rodata->nr_cpus = 16; 101 + 102 + ret = map_kptr_race__load(skel); 103 + if (!ASSERT_OK(ret, "load")) 104 + goto out_skel; 105 + 106 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_percpu_htab_leak), &opts); 107 + if (!ASSERT_OK(ret, "test_percpu_htab_leak run")) 108 + goto out_skel; 109 + if (!ASSERT_OK(opts.retval, "test_percpu_htab_leak retval")) 110 + goto out_skel; 111 + 112 + map_id = get_map_id(bpf_map__fd(skel->maps.race_percpu_hash_map)); 113 + if (!ASSERT_GE(map_id, 0, "map_id")) 114 + goto out_skel; 115 +
116 + watcher = map_kptr_race__open_and_load(); 117 + if (!ASSERT_OK_PTR(watcher, "watcher open_and_load")) 118 + goto out_skel; 119 + 120 + watcher->bss->target_map_id = map_id; 121 + watcher->links.map_put = bpf_program__attach(watcher->progs.map_put); 122 + if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry")) 123 + goto out_watcher; 124 + watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free); 125 + if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit")) 126 + goto out_watcher; 127 + 128 + map_kptr_race__destroy(skel); 129 + skel = NULL; 130 + 131 + kern_sync_rcu(); 132 + 133 + while (!READ_ONCE(watcher->bss->map_freed)) 134 + sched_yield(); 135 + 136 + ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed"); 137 + ASSERT_EQ(read_refs(watcher), 2, "percpu_htab refcount"); 138 + 139 + out_watcher: 140 + map_kptr_race__destroy(watcher); 141 + out_skel: 142 + map_kptr_race__destroy(skel); 143 + } 144 + 145 + static void test_sk_ls_leak(void) 146 + { 147 + struct map_kptr_race *skel, *watcher; 148 + int listen_fd = -1, client_fd = -1, map_id; 149 + 150 + skel = map_kptr_race__open_and_load(); 151 + if (!ASSERT_OK_PTR(skel, "open_and_load")) 152 + return; 153 + 154 + if (!ASSERT_OK(map_kptr_race__attach(skel), "attach")) 155 + goto out_skel; 156 + 157 + listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); 158 + if (!ASSERT_GE(listen_fd, 0, "start_server")) 159 + goto out_skel; 160 + 161 + client_fd = connect_to_fd(listen_fd, 0); 162 + if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) 163 + goto out_skel; 164 + 165 + if (!ASSERT_EQ(skel->bss->sk_ls_leak_done, 1, "sk_ls_leak_done")) 166 + goto out_skel; 167 + 168 + close(client_fd); 169 + client_fd = -1; 170 + close(listen_fd); 171 + listen_fd = -1; 172 + 173 + map_id = get_map_id(bpf_map__fd(skel->maps.race_sk_ls_map)); 174 + if (!ASSERT_GE(map_id, 0, "map_id")) 175 + goto out_skel; 176 + 177 + watcher = map_kptr_race__open_and_load(); 178 + if (!ASSERT_OK_PTR(watcher, "watcher open_and_load")) 179 + goto out_skel; 180 + 181 + watcher->bss->target_map_id = map_id; 182 + watcher->links.map_put = bpf_program__attach(watcher->progs.map_put); 183 + if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry")) 184 + goto out_watcher; 185 + watcher->links.sk_map_free = bpf_program__attach(watcher->progs.sk_map_free); 186 + if (!ASSERT_OK_PTR(watcher->links.sk_map_free, "attach fexit")) 187 + goto out_watcher; 188 + 189 + map_kptr_race__destroy(skel); 190 + skel = NULL; 191 + 192 + kern_sync_rcu(); 193 + 194 + while (!READ_ONCE(watcher->bss->map_freed)) 195 + sched_yield(); 196 + 197 + ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed"); 198 + ASSERT_EQ(read_refs(watcher), 2, "sk_ls refcount"); 199 + 200 + out_watcher: 201 + map_kptr_race__destroy(watcher); 202 + out_skel: 203 + if (client_fd >= 0) 204 + close(client_fd); 205 + if (listen_fd >= 0) 206 + close(listen_fd); 207 + map_kptr_race__destroy(skel); 208 + } 209 + 210 + void serial_test_map_kptr_race(void) 211 + { 212 + if (test__start_subtest("htab_leak")) 213 + test_htab_leak(); 214 + if (test__start_subtest("percpu_htab_leak")) 215 + test_percpu_htab_leak(); 216 + if (test__start_subtest("sk_ls_leak")) 217 + test_sk_ls_leak(); 218 + }
+2 -2
tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
··· 28 28 vals[i] = rand(); 29 29 30 30 if (type == QUEUE) 31 - strncpy(file, "./test_queue_map.bpf.o", sizeof(file)); 31 + strscpy(file, "./test_queue_map.bpf.o"); 32 32 else if (type == STACK) 33 - strncpy(file, "./test_stack_map.bpf.o", sizeof(file)); 33 + strscpy(file, "./test_stack_map.bpf.o"); 34 34 else 35 35 return; 36 36
+1 -1
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
··· 2091 2091 {U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}}, 2092 2092 2093 2093 {U64, U32, {0, 0x100000000}, {0, 0}}, 2094 - {U64, U32, {0xfffffffe, 0x100000000}, {0x80000000, 0x80000000}}, 2094 + {U64, U32, {0xfffffffe, 0x300000000}, {0x80000000, 0x80000000}}, 2095 2095 2096 2096 {U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}}, 2097 2097 /* these are tricky cases where lower 32 bits allow to tighten 64
+1 -1
tools/testing/selftests/bpf/prog_tests/setget_sockopt.c
··· 212 212 if (!ASSERT_OK_PTR(skel, "open skel")) 213 213 goto done; 214 214 215 - strcpy(skel->rodata->veth, "binddevtest1"); 215 + strscpy(skel->rodata->veth, "binddevtest1"); 216 216 skel->rodata->veth_ifindex = if_nametoindex("binddevtest1"); 217 217 if (!ASSERT_GT(skel->rodata->veth_ifindex, 0, "if_nametoindex")) 218 218 goto done;
+1 -1
tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c
··· 34 34 35 35 memset(&sockaddr, 0, sizeof(sockaddr)); 36 36 sockaddr.sun_family = AF_UNIX; 37 - strncpy(sockaddr.sun_path, sock_path, strlen(sock_path)); 37 + strscpy(sockaddr.sun_path, sock_path); 38 38 sockaddr.sun_path[0] = '\0'; 39 39 40 40 err = bind(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
+14 -14
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
··· 204 204 /* Fail since bpf_link for the same prog type has been created. */ 205 205 link2 = bpf_program__attach_sockmap(prog_clone, map); 206 206 if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) { 207 - bpf_link__detach(link2); 207 + bpf_link__destroy(link2); 208 208 goto out; 209 209 } 210 210 ··· 230 230 if (!ASSERT_OK(err, "bpf_link_update")) 231 231 goto out; 232 232 out: 233 - bpf_link__detach(link); 233 + bpf_link__destroy(link); 234 234 test_skmsg_load_helpers__destroy(skel); 235 235 } 236 236 ··· 417 417 if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap")) 418 418 goto out; 419 419 420 - bpf_link__detach(link); 420 + bpf_link__destroy(link); 421 421 422 422 err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0); 423 423 if (!ASSERT_OK(err, "bpf_prog_attach")) ··· 426 426 /* Fail since attaching with the same prog/map has been done. */ 427 427 link = bpf_program__attach_sockmap(prog, map); 428 428 if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap")) 429 - bpf_link__detach(link); 429 + bpf_link__destroy(link); 430 430 431 431 err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT); 432 432 if (!ASSERT_OK(err, "bpf_prog_detach2")) ··· 747 747 test_sockmap_skb_verdict_peek_helper(map); 748 748 ASSERT_EQ(pass->bss->clone_called, 1, "clone_called"); 749 749 out: 750 - bpf_link__detach(link); 750 + bpf_link__destroy(link); 751 751 test_sockmap_pass_prog__destroy(pass); 752 752 } 753 753 754 754 static void test_sockmap_unconnected_unix(void) 755 755 { 756 - int err, map, stream = 0, dgram = 0, zero = 0; 756 + int err, map, stream = -1, dgram = -1, zero = 0; 757 757 struct test_sockmap_pass_prog *skel; 758 758 759 759 skel = test_sockmap_pass_prog__open_and_load(); ··· 764 764 765 765 stream = xsocket(AF_UNIX, SOCK_STREAM, 0); 766 766 if (stream < 0) 767 - return; 767 + goto out; 768 768 769 769 dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0); 770 - if (dgram < 0) { 771 - close(stream); 772 - return; 773 - } 770 + if (dgram < 0) 771 + goto out; 774 772 775 773 err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY); 776 - ASSERT_ERR(err, "bpf_map_update_elem(stream)"); 774 + if (!ASSERT_ERR(err, "bpf_map_update_elem(stream)")) 775 + goto out; 777 776 778 777 err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY); 779 778 ASSERT_OK(err, "bpf_map_update_elem(dgram)"); 780 - 779 + out: 781 780 close(stream); 782 781 close(dgram); 782 + test_sockmap_pass_prog__destroy(skel); 783 783 } 784 784 785 785 static void test_sockmap_many_socket(void) ··· 1027 1027 if (xrecv_nonblock(conn, &buf, 1, 0) != 1) 1028 1028 FAIL("xrecv_nonblock"); 1029 1029 detach: 1030 - bpf_link__detach(link); 1030 + bpf_link__destroy(link); 1031 1031 close: 1032 1032 xclose(conn); 1033 1033 xclose(peer);
+1 -1
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
··· 899 899 900 900 redir_to_listening(family, sotype, sock_map, verdict_map, REDIR_EGRESS); 901 901 902 - bpf_link__detach(link); 902 + bpf_link__destroy(link); 903 903 } 904 904 905 905 static void redir_partial(int family, int sotype, int sock_map, int parser_map)
+1 -1
tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
··· 142 142 143 143 /* TCP_CONGESTION can extend the string */ 144 144 145 - strcpy(buf.cc, "nv"); 145 + strscpy(buf.cc, "nv"); 146 146 err = setsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, strlen("nv")); 147 147 if (err) { 148 148 log_err("Failed to call setsockopt(TCP_CONGESTION)");
+1 -3
tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c
··· 54 54 } 55 55 56 56 err = struct_ops_private_stack_fail__load(skel); 57 - if (!ASSERT_ERR(err, "struct_ops_private_stack_fail__load")) 58 - goto cleanup; 59 - return; 57 + ASSERT_ERR(err, "struct_ops_private_stack_fail__load"); 60 58 61 59 cleanup: 62 60 struct_ops_private_stack_fail__destroy(skel);
+1 -1
tools/testing/selftests/bpf/prog_tests/task_local_data.h
··· 262 262 if (!atomic_compare_exchange_strong(&tld_meta_p->cnt, &cnt, cnt + 1)) 263 263 goto retry; 264 264 265 - strncpy(tld_meta_p->metadata[i].name, name, TLD_NAME_LEN); 265 + strscpy(tld_meta_p->metadata[i].name, name); 266 266 atomic_store(&tld_meta_p->metadata[i].size, size); 267 267 return (tld_key_t){(__s16)off}; 268 268 }
+2 -4
tools/testing/selftests/bpf/prog_tests/tc_opts.c
··· 1360 1360 1361 1361 assert_mprog_count_ifindex(ifindex, target, 4); 1362 1362 1363 - ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); 1364 - ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); 1365 - ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); 1366 - return; 1363 + goto cleanup; 1364 + 1367 1365 cleanup3: 1368 1366 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 1369 1367 ASSERT_OK(err, "prog_detach");
+1 -1
tools/testing/selftests/bpf/prog_tests/tc_redirect.c
··· 1095 1095 1096 1096 ifr.ifr_flags = IFF_TUN | IFF_NO_PI; 1097 1097 if (*name) 1098 - strncpy(ifr.ifr_name, name, IFNAMSIZ); 1098 + strscpy(ifr.ifr_name, name); 1099 1099 1100 1100 err = ioctl(fd, TUNSETIFF, &ifr); 1101 1101 if (!ASSERT_OK(err, "ioctl TUNSETIFF"))
+3
tools/testing/selftests/bpf/prog_tests/test_sysctl.c
··· 27 27 OP_EPERM, 28 28 SUCCESS, 29 29 } result; 30 + struct bpf_object *obj; 30 31 }; 31 32 32 33 static struct sysctl_test tests[] = { ··· 1472 1471 return -1; 1473 1472 } 1474 1473 1474 + test->obj = obj; 1475 1475 return prog_fd; 1476 1476 } 1477 1477 ··· 1575 1573 /* Detaching w/o checking return code: best effort attempt. */ 1576 1574 if (progfd != -1) 1577 1575 bpf_prog_detach(cgfd, atype); 1576 + bpf_object__close(test->obj); 1578 1577 close(progfd); 1579 1578 printf("[%s]\n", err ? "FAIL" : "PASS"); 1580 1579 return err;
+4 -1
tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
··· 699 699 return; 700 700 701 701 if (!ASSERT_OK(setup(), "global setup")) 702 - return; 702 + goto out; 703 703 704 704 for (i = 0; i < ARRAY_SIZE(subtests_cfg); i++) { 705 705 cfg = &subtests_cfg[i]; ··· 711 711 subtest_cleanup(cfg); 712 712 } 713 713 cleanup(); 714 + 715 + out: 716 + test_tc_tunnel__destroy(skel); 714 717 }
+2 -2
tools/testing/selftests/bpf/prog_tests/test_veristat.c
··· 24 24 25 25 /* for no_alu32 and cpuv4 veristat is in parent folder */ 26 26 if (access("./veristat", F_OK) == 0) 27 - strcpy(fix->veristat, "./veristat"); 27 + strscpy(fix->veristat, "./veristat"); 28 28 else if (access("../veristat", F_OK) == 0) 29 - strcpy(fix->veristat, "../veristat"); 29 + strscpy(fix->veristat, "../veristat"); 30 30 else 31 31 PRINT_FAIL("Can't find veristat binary"); 32 32
+20 -4
tools/testing/selftests/bpf/prog_tests/test_xsk.c
··· 2003 2003 2004 2004 int testapp_stats_rx_full(struct test_spec *test) 2005 2005 { 2006 - if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE)) 2006 + struct pkt_stream *tmp; 2007 + 2008 + tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); 2009 + if (!tmp) 2007 2010 return TEST_FAILURE; 2008 - test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 2011 + test->ifobj_tx->xsk->pkt_stream = tmp; 2012 + 2013 + tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 2014 + if (!tmp) 2015 + return TEST_FAILURE; 2016 + test->ifobj_rx->xsk->pkt_stream = tmp; 2009 2017 2010 2018 test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; 2011 2019 test->ifobj_rx->release_rx = false; ··· 2023 2015 2024 2016 int testapp_stats_fill_empty(struct test_spec *test) 2025 2017 { 2026 - if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE)) 2018 + struct pkt_stream *tmp; 2019 + 2020 + tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); 2021 + if (!tmp) 2027 2022 return TEST_FAILURE; 2028 - test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 2023 + test->ifobj_tx->xsk->pkt_stream = tmp; 2024 + 2025 + tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 2026 + if (!tmp) 2027 + return TEST_FAILURE; 2028 + test->ifobj_rx->xsk->pkt_stream = tmp; 2029 2029 2030 2030 test->ifobj_rx->use_fill_ring = false; 2031 2031 test->ifobj_rx->validation_func = validate_fill_empty;
+5 -1
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
··· 62 62 return; 63 63 close(child->go[1]); 64 64 close(child->go[0]); 65 - if (child->thread) 65 + if (child->thread) { 66 66 pthread_join(child->thread, NULL); 67 + child->thread = 0; 68 + } 67 69 close(child->c2p[0]); 68 70 close(child->c2p[1]); 69 71 if (child->pid > 0) ··· 332 330 test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) 333 331 { 334 332 static struct child child; 333 + 334 + memset(&child, 0, sizeof(child)); 335 335 336 336 /* no pid filter */ 337 337 __test_attach_api(binary, pattern, opts, NULL);
+1 -1
tools/testing/selftests/bpf/prog_tests/verifier_log.c
··· 47 47 static void verif_log_subtest(const char *name, bool expect_load_error, int log_level) 48 48 { 49 49 LIBBPF_OPTS(bpf_prog_load_opts, opts); 50 - char *exp_log, prog_name[16], op_name[32]; 50 + char *exp_log, prog_name[24], op_name[32]; 51 51 struct test_log_buf *skel; 52 52 struct bpf_program *prog; 53 53 size_t fixed_log_sz;
+2 -1
tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
··· 67 67 struct nstoken *tok = NULL; 68 68 int iifindex, stats_fd; 69 69 __u32 value, key = 0; 70 - struct bpf_link *link; 70 + struct bpf_link *link = NULL; 71 71 72 72 if (SYS_NOFAIL("nft -v")) { 73 73 fprintf(stdout, "Missing required nft tool\n"); ··· 160 160 161 161 ASSERT_GE(value, N_PACKETS - 2, "bpf_xdp_flow_lookup failed"); 162 162 out: 163 + bpf_link__destroy(link); 163 164 xdp_flowtable__destroy(skel); 164 165 if (tok) 165 166 close_netns(tok);
+2 -2
tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
··· 126 126 127 127 static void close_xsk(struct xsk *xsk) 128 128 { 129 - if (xsk->umem) 130 - xsk_umem__delete(xsk->umem); 131 129 if (xsk->socket) 132 130 xsk_socket__delete(xsk->socket); 131 + if (xsk->umem) 132 + xsk_umem__delete(xsk->umem); 133 133 munmap(xsk->umem_area, UMEM_SIZE); 134 134 } 135 135
+1 -1
tools/testing/selftests/bpf/progs/dmabuf_iter.c
··· 48 48 49 49 /* Buffers are not required to be named */ 50 50 if (pname) { 51 - if (bpf_probe_read_kernel(name, sizeof(name), pname)) 51 + if (bpf_probe_read_kernel_str(name, sizeof(name), pname) < 0) 52 52 return 1; 53 53 54 54 /* Name strings can be provided by userspace */
+197
tools/testing/selftests/bpf/progs/map_kptr_race.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */ 3 + #include <vmlinux.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + #include "../test_kmods/bpf_testmod_kfunc.h" 7 + 8 + struct map_value { 9 + struct prog_test_ref_kfunc __kptr *ref_ptr; 10 + }; 11 + 12 + struct { 13 + __uint(type, BPF_MAP_TYPE_HASH); 14 + __uint(map_flags, BPF_F_NO_PREALLOC); 15 + __type(key, int); 16 + __type(value, struct map_value); 17 + __uint(max_entries, 1); 18 + } race_hash_map SEC(".maps"); 19 + 20 + struct { 21 + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 22 + __uint(map_flags, BPF_F_NO_PREALLOC); 23 + __type(key, int); 24 + __type(value, struct map_value); 25 + __uint(max_entries, 1); 26 + } race_percpu_hash_map SEC(".maps"); 27 + 28 + struct { 29 + __uint(type, BPF_MAP_TYPE_SK_STORAGE); 30 + __uint(map_flags, BPF_F_NO_PREALLOC); 31 + __type(key, int); 32 + __type(value, struct map_value); 33 + } race_sk_ls_map SEC(".maps"); 34 + 35 + int num_of_refs; 36 + int sk_ls_leak_done; 37 + int target_map_id; 38 + int map_freed; 39 + const volatile int nr_cpus; 40 + 41 + SEC("tc") 42 + int test_htab_leak(struct __sk_buff *skb) 43 + { 44 + struct prog_test_ref_kfunc *p, *old; 45 + struct map_value val = {}; 46 + struct map_value *v; 47 + int key = 0; 48 + 49 + if (bpf_map_update_elem(&race_hash_map, &key, &val, BPF_ANY)) 50 + return 1; 51 + 52 + v = bpf_map_lookup_elem(&race_hash_map, &key); 53 + if (!v) 54 + return 2; 55 + 56 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 57 + if (!p) 58 + return 3; 59 + old = bpf_kptr_xchg(&v->ref_ptr, p); 60 + if (old) 61 + bpf_kfunc_call_test_release(old); 62 + 63 + bpf_map_delete_elem(&race_hash_map, &key); 64 + 65 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 66 + if (!p) 67 + return 4; 68 + old = bpf_kptr_xchg(&v->ref_ptr, p); 69 + if (old) 70 + bpf_kfunc_call_test_release(old); 71 + 72 + return 0; 73 + } 74 + 75 + static int fill_percpu_kptr(struct map_value *v) 76 + { 77 + struct prog_test_ref_kfunc *p, *old; 78 + 79 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 80 + if (!p) 81 + return 1; 82 + old = bpf_kptr_xchg(&v->ref_ptr, p); 83 + if (old) 84 + bpf_kfunc_call_test_release(old); 85 + return 0; 86 + } 87 + 88 + SEC("tc") 89 + int test_percpu_htab_leak(struct __sk_buff *skb) 90 + { 91 + struct map_value *v, *arr[16] = {}; 92 + struct map_value val = {}; 93 + int key = 0; 94 + int err = 0; 95 + 96 + if (bpf_map_update_elem(&race_percpu_hash_map, &key, &val, BPF_ANY)) 97 + return 1; 98 + 99 + for (int i = 0; i < nr_cpus; i++) { 100 + v = bpf_map_lookup_percpu_elem(&race_percpu_hash_map, &key, i); 101 + if (!v) 102 + return 2; 103 + arr[i] = v; 104 + } 105 + 106 + bpf_map_delete_elem(&race_percpu_hash_map, &key); 107 + 108 + for (int i = 0; i < nr_cpus; i++) { 109 + v = arr[i]; 110 + err = fill_percpu_kptr(v); 111 + if (err) 112 + return 3; 113 + } 114 + 115 + return 0; 116 + } 117 + 118 + SEC("tp_btf/inet_sock_set_state") 119 + int BPF_PROG(test_sk_ls_leak, struct sock *sk, int oldstate, int newstate) 120 + { 121 + struct prog_test_ref_kfunc *p, *old; 122 + struct map_value *v; 123 + 124 + if (newstate != BPF_TCP_SYN_SENT) 125 + return 0; 126 + 127 + if (sk_ls_leak_done) 128 + return 0; 129 + 130 + v = bpf_sk_storage_get(&race_sk_ls_map, sk, NULL, 131 + BPF_SK_STORAGE_GET_F_CREATE); 132 + if (!v) 133 + return 0; 134 + 135 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 136 + if (!p) 137 + return 0;
138 + old = bpf_kptr_xchg(&v->ref_ptr, p); 139 + if (old) 140 + bpf_kfunc_call_test_release(old); 141 + 142 + bpf_sk_storage_delete(&race_sk_ls_map, sk); 143 + 144 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 145 + if (!p) 146 + return 0; 147 + old = bpf_kptr_xchg(&v->ref_ptr, p); 148 + if (old) 149 + bpf_kfunc_call_test_release(old); 150 + 151 + sk_ls_leak_done = 1; 152 + return 0; 153 + } 154 + 155 + long target_map_ptr; 156 + 157 + SEC("fentry/bpf_map_put") 158 + int BPF_PROG(map_put, struct bpf_map *map) 159 + { 160 + if (target_map_id && map->id == (u32)target_map_id) 161 + target_map_ptr = (long)map; 162 + return 0; 163 + } 164 + 165 + SEC("fexit/htab_map_free") 166 + int BPF_PROG(htab_map_free, struct bpf_map *map) 167 + { 168 + if (target_map_ptr && (long)map == target_map_ptr) 169 + map_freed = 1; 170 + return 0; 171 + } 172 + 173 + SEC("fexit/bpf_sk_storage_map_free") 174 + int BPF_PROG(sk_map_free, struct bpf_map *map) 175 + { 176 + if (target_map_ptr && (long)map == target_map_ptr) 177 + map_freed = 1; 178 + return 0; 179 + } 180 + 181 + SEC("syscall") 182 + int count_ref(void *ctx) 183 + { 184 + struct prog_test_ref_kfunc *p; 185 + unsigned long arg = 0; 186 + 187 + p = bpf_kfunc_call_test_acquire(&arg); 188 + if (!p) 189 + return 1; 190 + 191 + num_of_refs = p->cnt.refs.counter; 192 + 193 + bpf_kfunc_call_test_release(p); 194 + return 0; 195 + } 196 + 197 + char _license[] SEC("license") = "GPL";
+137
tools/testing/selftests/bpf/progs/verifier_bounds.c
··· 1863 1863 : __clobber_all); 1864 1864 } 1865 1865 1866 + /* This test covers the bounds deduction when the u64 range and the tnum 1867 + * overlap only at umax. After instruction 3, the ranges look as follows: 1868 + * 1869 + * 0 umin=0xe1 umax=0xf0 U64_MAX 1870 + * | [xxxxxxxxxxxxxx] | 1871 + * |----------------------------|------------------------------| 1872 + * | x x | tnum values 1873 + * 1874 + * The verifier can therefore deduce that R0=0xf0=240. 1875 + */ 1876 + SEC("socket") 1877 + __description("bounds refinement with single-value tnum on umax") 1878 + __msg("3: (15) if r0 == 0xe0 {{.*}} R0=240") 1879 + __success __log_level(2) 1880 + __flag(BPF_F_TEST_REG_INVARIANTS) 1881 + __naked void bounds_refinement_tnum_umax(void *ctx) 1882 + { 1883 + asm volatile(" \ 1884 + call %[bpf_get_prandom_u32]; \ 1885 + r0 |= 0xe0; \ 1886 + r0 &= 0xf0; \ 1887 + if r0 == 0xe0 goto +2; \ 1888 + if r0 == 0xf0 goto +1; \ 1889 + r10 = 0; \ 1890 + exit; \ 1891 + " : 1892 + : __imm(bpf_get_prandom_u32) 1893 + : __clobber_all); 1894 + } 1895 + 1896 + /* This test covers the bounds deduction when the u64 range and the tnum 1897 + * overlap only at umin. After instruction 3, the ranges look as follows: 1898 + * 1899 + * 0 umin=0xe0 umax=0xef U64_MAX 1900 + * | [xxxxxxxxxxxxxx] | 1901 + * |----------------------------|------------------------------| 1902 + * | x x | tnum values 1903 + * 1904 + * The verifier can therefore deduce that R0=0xe0=224. 1905 + */ 1906 + SEC("socket") 1907 + __description("bounds refinement with single-value tnum on umin") 1908 + __msg("3: (15) if r0 == 0xf0 {{.*}} R0=224") 1909 + __success __log_level(2) 1910 + __flag(BPF_F_TEST_REG_INVARIANTS) 1911 + __naked void bounds_refinement_tnum_umin(void *ctx) 1912 + { 1913 + asm volatile(" \ 1914 + call %[bpf_get_prandom_u32]; \ 1915 + r0 |= 0xe0; \ 1916 + r0 &= 0xf0; \ 1917 + if r0 == 0xf0 goto +2; \ 1918 + if r0 == 0xe0 goto +1; \ 1919 + r10 = 0; \ 1920 + exit; \ 1921 + " : 1922 + : __imm(bpf_get_prandom_u32) 1923 + : __clobber_all); 1924 + } 1925 + 1926 + /* This test covers the bounds deduction when the only possible tnum value is 1927 + * in the middle of the u64 range. After instruction 3, the ranges look as 1928 + * follows: 1929 + * 1930 + * 0 umin=0x7cf umax=0x7df U64_MAX 1931 + * | [xxxxxxxxxxxx] | 1932 + * |----------------------------|------------------------------| 1933 + * | x x x x x | tnum values 1934 + * | +--- 0x7e0 1935 + * +--- 0x7d0 1936 + * 1937 + * Since the lower four bits are zero, the tnum and the u64 range only overlap 1938 + * in R0=0x7d0=2000. Instruction 5 is therefore dead code. 1939 + */ 1940 + SEC("socket") 1941 + __description("bounds refinement with single-value tnum in middle of range") 1942 + __msg("3: (a5) if r0 < 0x7cf {{.*}} R0=2000") 1943 + __success __log_level(2) 1944 + __naked void bounds_refinement_tnum_middle(void *ctx) 1945 + { 1946 + asm volatile(" \ 1947 + call %[bpf_get_prandom_u32]; \ 1948 + if r0 & 0x0f goto +4; \ 1949 + if r0 > 0x7df goto +3; \ 1950 + if r0 < 0x7cf goto +2; \ 1951 + if r0 == 0x7d0 goto +1; \ 1952 + r10 = 0; \ 1953 + exit; \ 1954 + " : 1955 + : __imm(bpf_get_prandom_u32) 1956 + : __clobber_all); 1957 + } 1958 +
1959 + /* This test covers the negative case for the tnum/u64 overlap. Since 1960 + * they contain the same two values (i.e., {0, 1}), we can't deduce 1961 + * anything more. 1962 + */ 1963 + SEC("socket") 1964 + __description("bounds refinement: several overlaps between tnum and u64") 1965 + __msg("2: (25) if r0 > 0x1 {{.*}} R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=1,var_off=(0x0; 0x1))") 1966 + __failure __log_level(2) 1967 + __naked void bounds_refinement_several_overlaps(void *ctx) 1968 + { 1969 + asm volatile(" \ 1970 + call %[bpf_get_prandom_u32]; \ 1971 + if r0 < 0 goto +3; \ 1972 + if r0 > 1 goto +2; \ 1973 + if r0 == 1 goto +1; \ 1974 + r10 = 0; \ 1975 + exit; \ 1976 + " : 1977 + : __imm(bpf_get_prandom_u32) 1978 + : __clobber_all); 1979 + } 1980 + 1981 + /* This test covers the negative case for the tnum/u64 overlap. Since 1982 + * they overlap in the two values contained by the u64 range (i.e., 1983 + * {0xf, 0x10}), we can't deduce anything more. 1984 + */ 1985 + SEC("socket") 1986 + __description("bounds refinement: multiple overlaps between tnum and u64") 1987 + __msg("2: (25) if r0 > 0x10 {{.*}} R0=scalar(smin=umin=smin32=umin32=15,smax=umax=smax32=umax32=16,var_off=(0x0; 0x1f))") 1988 + __failure __log_level(2) 1989 + __naked void bounds_refinement_multiple_overlaps(void *ctx) 1990 + { 1991 + asm volatile(" \ 1992 + call %[bpf_get_prandom_u32]; \ 1993 + if r0 < 0xf goto +3; \ 1994 + if r0 > 0x10 goto +2; \ 1995 + if r0 == 0x10 goto +1; \ 1996 + r10 = 0; \ 1997 + exit; \ 1998 + " : 1999 + : __imm(bpf_get_prandom_u32) 2000 + : __clobber_all); 2001 + } 2002 + 1866 2003 char _license[] SEC("license") = "GPL";
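To make the deductions in the comments above concrete, here is a small userspace model. It is not the kernel's tnum implementation, just a brute-force enumeration under the same definitions: a tnum (value, mask) denotes every x = value | s with s a subset of mask, and when exactly one such x lies in [umin, umax], the register collapses to a constant.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct tnum { uint64_t value, mask; };

/* Count tnum members inside [umin, umax]; report the last one seen.
 * Enumerates subsets of mask with the s = (s - mask) & mask trick, so
 * it is only suitable for masks with few set bits. */
static int overlap(struct tnum t, uint64_t umin, uint64_t umax, uint64_t *only)
{
	uint64_t s = 0;
	int cnt = 0;

	do {
		uint64_t v = t.value | s;

		if (v >= umin && v <= umax) {
			cnt++;
			*only = v;
		}
		s = (s - t.mask) & t.mask;
	} while (s);
	return cnt;
}

int main(void)
{
	/* After "r0 |= 0xe0; r0 &= 0xf0;" the tnum is (0xe0, 0x10),
	 * i.e. the two values {0xe0, 0xf0}. */
	struct tnum t = { 0xe0, 0x10 };
	/* Truncated stand-in for (0, ~0xf): low four bits known zero. */
	struct tnum m = { 0x0, 0x7f0 };
	uint64_t only;

	/* umax case: fallthrough of "if r0 == 0xe0" gives [0xe1, 0xf0]. */
	if (overlap(t, 0xe1, 0xf0, &only) == 1)
		printf("R0 = %" PRIu64 "\n", only);     /* 240 */

	/* umin case: fallthrough of "if r0 == 0xf0" gives [0xe0, 0xef]. */
	if (overlap(t, 0xe0, 0xef, &only) == 1)
		printf("R0 = %" PRIu64 "\n", only);     /* 224 */

	/* middle case: [0x7cf, 0x7df] contains only the member 0x7d0. */
	if (overlap(m, 0x7cf, 0x7df, &only) == 1)
		printf("R0 = %" PRIu64 "\n", only);     /* 2000 */
	return 0;
}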
+27 -11
tools/testing/selftests/bpf/test_progs.c
··· 1261 1261 return ret; 1262 1262 } 1263 1263 1264 - #define MAX_BACKTRACE_SZ 128 1265 - void crash_handler(int signum) 1264 + static void dump_crash_log(void) 1266 1265 { 1267 - void *bt[MAX_BACKTRACE_SZ]; 1268 - size_t sz; 1269 - 1270 - sz = backtrace(bt, ARRAY_SIZE(bt)); 1271 - 1272 1266 fflush(stdout); 1273 1267 stdout = env.stdout_saved; 1274 1268 stderr = env.stderr_saved; ··· 1271 1277 env.test_state->error_cnt++; 1272 1278 dump_test_log(env.test, env.test_state, true, false, NULL); 1273 1279 } 1280 + } 1281 + 1282 + #define MAX_BACKTRACE_SZ 128 1283 + 1284 + void crash_handler(int signum) 1285 + { 1286 + void *bt[MAX_BACKTRACE_SZ]; 1287 + size_t sz; 1288 + 1289 + sz = backtrace(bt, ARRAY_SIZE(bt)); 1290 + 1291 + dump_crash_log(); 1292 + 1274 1293 if (env.worker_id != -1) 1275 1294 fprintf(stderr, "[%d]: ", env.worker_id); 1276 1295 fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum); 1277 1296 backtrace_symbols_fd(bt, sz, STDERR_FILENO); 1278 1297 } 1298 + 1299 + #ifdef __SANITIZE_ADDRESS__ 1300 + void __asan_on_error(void) 1301 + { 1302 + dump_crash_log(); 1303 + } 1304 + #endif 1279 1305 1280 1306 void hexdump(const char *prefix, const void *buf, size_t len) 1281 1307 { ··· 1813 1799 1814 1800 msg.subtest_done.num = i; 1815 1801 1816 - strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME); 1802 + strscpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME); 1817 1803 1818 1804 msg.subtest_done.error_cnt = subtest_state->error_cnt; 1819 1805 msg.subtest_done.skipped = subtest_state->skipped; ··· 1958 1944 .parser = parse_arg, 1959 1945 .doc = argp_program_doc, 1960 1946 }; 1947 + int err, i; 1948 + 1949 + #ifndef __SANITIZE_ADDRESS__ 1961 1950 struct sigaction sigact = { 1962 1951 .sa_handler = crash_handler, 1963 1952 .sa_flags = SA_RESETHAND, 1964 - }; 1965 - int err, i; 1966 - 1953 + }; 1967 1954 sigaction(SIGSEGV, &sigact, NULL); 1955 + #endif 1968 1956 1969 1957 env.stdout_saved = stdout; 1970 1958 env.stderr_saved = stderr;
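The __asan_on_error() hook the hunk above defines is the AddressSanitizer user callback: when ASan detects an error, it invokes this user-definable function as part of reporting, which lets the harness flush its captured logs before the ASan report appears. A minimal standalone illustration (compile with -fsanitize=address; the message text is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef __SANITIZE_ADDRESS__
/* Called by ASan while it reports an error. */
void __asan_on_error(void)
{
	fprintf(stderr, "--- flushing buffered test output ---\n");
	fflush(NULL);
}
#endif

int main(void)
{
	char *p = malloc(4);

	strcpy(p, "abc");
	free(p);
	return p[0];    /* heap-use-after-free: triggers the hook under ASan */
}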
+1 -1
tools/testing/selftests/bpf/test_verifier.c
··· 1320 1320 printf("FAIL\nTestcase bug\n"); 1321 1321 return false; 1322 1322 } 1323 - strncpy(needle, exp, len); 1323 + memcpy(needle, exp, len); 1324 1324 needle[len] = 0; 1325 1325 q = strstr(log, needle); 1326 1326 if (!q) {
+1
tools/testing/selftests/bpf/testing_helpers.c
··· 212 212 break; 213 213 } 214 214 215 + free(buf); 215 216 fclose(f); 216 217 return err; 217 218 }
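For reference, the class of leak fixed above comes from getline()-style buffer ownership: the C library allocates and resizes the line buffer, and that buffer stays allocated after the read loop ends or breaks early, so the caller must free it exactly once on every path. The sketch below is a generic illustration of that pattern, not the helper's actual code; the file path is just an example.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");  /* example input */
	char *buf = NULL;                         /* getline() manages this */
	size_t cap = 0;

	if (!f)
		return 1;
	while (getline(&buf, &cap, f) != -1) {
		/* ... parse the line, possibly break early ... */
	}
	/* Whether the loop broke early or hit EOF, the buffer getline()
	 * allocated is still live and must be freed by the caller. */
	free(buf);
	fclose(f);
	return 0;
}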
+12 -11
tools/testing/selftests/bpf/trace_helpers.c
··· 24 24 #define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe" 25 25 #define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe" 26 26 27 - struct ksyms { 28 - struct ksym *syms; 29 - size_t sym_cap; 30 - size_t sym_cnt; 31 - }; 32 - 33 27 static struct ksyms *ksyms; 34 28 static pthread_mutex_t ksyms_mutex = PTHREAD_MUTEX_INITIALIZER; 35 29 ··· 47 53 48 54 if (!ksyms) 49 55 return; 56 + 57 + free(ksyms->filtered_syms); 50 58 51 59 if (!ksyms->syms) { 52 60 free(ksyms); ··· 606 610 return compare_name(p1, p2->name); 607 611 } 608 612 609 - int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel) 613 + int bpf_get_ksyms(struct ksyms **ksymsp, bool kernel) 610 614 { 611 615 size_t cap = 0, cnt = 0; 612 616 char *name = NULL, *ksym_name, **syms = NULL; ··· 633 637 else 634 638 f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r"); 635 639 636 - if (!f) 640 + if (!f) { 641 + free_kallsyms_local(ksyms); 637 642 return -EINVAL; 643 + } 638 644 639 645 map = hashmap__new(symbol_hash, symbol_equal, NULL); 640 646 if (IS_ERR(map)) { ··· 677 679 syms[cnt++] = ksym_name; 678 680 } 679 681 680 - *symsp = syms; 681 - *cntp = cnt; 682 + ksyms->filtered_syms = syms; 683 + ksyms->filtered_cnt = cnt; 684 + *ksymsp = ksyms; 682 685 683 686 error: 684 687 free(name); 685 688 fclose(f); 686 689 hashmap__free(map); 687 - if (err) 690 + if (err) { 688 691 free(syms); 692 + free_kallsyms_local(ksyms); 693 + } 689 694 return err; 690 695 } 691 696
+9 -2
tools/testing/selftests/bpf/trace_helpers.h
··· 23 23 long addr; 24 24 char *name; 25 25 }; 26 - struct ksyms; 26 + 27 + struct ksyms { 28 + struct ksym *syms; 29 + size_t sym_cap; 30 + size_t sym_cnt; 31 + char **filtered_syms; 32 + size_t filtered_cnt; 33 + }; 27 34 28 35 typedef int (*ksym_cmp_t)(const void *p1, const void *p2); 29 36 typedef int (*ksym_search_cmp_t)(const void *p1, const struct ksym *p2); ··· 60 53 61 54 int read_build_id(const char *path, char *build_id, size_t size); 62 55 63 - int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel); 56 + int bpf_get_ksyms(struct ksyms **ksymsp, bool kernel); 64 57 int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel); 65 58 66 59 #endif
+2
tools/testing/selftests/bpf/veristat.c
··· 3378 3378 } 3379 3379 } 3380 3380 free(env.presets[i].atoms); 3381 + if (env.presets[i].value.type == ENUMERATOR) 3382 + free(env.presets[i].value.svalue); 3381 3383 } 3382 3384 free(env.presets); 3383 3385 return -err;
+2 -1
tools/testing/selftests/bpf/xdp_features.c
··· 16 16 17 17 #include <network_helpers.h> 18 18 19 + #include "bpf_util.h" 19 20 #include "xdp_features.skel.h" 20 21 #include "xdp_features.h" 21 22 ··· 213 212 env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT; 214 213 env.feature.action = -EINVAL; 215 214 env.ifindex = -ENODEV; 216 - strcpy(env.ifname, "unknown"); 215 + strscpy(env.ifname, "unknown"); 217 216 make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_CTRL_PORT, 218 217 &env.dut_ctrl_addr, NULL); 219 218 make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_ECHO_PORT,
+2 -2
tools/testing/selftests/bpf/xdp_hw_metadata.c
··· 550 550 struct ifreq ifr = { 551 551 .ifr_data = (void *)&ch, 552 552 }; 553 - strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1); 553 + strscpy(ifr.ifr_name, ifname); 554 554 int fd, ret; 555 555 556 556 fd = socket(AF_UNIX, SOCK_DGRAM, 0); ··· 571 571 struct ifreq ifr = { 572 572 .ifr_data = (void *)cfg, 573 573 }; 574 - strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1); 574 + strscpy(ifr.ifr_name, ifname); 575 575 int fd, ret; 576 576 577 577 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+98 -2
tools/testing/selftests/drivers/net/hw/rss_ctx.py
··· 4 4 import datetime 5 5 import random 6 6 import re 7 + import time 7 8 from lib.py import ksft_run, ksft_pr, ksft_exit 8 9 from lib.py import ksft_eq, ksft_ne, ksft_ge, ksft_in, ksft_lt, ksft_true, ksft_raises 9 10 from lib.py import NetDrvEpEnv 10 11 from lib.py import EthtoolFamily, NetdevFamily 11 12 from lib.py import KsftSkipEx, KsftFailEx 13 + from lib.py import ksft_disruptive 12 14 from lib.py import rand_port 13 - from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure 15 + from lib.py import cmd, ethtool, ip, defer, GenerateTraffic, CmdExitFailure, wait_file 14 16 15 17 16 18 def _rss_key_str(key): ··· 811 809 'noise' : (0, 1) }) 812 810 813 811 812 + @ksft_disruptive 813 + def test_rss_context_persist_ifupdown(cfg, pre_down=False): 814 + """ 815 + Test that RSS contexts and their associated ntuple filters persist across 816 + an interface down/up cycle. 817 + 818 + """ 819 + 820 + require_ntuple(cfg) 821 + 822 + qcnt = len(_get_rx_cnts(cfg)) 823 + if qcnt < 6: 824 + try: 825 + ethtool(f"-L {cfg.ifname} combined 6") 826 + defer(ethtool, f"-L {cfg.ifname} combined {qcnt}") 827 + except Exception as exc: 828 + raise KsftSkipEx("Not enough queues for the test") from exc 829 + 830 + ethtool(f"-X {cfg.ifname} equal 2") 831 + defer(ethtool, f"-X {cfg.ifname} default") 832 + 833 + ifup = defer(ip, f"link set dev {cfg.ifname} up") 834 + if pre_down: 835 + ip(f"link set dev {cfg.ifname} down") 836 + 837 + try: 838 + ctx1_id = ethtool_create(cfg, "-X", "context new start 2 equal 2") 839 + defer(ethtool, f"-X {cfg.ifname} context {ctx1_id} delete") 840 + except CmdExitFailure as exc: 841 + raise KsftSkipEx("Create context not supported with interface down") from exc 842 + 843 + ctx2_id = ethtool_create(cfg, "-X", "context new start 4 equal 2") 844 + defer(ethtool, f"-X {cfg.ifname} context {ctx2_id} delete") 845 + 846 + port_ctx2 = rand_port() 847 + flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port_ctx2} context {ctx2_id}" 848 + ntuple_id = ethtool_create(cfg, "-N", flow) 849 + defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}") 850 + 851 + if not pre_down: 852 + ip(f"link set dev {cfg.ifname} down") 853 + ifup.exec() 854 + 855 + wait_file(f"/sys/class/net/{cfg.ifname}/carrier", 856 + lambda x: x.strip() == "1", deadline=20) 857 + 858 + remote_addr = cfg.remote_addr_v[cfg.addr_ipver] 859 + for _ in range(10): 860 + if cmd(f"ping -c 1 -W 1 {remote_addr}", fail=False).ret == 0: 861 + break 862 + time.sleep(1) 863 + else: 864 + raise KsftSkipEx("Cannot reach remote host after interface up") 865 + 866 + ctxs = cfg.ethnl.rss_get({'header': {'dev-name': cfg.ifname}}, dump=True) 867 + 868 + data1 = [c for c in ctxs if c.get('context') == ctx1_id] 869 + ksft_eq(len(data1), 1, f"Context {ctx1_id} should persist after ifup") 870 + 871 + data2 = [c for c in ctxs if c.get('context') == ctx2_id] 872 + ksft_eq(len(data2), 1, f"Context {ctx2_id} should persist after ifup") 873 + 874 + _ntuple_rule_check(cfg, ntuple_id, ctx2_id) 875 + 876 + cnts = _get_rx_cnts(cfg) 877 + GenerateTraffic(cfg).wait_pkts_and_stop(20000) 878 + cnts = _get_rx_cnts(cfg, prev=cnts) 879 + 880 + main_traffic = sum(cnts[0:2]) 881 + ksft_ge(main_traffic, 18000, f"Main context traffic distribution: {cnts}") 882 + ksft_lt(sum(cnts[2:6]), 500, f"Other context queues should be mostly empty: {cnts}") 883 + 884 + _send_traffic_check(cfg, port_ctx2, f"context {ctx2_id}", 885 + {'target': (4, 5), 886 + 'noise': (0, 1), 887 + 'empty': (2, 3)}) 888 + 889 +
890 + def test_rss_context_persist_create_and_ifdown(cfg): 891 + """ 892 + Create RSS contexts then cycle the interface down and up. 893 + """ 894 + test_rss_context_persist_ifupdown(cfg, pre_down=False) 895 + 896 + 897 + def test_rss_context_persist_ifdown_and_create(cfg): 898 + """ 899 + Bring interface down first, then create RSS contexts and bring up. 900 + """ 901 + test_rss_context_persist_ifupdown(cfg, pre_down=True) 902 + 903 + 814 904 def main() -> None: 815 905 with NetDrvEpEnv(__file__, nsim_test=False) as cfg: 816 906 cfg.context_cnt = None ··· 917 823 test_rss_context_out_of_order, test_rss_context4_create_with_cfg, 918 824 test_flow_add_context_missing, 919 825 test_delete_rss_context_busy, test_rss_ntuple_addition, 920 - test_rss_default_context_rule], 826 + test_rss_default_context_rule, 827 + test_rss_context_persist_create_and_ifdown, 828 + test_rss_context_persist_ifdown_and_create], 921 829 args=(cfg, )) 922 830 ksft_exit() 923 831
+1
tools/testing/selftests/drivers/net/team/Makefile
··· 5 5 dev_addr_lists.sh \ 6 6 options.sh \ 7 7 propagation.sh \ 8 + refleak.sh \ 8 9 # end of TEST_PROGS 9 10 10 11 TEST_INCLUDES := \
+17
tools/testing/selftests/drivers/net/team/refleak.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + # shellcheck disable=SC2154 4 + 5 + lib_dir=$(dirname "$0") 6 + source "$lib_dir"/../../../net/lib.sh 7 + 8 + trap cleanup_all_ns EXIT 9 + 10 + # Test that there is no reference count leak and that dummy1 can be deleted. 11 + # https://lore.kernel.org/netdev/4d69abe1-ca8d-4f0b-bcf8-13899b211e57@I-love.SAKURA.ne.jp/ 12 + setup_ns ns1 ns2 13 + ip -n "$ns1" link add name team1 type team 14 + ip -n "$ns1" link add name dummy1 mtu 1499 type dummy 15 + ip -n "$ns1" link set dev dummy1 master team1 16 + ip -n "$ns1" link set dev dummy1 netns "$ns2" 17 + ip -n "$ns2" link del dev dummy1
+27
tools/testing/selftests/net/packetdrill/tcp_rcv_zero_wnd_fin.pkt
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + // Some TCP stacks send FINs even though the window is closed. We break 4 + // a possible FIN/ACK loop by accepting the FIN. 5 + 6 + --mss=1000 7 + 8 + `./defaults.sh` 9 + 10 + // Establish a connection. 11 + +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 12 + +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 13 + +0 setsockopt(3, SOL_SOCKET, SO_RCVBUF, [20000], 4) = 0 14 + +0 bind(3, ..., ...) = 0 15 + +0 listen(3, 1) = 0 16 + 17 + +0 < S 0:0(0) win 32792 <mss 1000,nop,wscale 7> 18 + +0 > S. 0:0(0) ack 1 <mss 1460,nop,wscale 0> 19 + +0 < . 1:1(0) ack 1 win 257 20 + 21 + +0 accept(3, ..., ...) = 4 22 + 23 + +0 < P. 1:60001(60000) ack 1 win 257 24 + * > . 1:1(0) ack 60001 win 0 25 + 26 + +0 < F. 60001:60001(0) ack 1 win 257 27 + +0 > . 1:1(0) ack 60002 win 0
+20 -19
tools/testing/selftests/vsock/vmtest.sh
··· 210 210 } 211 211 212 212 add_namespaces() { 213 - local orig_mode 214 - orig_mode=$(cat /proc/sys/net/vsock/child_ns_mode) 213 + ip netns add "global-parent" 2>/dev/null 214 + echo "global" | ip netns exec "global-parent" \ 215 + tee /proc/sys/net/vsock/child_ns_mode &>/dev/null 216 + ip netns add "local-parent" 2>/dev/null 217 + echo "local" | ip netns exec "local-parent" \ 218 + tee /proc/sys/net/vsock/child_ns_mode &>/dev/null 215 219 216 - for mode in "${NS_MODES[@]}"; do 217 - echo "${mode}" > /proc/sys/net/vsock/child_ns_mode 218 - ip netns add "${mode}0" 2>/dev/null 219 - ip netns add "${mode}1" 2>/dev/null 220 - done 221 - 222 - echo "${orig_mode}" > /proc/sys/net/vsock/child_ns_mode 220 + nsenter --net=/var/run/netns/global-parent \ 221 + ip netns add "global0" 2>/dev/null 222 + nsenter --net=/var/run/netns/global-parent \ 223 + ip netns add "global1" 2>/dev/null 224 + nsenter --net=/var/run/netns/local-parent \ 225 + ip netns add "local0" 2>/dev/null 226 + nsenter --net=/var/run/netns/local-parent \ 227 + ip netns add "local1" 2>/dev/null 223 228 } 224 229 225 230 init_namespaces() { ··· 242 237 log_host "removed ns ${mode}0" 243 238 log_host "removed ns ${mode}1" 244 239 done 240 + ip netns del "global-parent" &>/dev/null 241 + ip netns del "local-parent" &>/dev/null 245 242 } 246 243 247 244 vm_ssh() { ··· 294 287 } 295 288 296 289 check_deps() { 297 - for dep in vng ${QEMU} busybox pkill ssh ss socat; do 290 + for dep in vng ${QEMU} busybox pkill ssh ss socat nsenter; do 298 291 if [[ ! -x $(command -v "${dep}") ]]; then 299 292 echo -e "skip: dependency ${dep} not found!\n" 300 293 exit "${KSFT_SKIP}" ··· 1238 1231 } 1239 1232 1240 1233 test_ns_host_vsock_child_ns_mode_ok() { 1241 - local orig_mode 1242 - local rc 1234 + local rc="${KSFT_PASS}" 1243 1235 1244 - orig_mode=$(cat /proc/sys/net/vsock/child_ns_mode) 1245 - 1246 - rc="${KSFT_PASS}" 1247 1236 for mode in "${NS_MODES[@]}"; do 1248 1237 local ns="${mode}0" 1249 1238 ··· 1249 1246 continue 1250 1247 fi 1251 1248 1252 - if ! echo "${mode}" > /proc/sys/net/vsock/child_ns_mode; then 1253 - log_host "child_ns_mode should be writable to ${mode}" 1249 + if ! echo "${mode}" | ip netns exec "${ns}" \ 1250 + tee /proc/sys/net/vsock/child_ns_mode &>/dev/null; then 1254 1251 rc="${KSFT_FAIL}" 1255 1252 continue 1256 1253 fi 1257 1254 done 1258 - 1259 - echo "${orig_mode}" > /proc/sys/net/vsock/child_ns_mode 1260 1255 1261 1256 return "${rc}" 1262 1257 }
+1 -8
virt/kvm/Kconfig
··· 5 5 bool 6 6 select EVENTFD 7 7 select INTERVAL_TREE 8 + select MMU_NOTIFIER 8 9 select PREEMPT_NOTIFIERS 9 10 10 11 config HAVE_KVM_PFNCACHE ··· 94 93 config KVM_GENERIC_HARDWARE_ENABLING 95 94 bool 96 95 97 - config KVM_GENERIC_MMU_NOTIFIER 98 - select MMU_NOTIFIER 99 - bool 100 - 101 96 config KVM_ELIDE_TLB_FLUSH_IF_YOUNG 102 - depends on KVM_GENERIC_MMU_NOTIFIER 103 97 bool 104 98 105 99 config KVM_MMU_LOCKLESS_AGING 106 - depends on KVM_GENERIC_MMU_NOTIFIER 107 100 bool 108 101 109 102 config KVM_GENERIC_MEMORY_ATTRIBUTES 110 - depends on KVM_GENERIC_MMU_NOTIFIER 111 103 bool 112 104 113 105 config KVM_GUEST_MEMFD 114 - depends on KVM_GENERIC_MMU_NOTIFIER 115 106 select XARRAY_MULTI 116 107 bool 117 108
+1 -16
virt/kvm/kvm_main.c
··· 502 502 } 503 503 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus); 504 504 505 - #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER 506 505 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 507 506 { 508 507 return container_of(mn, struct kvm, mmu_notifier); ··· 900 901 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 901 902 } 902 903 903 - #else /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */ 904 - 905 - static int kvm_init_mmu_notifier(struct kvm *kvm) 906 - { 907 - return 0; 908 - } 909 - 910 - #endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */ 911 - 912 904 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 913 905 static int kvm_pm_notifier_call(struct notifier_block *bl, 914 906 unsigned long state, ··· 1216 1226 out_err_no_debugfs: 1217 1227 kvm_coalesced_mmio_free(kvm); 1218 1228 out_no_coalesced_mmio: 1219 - #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER 1220 1229 if (kvm->mmu_notifier.ops) 1221 1230 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); 1222 - #endif 1223 1231 out_err_no_mmu_notifier: 1224 1232 kvm_disable_virtualization(); 1225 1233 out_err_no_disable: ··· 1280 1292 kvm->buses[i] = NULL; 1281 1293 } 1282 1294 kvm_coalesced_mmio_free(kvm); 1283 - #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER 1284 1295 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 1285 1296 /* 1286 1297 * At this point, pending calls to invalidate_range_start() ··· 1298 1311 kvm->mn_active_invalidate_count = 0; 1299 1312 else 1300 1313 WARN_ON(kvm->mmu_invalidate_in_progress); 1301 - #else 1302 - kvm_flush_shadow_all(kvm); 1303 - #endif 1304 1314 kvm_arch_destroy_vm(kvm); 1305 1315 kvm_destroy_devices(kvm); 1306 1316 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { ··· 4870 4886 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4871 4887 { 4872 4888 switch (arg) { 4889 + case KVM_CAP_SYNC_MMU: 4873 4890 case KVM_CAP_USER_MEMORY: 4874 4891 case KVM_CAP_USER_MEMORY2: 4875 4892 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: