Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

xdp_return_frame_bulk() needs to pass an xdp_buff
to __xdp_return().
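
Context for that resolution, as a minimal sketch assuming the 5.10-era
shape of net/core/xdp.c (__xdp_return() grew a struct xdp_buff *
argument on the net side; NULL is the right value when freeing a frame
rather than a live buff):

    void xdp_return_frame_bulk(struct xdp_frame *xdpf,
                               struct xdp_frame_bulk *bq)
    {
        struct xdp_mem_info *mem = &xdpf->mem;

        if (mem->type != MEM_TYPE_PAGE_POOL) {
            /* Slow path: forward NULL for the xdp_buff argument
             * that the net side added to __xdp_return(). */
            __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
            return;
        }
        /* ... page_pool bulk-free fast path from net-next ... */
    }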

strlcpy() was converted to strscpy(), but it makes no
functional difference here, so just keep the correct code.
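
For the record, the two helpers differ only in return value and
truncation reporting, which is why the resolution is cosmetic; a
sketch with illustrative strings:

    char buf[8];

    /* strlcpy() returns strlen(src), even when it truncates: */
    strlcpy(buf, "nf_tables_api", sizeof(buf));  /* returns 13 */

    /* strscpy() returns the bytes copied, or -E2BIG on truncation: */
    strscpy(buf, "nf_tables_api", sizeof(buf));  /* returns -E2BIG */

Both leave buf holding "nf_tabl", NUL-terminated.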

Conflicts:
net/netfilter/nf_tables_api.c

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2519 -2371
+2
.mailmap
··· 322 322 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com> 323 323 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws> 324 324 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de> 325 + Uwe Kleine-König <u.kleine-koenig@pengutronix.de> 326 + Uwe Kleine-König <ukleinek@strlen.de> 325 327 Uwe Kleine-König <ukl@pengutronix.de> 326 328 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com> 327 329 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+5
CREDITS
··· 740 740 S: Portland, Oregon 741 741 S: USA 742 742 743 + N: Jason Cooper 744 + D: ARM/Marvell SOC co-maintainer 745 + D: irqchip co-maintainer 746 + D: MVEBU PCI DRIVER co-maintainer 747 + 743 748 N: Robin Cornelius 744 749 E: robincornelius@users.sourceforge.net 745 750 D: Ralink rt2x00 WLAN driver
+21 -7
MAINTAINERS
··· 1486 1486 F: drivers/iommu/arm/ 1487 1487 F: drivers/iommu/io-pgtable-arm* 1488 1488 1489 + ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS) 1490 + M: Arnd Bergmann <arnd@arndb.de> 1491 + M: Olof Johansson <olof@lixom.net> 1492 + M: soc@kernel.org 1493 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1494 + S: Maintained 1495 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git 1496 + F: arch/arm/boot/dts/Makefile 1497 + F: arch/arm64/boot/dts/Makefile 1498 + 1489 1499 ARM SUB-ARCHITECTURES 1490 1500 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1491 1501 S: Maintained 1492 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git 1502 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git 1493 1503 F: arch/arm/mach-*/ 1494 1504 F: arch/arm/plat-*/ 1495 1505 ··· 2024 2014 S: Maintained 2025 2015 2026 2016 ARM/Marvell Dove/MV78xx0/Orion SOC support 2027 - M: Jason Cooper <jason@lakedaemon.net> 2028 2017 M: Andrew Lunn <andrew@lunn.ch> 2029 2018 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 2030 2019 M: Gregory Clement <gregory.clement@bootlin.com> ··· 2040 2031 F: drivers/soc/dove/ 2041 2032 2042 2033 ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K, CN9130 SOC support 2043 - M: Jason Cooper <jason@lakedaemon.net> 2044 2034 M: Andrew Lunn <andrew@lunn.ch> 2045 2035 M: Gregory Clement <gregory.clement@bootlin.com> 2046 2036 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> ··· 3247 3239 R: Song Liu <songliubraving@fb.com> 3248 3240 R: Yonghong Song <yhs@fb.com> 3249 3241 R: John Fastabend <john.fastabend@gmail.com> 3250 - R: KP Singh <kpsingh@chromium.org> 3242 + R: KP Singh <kpsingh@kernel.org> 3251 3243 L: netdev@vger.kernel.org 3252 3244 L: bpf@vger.kernel.org 3253 3245 S: Supported ··· 3366 3358 X: arch/x86/net/bpf_jit_comp32.c 3367 3359 3368 3360 BPF LSM (Security Audit and Enforcement using BPF) 3369 - M: KP Singh <kpsingh@chromium.org> 3361 + M: KP Singh <kpsingh@kernel.org> 3370 3362 R: Florent Revest <revest@chromium.org> 3371 3363 R: Brendan Jackman <jackmanb@chromium.org> 3372 3364 L: bpf@vger.kernel.org ··· 4295 4287 C: irc://chat.freenode.net/clangbuiltlinux 4296 4288 F: Documentation/kbuild/llvm.rst 4297 4289 F: scripts/clang-tools/ 4290 + F: scripts/lld-version.sh 4298 4291 K: \b(?i:clang|llvm)\b 4299 4292 4300 4293 CLEANCACHE API ··· 9258 9249 9259 9250 IRQCHIP DRIVERS 9260 9251 M: Thomas Gleixner <tglx@linutronix.de> 9261 - M: Jason Cooper <jason@lakedaemon.net> 9262 9252 M: Marc Zyngier <maz@kernel.org> 9263 9253 L: linux-kernel@vger.kernel.org 9264 9254 S: Maintained ··· 10557 10549 S: Supported 10558 10550 F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst 10559 10551 F: drivers/net/ethernet/marvell/octeontx2/af/ 10552 + 10553 + MARVELL PRESTERA ETHERNET SWITCH DRIVER 10554 + M: Vadym Kochan <vkochan@marvell.com> 10555 + M: Taras Chornyi <tchornyi@marvell.com> 10556 + S: Supported 10557 + W: https://github.com/Marvell-switching/switchdev-prestera 10558 + F: drivers/net/ethernet/marvell/prestera/ 10560 10559 10561 10560 MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER 10562 10561 M: Nicolas Pitre <nico@fluxnic.net> ··· 13413 13398 13414 13399 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) 13415 13400 M: Thomas Petazzoni <thomas.petazzoni@bootlin.com> 13416 - M: Jason Cooper <jason@lakedaemon.net> 13417 13401 L: linux-pci@vger.kernel.org 13418 13402 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 13419 13403 S: Maintained
+10 -2
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 10 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc6 5 + EXTRAVERSION = -rc7 6 6 NAME = Kleptomaniac Octopus 7 7 8 8 # *DOCUMENTATION* ··· 826 826 DEBUG_CFLAGS += -g 827 827 endif 828 828 829 + ifneq ($(LLVM_IAS),1) 829 830 KBUILD_AFLAGS += -Wa,-gdwarf-2 831 + endif 830 832 831 833 ifdef CONFIG_DEBUG_INFO_DWARF4 832 834 DEBUG_CFLAGS += -gdwarf-4 ··· 946 944 KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) 947 945 948 946 # change __FILE__ to the relative path from the srctree 949 - KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) 947 + KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) 950 948 951 949 # ensure -fcf-protection is disabled when using retpoline as it is 952 950 # incompatible with -mindirect-branch=thunk-extern ··· 982 980 983 981 ifeq ($(CONFIG_RELR),y) 984 982 LDFLAGS_vmlinux += --pack-dyn-relocs=relr 983 + endif 984 + 985 + # We never want expected sections to be placed heuristically by the 986 + # linker. All sections should be explicitly named in the linker script. 987 + ifdef CONFIG_LD_ORPHAN_WARN 988 + LDFLAGS_vmlinux += --orphan-handling=warn 985 989 endif 986 990 987 991 # Align the bit size of userspace programs with the kernel
+9
arch/Kconfig
··· 1028 1028 bool 1029 1029 depends on HAVE_STATIC_CALL 1030 1030 1031 + config ARCH_WANT_LD_ORPHAN_WARN 1032 + bool 1033 + help 1034 + An arch should select this symbol once all linker sections are explicitly 1035 + included, size-asserted, or discarded in the linker scripts. This is 1036 + important because we never want expected sections to be placed heuristically 1037 + by the linker, since the locations of such sections can change between linker 1038 + versions. 1039 + 1031 1040 source "kernel/gcov/Kconfig" 1032 1041 1033 1042 source "scripts/gcc-plugins/Kconfig"
+1
arch/arm/Kconfig
··· 35 35 select ARCH_USE_CMPXCHG_LOCKREF 36 36 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU 37 37 select ARCH_WANT_IPC_PARSE_VERSION 38 + select ARCH_WANT_LD_ORPHAN_WARN 38 39 select BINFMT_FLAT_ARGVP_ENVP_ON_STACK 39 40 select BUILDTIME_TABLE_SORT if MMU 40 41 select CLONE_BACKWARDS
-4
arch/arm/Makefile
··· 16 16 KBUILD_LDFLAGS_MODULE += --be8 17 17 endif 18 18 19 - # We never want expected sections to be placed heuristically by the 20 - # linker. All sections should be explicitly named in the linker script. 21 - LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) 22 - 23 19 GZFLAGS :=-9 24 20 #KBUILD_CFLAGS +=-pipe 25 21
+3 -1
arch/arm/boot/compressed/Makefile
··· 129 129 # Delete all temporary local symbols 130 130 LDFLAGS_vmlinux += -X 131 131 # Report orphan sections 132 - LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) 132 + ifdef CONFIG_LD_ORPHAN_WARN 133 + LDFLAGS_vmlinux += --orphan-handling=warn 134 + endif 133 135 # Next argument is a linker script 134 136 LDFLAGS_vmlinux += -T 135 137
+1 -1
arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
··· 551 551 552 552 pinctrl_i2c3: i2c3grp { 553 553 fsl,pins = < 554 - MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1 554 + MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1 555 555 MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1 556 556 >; 557 557 };
-1
arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
··· 166 166 MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030 167 167 MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030 168 168 MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030 169 - MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1 170 169 >; 171 170 }; 172 171
+1 -2
arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
··· 223 223 }; 224 224 225 225 &ssp3 { 226 - /delete-property/ #address-cells; 227 - /delete-property/ #size-cells; 226 + #address-cells = <0>; 228 227 spi-slave; 229 228 status = "okay"; 230 229 ready-gpios = <&gpio 125 GPIO_ACTIVE_HIGH>;
+1 -1
arch/arm/boot/dts/sun7i-a20-bananapi.dts
··· 132 132 pinctrl-names = "default"; 133 133 pinctrl-0 = <&gmac_rgmii_pins>; 134 134 phy-handle = <&phy1>; 135 - phy-mode = "rgmii"; 135 + phy-mode = "rgmii-id"; 136 136 phy-supply = <&reg_gmac_3v3>; 137 137 status = "okay"; 138 138 };
+2 -2
arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
··· 1 1 /* 2 - * Copyright 2015 Adam Sampson <ats@offog.org> 2 + * Copyright 2015-2020 Adam Sampson <ats@offog.org> 3 3 * 4 4 * This file is dual-licensed: you can use it either under the terms 5 5 * of the GPL or the X11 license, at your option. Note that this dual ··· 115 115 pinctrl-names = "default"; 116 116 pinctrl-0 = <&gmac_rgmii_pins>; 117 117 phy-handle = <&phy1>; 118 - phy-mode = "rgmii"; 118 + phy-mode = "rgmii-id"; 119 119 status = "okay"; 120 120 }; 121 121
+1 -1
arch/arm/boot/dts/sun8i-s3-pinecube.dts
··· 10 10 11 11 / { 12 12 model = "PineCube IP Camera"; 13 - compatible = "pine64,pinecube", "allwinner,sun8i-s3"; 13 + compatible = "pine64,pinecube", "sochip,s3", "allwinner,sun8i-v3"; 14 14 15 15 aliases { 16 16 serial0 = &uart2;
+1 -1
arch/arm/boot/dts/sun8i-v3s.dtsi
··· 539 539 gic: interrupt-controller@1c81000 { 540 540 compatible = "arm,gic-400"; 541 541 reg = <0x01c81000 0x1000>, 542 - <0x01c82000 0x1000>, 542 + <0x01c82000 0x2000>, 543 543 <0x01c84000 0x2000>, 544 544 <0x01c86000 0x2000>; 545 545 interrupt-controller;
+6 -6
arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
··· 120 120 pinctrl-names = "default"; 121 121 pinctrl-0 = <&gmac_rgmii_pins>; 122 122 phy-handle = <&phy1>; 123 - phy-mode = "rgmii"; 123 + phy-mode = "rgmii-id"; 124 124 phy-supply = <&reg_dc1sw>; 125 125 status = "okay"; 126 126 }; ··· 198 198 }; 199 199 200 200 &reg_dc1sw { 201 - regulator-min-microvolt = <3000000>; 202 - regulator-max-microvolt = <3000000>; 201 + regulator-min-microvolt = <3300000>; 202 + regulator-max-microvolt = <3300000>; 203 203 regulator-name = "vcc-gmac-phy"; 204 204 }; 205 205 206 206 &reg_dcdc1 { 207 207 regulator-always-on; 208 - regulator-min-microvolt = <3000000>; 209 - regulator-max-microvolt = <3000000>; 210 - regulator-name = "vcc-3v0"; 208 + regulator-min-microvolt = <3300000>; 209 + regulator-max-microvolt = <3300000>; 210 + regulator-name = "vcc-3v3"; 211 211 }; 212 212 213 213 &reg_dcdc2 {
-1
arch/arm/configs/omap2plus_defconfig
··· 81 81 CONFIG_BINFMT_MISC=y 82 82 CONFIG_CMA=y 83 83 CONFIG_ZSMALLOC=m 84 - CONFIG_ZSMALLOC_PGTABLE_MAPPING=y 85 84 CONFIG_NET=y 86 85 CONFIG_PACKET=y 87 86 CONFIG_UNIX=y
+1 -1
arch/arm/mach-imx/anatop.c
··· 136 136 137 137 src_np = of_find_compatible_node(NULL, NULL, 138 138 "fsl,imx6ul-src"); 139 - src_base = of_iomap(np, 0); 139 + src_base = of_iomap(src_np, 0); 140 140 of_node_put(src_np); 141 141 WARN_ON(!src_base); 142 142 sbmr2 = readl_relaxed(src_base + SRC_SBMR2);
-3
arch/arm/mach-keystone/memory.h
··· 6 6 #ifndef __MEMORY_H 7 7 #define __MEMORY_H 8 8 9 - #define MAX_PHYSMEM_BITS 36 10 - #define SECTION_SIZE_BITS 34 11 - 12 9 #define KEYSTONE_LOW_PHYS_START 0x80000000ULL 13 10 #define KEYSTONE_LOW_PHYS_SIZE 0x80000000ULL /* 2G */ 14 11 #define KEYSTONE_LOW_PHYS_END (KEYSTONE_LOW_PHYS_START + \
+1 -1
arch/arm/mach-omap1/board-osk.c
··· 288 288 .dev_id = "ohci", 289 289 .table = { 290 290 /* Power GPIO on the I2C-attached TPS65010 */ 291 - GPIO_LOOKUP("i2c-tps65010", 1, "power", GPIO_ACTIVE_HIGH), 291 + GPIO_LOOKUP("tps65010", 0, "power", GPIO_ACTIVE_HIGH), 292 292 GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent", 293 293 GPIO_ACTIVE_HIGH), 294 294 },
+1
arch/arm/mach-sunxi/sunxi.c
··· 66 66 "allwinner,sun8i-h2-plus", 67 67 "allwinner,sun8i-h3", 68 68 "allwinner,sun8i-r40", 69 + "allwinner,sun8i-v3", 69 70 "allwinner,sun8i-v3s", 70 71 NULL, 71 72 };
+1
arch/arm64/Kconfig
··· 81 81 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT 82 82 select ARCH_WANT_FRAME_POINTERS 83 83 select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36) 84 + select ARCH_WANT_LD_ORPHAN_WARN 84 85 select ARCH_HAS_UBSAN_SANITIZE_ALL 85 86 select ARM_AMBA 86 87 select ARM_ARCH_TIMER
-4
arch/arm64/Makefile
··· 28 28 endif 29 29 endif 30 30 31 - # We never want expected sections to be placed heuristically by the 32 - # linker. All sections should be explicitly named in the linker script. 33 - LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) 34 - 35 31 ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y) 36 32 ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y) 37 33 $(warning LSE atomics not supported by binutils)
+1 -1
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
··· 79 79 &emac { 80 80 pinctrl-names = "default"; 81 81 pinctrl-0 = <&rgmii_pins>; 82 - phy-mode = "rgmii"; 82 + phy-mode = "rgmii-id"; 83 83 phy-handle = <&ext_rgmii_phy>; 84 84 phy-supply = <&reg_dc1sw>; 85 85 status = "okay";
+1 -1
arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
··· 96 96 pinctrl-0 = <&emac_rgmii_pins>; 97 97 phy-supply = <&reg_gmac_3v3>; 98 98 phy-handle = <&ext_rgmii_phy>; 99 - phy-mode = "rgmii"; 99 + phy-mode = "rgmii-id"; 100 100 status = "okay"; 101 101 }; 102 102
+1 -1
arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
··· 27 27 &emac { 28 28 pinctrl-names = "default"; 29 29 pinctrl-0 = <&ext_rgmii_pins>; 30 - phy-mode = "rgmii"; 30 + phy-mode = "rgmii-id"; 31 31 phy-handle = <&ext_rgmii_phy>; 32 32 phy-supply = <&reg_gmac_3v3>; 33 33 allwinner,rx-delay-ps = <200>;
+1
arch/powerpc/Kconfig
··· 152 152 select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS 153 153 select ARCH_WANT_IPC_PARSE_VERSION 154 154 select ARCH_WANT_IRQS_OFF_ACTIVATE_MM 155 + select ARCH_WANT_LD_ORPHAN_WARN 155 156 select ARCH_WEAK_RELEASE_ACQUIRE 156 157 select BINFMT_ELF 157 158 select BUILDTIME_TABLE_SORT
-1
arch/powerpc/Makefile
··· 123 123 LDFLAGS_vmlinux-y := -Bstatic 124 124 LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie 125 125 LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y) 126 - LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn) 127 126 128 127 ifdef CONFIG_PPC64 129 128 ifeq ($(call cc-option-yn,-mcmodel=medium),y)
+12
arch/powerpc/include/asm/book3s/64/mmu.h
··· 242 242 static inline void radix_init_pseries(void) { }; 243 243 #endif 244 244 245 + #ifdef CONFIG_HOTPLUG_CPU 246 + #define arch_clear_mm_cpumask_cpu(cpu, mm) \ 247 + do { \ 248 + if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \ 249 + atomic_dec(&(mm)->context.active_cpus); \ 250 + cpumask_clear_cpu(cpu, mm_cpumask(mm)); \ 251 + } \ 252 + } while (0) 253 + 254 + void cleanup_cpu_mmu_context(void); 255 + #endif 256 + 245 257 static inline int get_user_context(mm_context_t *ctx, unsigned long ea) 246 258 { 247 259 int index = ea >> MAX_EA_BITS_PER_CONTEXT;
+2 -5
arch/powerpc/kvm/book3s_xive.c
··· 1214 1214 static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu) 1215 1215 { 1216 1216 /* We have a block of xive->nr_servers VPs. We just need to check 1217 - * raw vCPU ids are below the expected limit for this guest's 1218 - * core stride ; kvmppc_pack_vcpu_id() will pack them down to an 1219 - * index that can be safely used to compute a VP id that belongs 1220 - * to the VP block. 1217 + * packed vCPU ids are below that. 1221 1218 */ 1222 - return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode; 1219 + return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers; 1223 1220 } 1224 1221 1225 1222 int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
+1 -1
arch/powerpc/mm/Makefile
··· 5 5 6 6 ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) 7 7 8 - obj-y := fault.o mem.o pgtable.o mmap.o \ 8 + obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \ 9 9 init_$(BITS).o pgtable_$(BITS).o \ 10 10 pgtable-frag.o ioremap.o ioremap_$(BITS).o \ 11 11 init-common.o mmu_context.o drmem.o
+15 -8
arch/powerpc/mm/book3s64/hash_native.c
··· 68 68 rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); 69 69 70 70 asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) 71 - : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r) 71 + : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r) 72 72 : "memory"); 73 73 } 74 74 ··· 92 92 asm volatile("ptesync": : :"memory"); 93 93 94 94 /* 95 - * Flush the first set of the TLB, and any caching of partition table 96 - * entries. Then flush the remaining sets of the TLB. Hash mode uses 97 - * partition scoped TLB translations. 95 + * Flush the partition table cache if this is HV mode. 98 96 */ 99 - tlbiel_hash_set_isa300(0, is, 0, 2, 0); 100 - for (set = 1; set < num_sets; set++) 101 - tlbiel_hash_set_isa300(set, is, 0, 0, 0); 97 + if (early_cpu_has_feature(CPU_FTR_HVMODE)) 98 + tlbiel_hash_set_isa300(0, is, 0, 2, 0); 102 99 103 100 /* 104 - * Now invalidate the process table cache. 101 + * Now invalidate the process table cache. UPRT=0 HPT modes (what 102 + * current hardware implements) do not use the process table, but 103 + * add the flushes anyway. 105 104 * 106 105 * From ISA v3.0B p. 1078: 107 106 * The following forms are invalid. ··· 108 109 * HPT caching is of the Process Table.) 109 110 */ 110 111 tlbiel_hash_set_isa300(0, is, 0, 2, 1); 112 + 113 + /* 114 + * Then flush the sets of the TLB proper. Hash mode uses 115 + * partition scoped TLB translations, which may be flushed 116 + * in !HV mode. 117 + */ 118 + for (set = 0; set < num_sets; set++) 119 + tlbiel_hash_set_isa300(set, is, 0, 0, 0); 111 120 112 121 ppc_after_tlbiel_barrier(); 113 122
+20
arch/powerpc/mm/book3s64/mmu_context.c
··· 17 17 #include <linux/export.h> 18 18 #include <linux/gfp.h> 19 19 #include <linux/slab.h> 20 + #include <linux/cpu.h> 20 21 21 22 #include <asm/mmu_context.h> 22 23 #include <asm/pgalloc.h> ··· 306 305 { 307 306 mtspr(SPRN_PID, next->context.id); 308 307 isync(); 308 + } 309 + #endif 310 + 311 + /** 312 + * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined) 313 + * 314 + * This clears the CPU from mm_cpumask for all processes, and then flushes the 315 + * local TLB to ensure TLB coherency in case the CPU is onlined again. 316 + * 317 + * KVM guest translations are not necessarily flushed here. If KVM started 318 + * using mm_cpumask or the Linux APIs which do, this would have to be resolved. 319 + */ 320 + #ifdef CONFIG_HOTPLUG_CPU 321 + void cleanup_cpu_mmu_context(void) 322 + { 323 + int cpu = smp_processor_id(); 324 + 325 + clear_tasks_mm_cpumask(cpu); 326 + tlbiel_all(); 309 327 } 310 328 #endif
+9
arch/powerpc/mm/maccess.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/uaccess.h> 4 + #include <linux/kernel.h> 5 + 6 + bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) 7 + { 8 + return is_kernel_addr((unsigned long)unsafe_src); 9 + }
+1 -2
arch/powerpc/mm/numa.c
··· 742 742 of_node_put(cpu); 743 743 } 744 744 745 - if (likely(nid > 0)) 746 - node_set_online(nid); 745 + node_set_online(nid); 747 746 } 748 747 749 748 get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
+2
arch/powerpc/platforms/powermac/smp.c
··· 911 911 912 912 mpic_cpu_set_priority(0xf); 913 913 914 + cleanup_cpu_mmu_context(); 915 + 914 916 return 0; 915 917 } 916 918
+7 -2
arch/powerpc/platforms/powernv/setup.c
··· 211 211 add_preferred_console("hvc", 0, NULL); 212 212 213 213 if (!radix_enabled()) { 214 + size_t size = sizeof(struct slb_entry) * mmu_slb_size; 214 215 int i; 215 216 216 217 /* Allocate per cpu area to save old slb contents during MCE */ 217 - for_each_possible_cpu(i) 218 - paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i)); 218 + for_each_possible_cpu(i) { 219 + paca_ptrs[i]->mce_faulty_slbs = 220 + memblock_alloc_node(size, 221 + __alignof__(struct slb_entry), 222 + cpu_to_node(i)); 223 + } 219 224 } 220 225 } 221 226
+3
arch/powerpc/platforms/powernv/smp.c
··· 143 143 xive_smp_disable_cpu(); 144 144 else 145 145 xics_migrate_irqs_away(); 146 + 147 + cleanup_cpu_mmu_context(); 148 + 146 149 return 0; 147 150 } 148 151
+3
arch/powerpc/platforms/pseries/hotplug-cpu.c
··· 90 90 xive_smp_disable_cpu(); 91 91 else 92 92 xics_migrate_irqs_away(); 93 + 94 + cleanup_cpu_mmu_context(); 95 + 93 96 return 0; 94 97 } 95 98
+2 -1
arch/powerpc/platforms/pseries/msi.c
··· 458 458 return hwirq; 459 459 } 460 460 461 - virq = irq_create_mapping(NULL, hwirq); 461 + virq = irq_create_mapping_affinity(NULL, hwirq, 462 + entry->affinity); 462 463 463 464 if (!virq) { 464 465 pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
+1 -1
arch/sparc/lib/csum_copy.S
··· 71 71 FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */ 72 72 LOAD(prefetch, %o0 + 0x000, #n_reads) 73 73 xor %o0, %o1, %g1 74 - mov 1, %o3 74 + mov -1, %o3 75 75 clr %o4 76 76 andcc %g1, 0x3, %g0 77 77 bne,pn %icc, 95f
+1
arch/x86/Kconfig
··· 100 100 select ARCH_WANT_DEFAULT_BPF_JIT if X86_64 101 101 select ARCH_WANTS_DYNAMIC_TASK_STRUCT 102 102 select ARCH_WANT_HUGE_PMD_SHARE 103 + select ARCH_WANT_LD_ORPHAN_WARN 103 104 select ARCH_WANTS_THP_SWAP if X86_64 104 105 select BUILDTIME_TABLE_SORT 105 106 select CLKEVT_I8253
-3
arch/x86/Makefile
··· 209 209 LDFLAGS_vmlinux += -z max-page-size=0x200000 210 210 endif 211 211 212 - # We never want expected sections to be placed heuristically by the 213 - # linker. All sections should be explicitly named in the linker script. 214 - LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) 215 212 216 213 archscripts: scripts_basic 217 214 $(Q)$(MAKE) $(build)=arch/x86/tools relocs
+3 -1
arch/x86/boot/compressed/Makefile
··· 61 61 # Compressed kernel should be built as PIE since it may be loaded at any 62 62 # address by the bootloader. 63 63 LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker) 64 - LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) 64 + ifdef CONFIG_LD_ORPHAN_WARN 65 + LDFLAGS_vmlinux += --orphan-handling=warn 66 + endif 65 67 LDFLAGS_vmlinux += -T 66 68 67 69 hostprogs := mkpiggy
+2 -3
arch/x86/boot/compressed/sev-es.c
··· 32 32 */ 33 33 static bool insn_has_rep_prefix(struct insn *insn) 34 34 { 35 + insn_byte_t p; 35 36 int i; 36 37 37 38 insn_get_prefixes(insn); 38 39 39 - for (i = 0; i < insn->prefixes.nbytes; i++) { 40 - insn_byte_t p = insn->prefixes.bytes[i]; 41 - 40 + for_each_insn_prefix(insn, i, p) { 42 41 if (p == 0xf2 || p == 0xf3) 43 42 return true; 44 43 }
+2 -2
arch/x86/events/intel/ds.c
··· 1916 1916 * that caused the PEBS record. It's called collision. 1917 1917 * If collision happened, the record will be dropped. 1918 1918 */ 1919 - if (p->status != (1ULL << bit)) { 1919 + if (pebs_status != (1ULL << bit)) { 1920 1920 for_each_set_bit(i, (unsigned long *)&pebs_status, size) 1921 1921 error[i]++; 1922 1922 continue; ··· 1940 1940 if (error[bit]) { 1941 1941 perf_log_lost_samples(event, error[bit]); 1942 1942 1943 - if (perf_event_account_interrupt(event)) 1943 + if (iregs && perf_event_account_interrupt(event)) 1944 1944 x86_pmu_stop(event, 0); 1945 1945 } 1946 1946
+15
arch/x86/include/asm/insn.h
··· 201 201 return insn_offset_displacement(insn) + insn->displacement.nbytes; 202 202 } 203 203 204 + /** 205 + * for_each_insn_prefix() -- Iterate prefixes in the instruction 206 + * @insn: Pointer to struct insn. 207 + * @idx: Index storage. 208 + * @prefix: Prefix byte. 209 + * 210 + * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix 211 + * and the index is stored in @idx (note that this @idx is just for a cursor, 212 + * do not change it.) 213 + * Since prefixes.nbytes can be bigger than 4 if some prefixes 214 + * are repeated, it cannot be used for looping over the prefixes. 215 + */ 216 + #define for_each_insn_prefix(insn, idx, prefix) \ 217 + for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) 218 + 204 219 #define POP_SS_OPCODE 0x1f 205 220 #define MOV_SREG_OPCODE 0x8e 206 221
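
A hypothetical caller, mirroring the sev-es.c, uprobes.c and
insn-eval.c conversions below; the point of the macro is to stop
indexing bytes[] with prefixes.nbytes, which can exceed the four-byte
array when a prefix is repeated:

    insn_byte_t p;
    int i;

    for_each_insn_prefix(insn, i, p) {
        if (p == 0x66)  /* operand-size override prefix */
            return -ENOTSUPP;
    }
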
+1 -1
arch/x86/kernel/apic/x2apic_uv_x.c
··· 161 161 /* UV4/4A only have a revision difference */ 162 162 case UV4_HUB_PART_NUMBER: 163 163 uv_min_hub_revision_id = node_id.s.revision 164 - + UV4_HUB_REVISION_BASE; 164 + + UV4_HUB_REVISION_BASE - 1; 165 165 uv_hub_type_set(UV4); 166 166 if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE) 167 167 uv_hub_type_set(UV4|UV4A);
+4
arch/x86/kernel/cpu/resctrl/core.c
··· 570 570 571 571 if (d) { 572 572 cpumask_set_cpu(cpu, &d->cpu_mask); 573 + if (r->cache.arch_has_per_cpu_cfg) 574 + rdt_domain_reconfigure_cdp(r); 573 575 return; 574 576 } 575 577 ··· 925 923 r->rid == RDT_RESOURCE_L2CODE) { 926 924 r->cache.arch_has_sparse_bitmaps = false; 927 925 r->cache.arch_has_empty_bitmaps = false; 926 + r->cache.arch_has_per_cpu_cfg = false; 928 927 } else if (r->rid == RDT_RESOURCE_MBA) { 929 928 r->msr_base = MSR_IA32_MBA_THRTL_BASE; 930 929 r->msr_update = mba_wrmsr_intel; ··· 946 943 r->rid == RDT_RESOURCE_L2CODE) { 947 944 r->cache.arch_has_sparse_bitmaps = true; 948 945 r->cache.arch_has_empty_bitmaps = true; 946 + r->cache.arch_has_per_cpu_cfg = true; 949 947 } else if (r->rid == RDT_RESOURCE_MBA) { 950 948 r->msr_base = MSR_IA32_MBA_BW_BASE; 951 949 r->msr_update = mba_wrmsr_amd;
+3
arch/x86/kernel/cpu/resctrl/internal.h
··· 360 360 * executing entities 361 361 * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. 362 362 * @arch_has_empty_bitmaps: True if the '0' bitmap is valid. 363 + * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache 364 + * level has CPU scope. 363 365 */ 364 366 struct rdt_cache { 365 367 unsigned int cbm_len; ··· 371 369 unsigned int shareable_bits; 372 370 bool arch_has_sparse_bitmaps; 373 371 bool arch_has_empty_bitmaps; 372 + bool arch_has_per_cpu_cfg; 374 373 }; 375 374 376 375 /**
+7 -2
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 1909 1909 1910 1910 r_l = &rdt_resources_all[level]; 1911 1911 list_for_each_entry(d, &r_l->domains, list) { 1912 - /* Pick one CPU from each domain instance to update MSR */ 1913 - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 1912 + if (r_l->cache.arch_has_per_cpu_cfg) 1913 + /* Pick all the CPUs in the domain instance */ 1914 + for_each_cpu(cpu, &d->cpu_mask) 1915 + cpumask_set_cpu(cpu, cpu_mask); 1916 + else 1917 + /* Pick one CPU from each domain instance to update MSR */ 1918 + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 1914 1919 } 1915 1920 cpu = get_cpu(); 1916 1921 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
+6 -4
arch/x86/kernel/uprobes.c
··· 255 255 256 256 static bool is_prefix_bad(struct insn *insn) 257 257 { 258 + insn_byte_t p; 258 259 int i; 259 260 260 - for (i = 0; i < insn->prefixes.nbytes; i++) { 261 + for_each_insn_prefix(insn, i, p) { 261 262 insn_attr_t attr; 262 263 263 - attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); 264 + attr = inat_get_opcode_attribute(p); 264 265 switch (attr) { 265 266 case INAT_MAKE_PREFIX(INAT_PFX_ES): 266 267 case INAT_MAKE_PREFIX(INAT_PFX_CS): ··· 716 715 static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) 717 716 { 718 717 u8 opc1 = OPCODE1(insn); 718 + insn_byte_t p; 719 719 int i; 720 720 721 721 switch (opc1) { ··· 748 746 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. 749 747 * No one uses these insns, reject any branch insns with such prefix. 750 748 */ 751 - for (i = 0; i < insn->prefixes.nbytes; i++) { 752 - if (insn->prefixes.bytes[i] == 0x66) 749 + for_each_insn_prefix(insn, i, p) { 750 + if (p == 0x66) 753 751 return -ENOTSUPP; 754 752 } 755 753
+5 -5
arch/x86/lib/insn-eval.c
··· 63 63 */ 64 64 bool insn_has_rep_prefix(struct insn *insn) 65 65 { 66 + insn_byte_t p; 66 67 int i; 67 68 68 69 insn_get_prefixes(insn); 69 70 70 - for (i = 0; i < insn->prefixes.nbytes; i++) { 71 - insn_byte_t p = insn->prefixes.bytes[i]; 72 - 71 + for_each_insn_prefix(insn, i, p) { 73 72 if (p == 0xf2 || p == 0xf3) 74 73 return true; 75 74 } ··· 94 95 { 95 96 int idx = INAT_SEG_REG_DEFAULT; 96 97 int num_overrides = 0, i; 98 + insn_byte_t p; 97 99 98 100 insn_get_prefixes(insn); 99 101 100 102 /* Look for any segment override prefixes. */ 101 - for (i = 0; i < insn->prefixes.nbytes; i++) { 103 + for_each_insn_prefix(insn, i, p) { 102 104 insn_attr_t attr; 103 105 104 - attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); 106 + attr = inat_get_opcode_attribute(p); 105 107 switch (attr) { 106 108 case INAT_MAKE_PREFIX(INAT_PFX_CS): 107 109 idx = INAT_SEG_REG_CS;
+1 -1
block/blk-merge.c
··· 144 144 static inline unsigned get_max_io_size(struct request_queue *q, 145 145 struct bio *bio) 146 146 { 147 - unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); 147 + unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0); 148 148 unsigned max_sectors = sectors; 149 149 unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT; 150 150 unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
+4 -1
block/blk-settings.c
··· 547 547 548 548 t->io_min = max(t->io_min, b->io_min); 549 549 t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); 550 - t->chunk_sectors = lcm_not_zero(t->chunk_sectors, b->chunk_sectors); 550 + 551 + /* Set non-power-of-2 compatible chunk_sectors boundary */ 552 + if (b->chunk_sectors) 553 + t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors); 551 554 552 555 /* Physical block size a multiple of the logical block size? */ 553 556 if (t->physical_block_size & (t->logical_block_size - 1)) {
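
Why gcd() rather than lcm_not_zero() here, with illustrative (not
upstream) numbers: every chunk boundary of a stacked device is a
multiple of the gcd, so splitting there can never cross a lower
device's boundary, while an lcm-sized chunk still can:

    /* Hypothetical stack of devices with chunk_sectors 8 and 12:
     *   lcm(8, 12) == 24: an I/O spanning sectors 4..11 fits inside
     *       one 24-sector chunk yet crosses the 8-sector device's
     *       boundary at sector 8;
     *   gcd(8, 12) == 4:  boundaries 8, 12, 16, 24, ... are all
     *       multiples of 4, so an I/O that crosses no multiple of 4
     *       crosses no underlying boundary either. */
    t->chunk_sectors = gcd(8, 12);  /* == 4 */
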
+23 -14
drivers/accessibility/speakup/spk_ttyio.c
··· 47 47 { 48 48 struct spk_ldisc_data *ldisc_data; 49 49 50 + if (tty != speakup_tty) 51 + /* Somebody tried to use this line discipline outside speakup */ 52 + return -ENODEV; 53 + 50 54 if (!tty->ops->write) 51 55 return -EOPNOTSUPP; 52 56 53 - mutex_lock(&speakup_tty_mutex); 54 - if (speakup_tty) { 55 - mutex_unlock(&speakup_tty_mutex); 56 - return -EBUSY; 57 - } 58 - speakup_tty = tty; 59 - 60 57 ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL); 61 - if (!ldisc_data) { 62 - speakup_tty = NULL; 63 - mutex_unlock(&speakup_tty_mutex); 58 + if (!ldisc_data) 64 59 return -ENOMEM; 65 - } 66 60 67 61 init_completion(&ldisc_data->completion); 68 62 ldisc_data->buf_free = true; 69 - speakup_tty->disc_data = ldisc_data; 70 - mutex_unlock(&speakup_tty_mutex); 63 + tty->disc_data = ldisc_data; 71 64 72 65 return 0; 73 66 } ··· 184 191 185 192 tty_unlock(tty); 186 193 194 + mutex_lock(&speakup_tty_mutex); 195 + speakup_tty = tty; 187 196 ret = tty_set_ldisc(tty, N_SPEAKUP); 188 197 if (ret) 189 - pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); 198 + speakup_tty = NULL; 199 + mutex_unlock(&speakup_tty_mutex); 200 + 201 + if (!ret) 202 + /* Success */ 203 + return 0; 204 + 205 + pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); 206 + 207 + tty_lock(tty); 208 + if (tty->ops->close) 209 + tty->ops->close(tty, NULL); 210 + tty_unlock(tty); 211 + 212 + tty_kclose(tty); 190 213 191 214 return ret; 192 215 }
+2 -2
drivers/clk/imx/Kconfig
··· 5 5 depends on ARCH_MXC || COMPILE_TEST 6 6 7 7 config MXC_CLK_SCU 8 - tristate "IMX SCU clock" 9 - depends on ARCH_MXC || COMPILE_TEST 8 + tristate 9 + depends on ARCH_MXC 10 10 depends on IMX_SCU && HAVE_ARM_SMCCC 11 11 12 12 config CLK_IMX1
+1 -1
drivers/clk/renesas/r9a06g032-clocks.c
··· 55 55 u16 sel, g1, r1, g2, r2; 56 56 } dual; 57 57 }; 58 - } __packed; 58 + }; 59 59 60 60 #define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \ 61 61 { .gate = _clk, .reset = _rst, \
+1 -1
drivers/firmware/xilinx/zynqmp.c
··· 29 29 #define PM_API_FEATURE_CHECK_MAX_ORDER 7 30 30 31 31 static bool feature_check_enabled; 32 - DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); 32 + static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); 33 33 34 34 /** 35 35 * struct pm_api_feature_data - PM API Feature data
+1
drivers/fpga/Kconfig
··· 142 142 tristate "FPGA Device Feature List (DFL) support" 143 143 select FPGA_BRIDGE 144 144 select FPGA_REGION 145 + depends on HAS_IOMEM 145 146 help 146 147 Device Feature List (DFL) defines a feature list structure that 147 148 creates a linked list of feature headers within the MMIO space
+1
drivers/gpio/gpio-arizona.c
··· 192 192 ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip, 193 193 arizona_gpio); 194 194 if (ret < 0) { 195 + pm_runtime_disable(&pdev->dev); 195 196 dev_err(&pdev->dev, "Could not register gpiochip, %d\n", 196 197 ret); 197 198 return ret;
+2
drivers/gpio/gpio-dwapb.c
··· 724 724 return err; 725 725 } 726 726 727 + platform_set_drvdata(pdev, gpio); 728 + 727 729 return 0; 728 730 } 729 731
+1 -1
drivers/gpio/gpio-eic-sprd.c
··· 598 598 */ 599 599 res = platform_get_resource(pdev, IORESOURCE_MEM, i); 600 600 if (!res) 601 - continue; 601 + break; 602 602 603 603 sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res); 604 604 if (IS_ERR(sprd_eic->base[i]))
+11 -5
drivers/gpio/gpio-mvebu.c
··· 1197 1197 1198 1198 devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); 1199 1199 1200 + /* Some MVEBU SoCs have simple PWM support for GPIO lines */ 1201 + if (IS_ENABLED(CONFIG_PWM)) { 1202 + err = mvebu_pwm_probe(pdev, mvchip, id); 1203 + if (err) 1204 + return err; 1205 + } 1206 + 1200 1207 /* Some gpio controllers do not provide irq support */ 1201 1208 if (!have_irqs) 1202 1209 return 0; ··· 1213 1206 if (!mvchip->domain) { 1214 1207 dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", 1215 1208 mvchip->chip.label); 1216 - return -ENODEV; 1209 + err = -ENODEV; 1210 + goto err_pwm; 1217 1211 } 1218 1212 1219 1213 err = irq_alloc_domain_generic_chips( ··· 1262 1254 mvchip); 1263 1255 } 1264 1256 1265 - /* Some MVEBU SoCs have simple PWM support for GPIO lines */ 1266 - if (IS_ENABLED(CONFIG_PWM)) 1267 - return mvebu_pwm_probe(pdev, mvchip, id); 1268 - 1269 1257 return 0; 1270 1258 1271 1259 err_domain: 1272 1260 irq_domain_remove(mvchip->domain); 1261 + err_pwm: 1262 + pwmchip_remove(&mvchip->mvpwm->chip); 1273 1263 1274 1264 return err; 1275 1265 }
+2 -2
drivers/gpio/gpio-zynq.c
··· 574 574 struct gpio_chip *chip = irq_data_get_irq_chip_data(d); 575 575 int ret; 576 576 577 - ret = pm_runtime_get_sync(chip->parent); 577 + ret = pm_runtime_resume_and_get(chip->parent); 578 578 if (ret < 0) 579 579 return ret; 580 580 ··· 942 942 943 943 pm_runtime_set_active(&pdev->dev); 944 944 pm_runtime_enable(&pdev->dev); 945 - ret = pm_runtime_get_sync(&pdev->dev); 945 + ret = pm_runtime_resume_and_get(&pdev->dev); 946 946 if (ret < 0) 947 947 goto err_pm_dis; 948 948
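
A behavioural note on the swap (general PM-runtime semantics, not part
of this diff): pm_runtime_get_sync() raises the usage count even when
the resume fails, so error paths must still issue a pm_runtime_put();
pm_runtime_resume_and_get() drops the count itself on failure:

    ret = pm_runtime_resume_and_get(chip->parent);
    if (ret < 0)
        return ret;  /* usage count already dropped for us */
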
+5
drivers/gpio/gpiolib.c
··· 1806 1806 */ 1807 1807 void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset) 1808 1808 { 1809 + #ifdef CONFIG_PINCTRL 1810 + if (list_empty(&gc->gpiodev->pin_ranges)) 1811 + return; 1812 + #endif 1813 + 1809 1814 pinctrl_gpio_free(gc->gpiodev->base + offset); 1810 1815 } 1811 1816 EXPORT_SYMBOL_GPL(gpiochip_generic_free);
+6 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 459 459 struct amdgpu_device *adev = drm_to_adev(dev); 460 460 struct amdgpu_bo *bo; 461 461 struct amdgpu_bo_param bp; 462 + struct drm_gem_object *gobj; 462 463 int ret; 463 464 464 465 memset(&bp, 0, sizeof(bp)); ··· 470 469 bp.type = ttm_bo_type_sg; 471 470 bp.resv = resv; 472 471 dma_resv_lock(resv, NULL); 473 - ret = amdgpu_bo_create(adev, &bp, &bo); 472 + ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE, 473 + AMDGPU_GEM_DOMAIN_CPU, 474 + 0, ttm_bo_type_sg, resv, &gobj); 474 475 if (ret) 475 476 goto error; 476 477 478 + bo = gem_to_amdgpu_bo(gobj); 477 479 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 478 480 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; 479 481 if (dma_buf->ops != &amdgpu_dmabuf_ops) 480 482 bo->prime_shared_count = 1; 481 483 482 484 dma_resv_unlock(resv); 483 - return &bo->tbo.base; 485 + return gobj; 484 486 485 487 error: 486 488 dma_resv_unlock(resv);
+23 -18
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 66 66 bp.type = type; 67 67 bp.resv = resv; 68 68 bp.preferred_domain = initial_domain; 69 - retry: 70 69 bp.flags = flags; 71 70 bp.domain = initial_domain; 72 71 r = amdgpu_bo_create(adev, &bp, &bo); 73 - if (r) { 74 - if (r != -ERESTARTSYS) { 75 - if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { 76 - flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 77 - goto retry; 78 - } 79 - 80 - if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { 81 - initial_domain |= AMDGPU_GEM_DOMAIN_GTT; 82 - goto retry; 83 - } 84 - DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", 85 - size, initial_domain, alignment, r); 86 - } 72 + if (r) 87 73 return r; 88 - } 74 + 89 75 *obj = &bo->tbo.base; 90 76 91 77 return 0; ··· 211 225 uint64_t size = args->in.bo_size; 212 226 struct dma_resv *resv = NULL; 213 227 struct drm_gem_object *gobj; 214 - uint32_t handle; 228 + uint32_t handle, initial_domain; 215 229 int r; 216 230 217 231 /* reject invalid gem flags */ ··· 255 269 resv = vm->root.base.bo->tbo.base.resv; 256 270 } 257 271 272 + retry: 273 + initial_domain = (u32)(0xffffffff & args->in.domains); 258 274 r = amdgpu_gem_object_create(adev, size, args->in.alignment, 259 - (u32)(0xffffffff & args->in.domains), 275 + initial_domain, 260 276 flags, ttm_bo_type_device, resv, &gobj); 277 + if (r) { 278 + if (r != -ERESTARTSYS) { 279 + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { 280 + flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 281 + goto retry; 282 + } 283 + 284 + if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { 285 + initial_domain |= AMDGPU_GEM_DOMAIN_GTT; 286 + goto retry; 287 + } 288 + DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n", 289 + size, initial_domain, args->in.alignment, r); 290 + } 291 + return r; 292 + } 293 + 261 294 if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { 262 295 if (!r) { 263 296 struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 499 499 else 500 500 size = amdgpu_gmc_get_vbios_fb_size(adev); 501 501 502 + if (adev->mman.keep_stolen_vga_memory) 503 + size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION); 504 + 502 505 /* set to 0 if the pre-OS buffer uses up most of vram */ 503 506 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) 504 507 size = 0;
+5 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 1172 1172 con->dir, &con->disable_ras_err_cnt_harvest); 1173 1173 } 1174 1174 1175 - void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 1175 + static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 1176 1176 struct ras_fs_if *head) 1177 1177 { 1178 1178 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); ··· 1194 1194 1195 1195 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) 1196 1196 { 1197 - #if defined(CONFIG_DEBUG_FS) 1198 1197 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1199 1198 struct ras_manager *obj; 1200 1199 struct ras_fs_if fs_info; ··· 1202 1203 * it won't be called in resume path, no need to check 1203 1204 * suspend and gpu reset status 1204 1205 */ 1205 - if (!con) 1206 + if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) 1206 1207 return; 1207 1208 1208 1209 amdgpu_ras_debugfs_create_ctrl_node(adev); ··· 1216 1217 amdgpu_ras_debugfs_create(adev, &fs_info); 1217 1218 } 1218 1219 } 1219 - #endif 1220 1220 } 1221 1221 1222 - void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, 1222 + static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, 1223 1223 struct ras_common_if *head) 1224 1224 { 1225 1225 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); ··· 1232 1234 1233 1235 static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) 1234 1236 { 1235 - #if defined(CONFIG_DEBUG_FS) 1236 1237 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1237 1238 struct ras_manager *obj, *tmp; 1238 1239 ··· 1240 1243 } 1241 1244 1242 1245 con->dir = NULL; 1243 - #endif 1244 1246 } 1245 1247 /* debugfs end */ 1246 1248 ··· 1287 1291 1288 1292 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) 1289 1293 { 1290 - amdgpu_ras_debugfs_remove_all(adev); 1294 + if (IS_ENABLED(CONFIG_DEBUG_FS)) 1295 + amdgpu_ras_debugfs_remove_all(adev); 1291 1296 amdgpu_ras_sysfs_remove_all(adev); 1292 1297 return 0; 1293 1298 }
-6
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
··· 607 607 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, 608 608 struct ras_common_if *head); 609 609 610 - void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 611 - struct ras_fs_if *head); 612 - 613 610 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev); 614 - 615 - void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, 616 - struct ras_common_if *head); 617 611 618 612 int amdgpu_ras_error_query(struct amdgpu_device *adev, 619 613 struct ras_query_if *info);
+1 -1
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 186 186 if (err) 187 187 goto out; 188 188 189 - err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]); 189 + err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]); 190 190 if (err) 191 191 goto out; 192 192 }
+19 -6
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
··· 1011 1011 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); 1012 1012 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp); 1013 1013 1014 + /* Stall DPG before WPTR/RPTR reset */ 1015 + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1016 + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, 1017 + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); 1018 + 1014 1019 /* set the write pointer delay */ 1015 1020 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0); 1016 1021 ··· 1037 1032 ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR); 1038 1033 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, 1039 1034 lower_32_bits(ring->wptr)); 1035 + 1036 + /* Unstall DPG */ 1037 + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1038 + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); 1040 1039 1041 1040 return 0; 1042 1041 } ··· 1565 1556 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, 1566 1557 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 1567 1558 1559 + /* Stall DPG before WPTR/RPTR reset */ 1560 + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1561 + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, 1562 + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); 1563 + 1568 1564 /* Restore */ 1569 1565 ring = &adev->vcn.inst[inst_idx].ring_enc[0]; 1566 + ring->wptr = 0; 1570 1567 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); 1571 1568 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 1572 1569 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); ··· 1580 1565 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); 1581 1566 1582 1567 ring = &adev->vcn.inst[inst_idx].ring_enc[1]; 1568 + ring->wptr = 0; 1583 1569 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); 1584 1570 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 1585 1571 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); 1586 1572 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); 1587 1573 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); 1588 1574 1589 - WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, 1590 - RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF); 1575 + /* Unstall DPG */ 1576 + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1577 + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); 1591 1578 1592 1579 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1593 1580 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); ··· 1646 1629 static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) 1647 1630 { 1648 1631 struct amdgpu_device *adev = ring->adev; 1649 - 1650 - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) 1651 - WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2, 1652 - lower_32_bits(ring->wptr) | 0x80000000); 1653 1632 1654 1633 if (ring->use_doorbell) { 1655 1634 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+2
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 1736 1736 } 1737 1737 1738 1738 mutex_unlock(&p->mutex); 1739 + dma_buf_put(dmabuf); 1739 1740 1740 1741 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); 1741 1742 ··· 1746 1745 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL); 1747 1746 err_unlock: 1748 1747 mutex_unlock(&p->mutex); 1748 + dma_buf_put(dmabuf); 1749 1749 return r; 1750 1750 } 1751 1751
+4 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1058 1058 goto error; 1059 1059 } 1060 1060 1061 - /* Update the actual used number of crtc */ 1062 - adev->mode_info.num_crtc = adev->dm.display_indexes_num; 1063 - 1064 1061 /* create fake encoders for MST */ 1065 1062 dm_dp_create_fake_mst_encoders(adev); 1066 1063 ··· 3248 3251 enum dc_connection_type new_connection_type = dc_connection_none; 3249 3252 const struct dc_plane_cap *plane; 3250 3253 3254 + dm->display_indexes_num = dm->dc->caps.max_streams; 3255 + /* Update the actual used number of crtc */ 3256 + adev->mode_info.num_crtc = adev->dm.display_indexes_num; 3257 + 3251 3258 link_cnt = dm->dc->caps.max_links; 3252 3259 if (amdgpu_dm_mode_config_init(dm->adev)) { 3253 3260 DRM_ERROR("DM: Failed to initialize mode config\n"); ··· 3312 3311 DRM_ERROR("KMS: Failed to initialize crtc\n"); 3313 3312 goto fail; 3314 3313 } 3315 - 3316 - dm->display_indexes_num = dm->dc->caps.max_streams; 3317 3314 3318 3315 /* loops over all connectors on the board */ 3319 3316 for (i = 0; i < link_cnt; i++) {
+100 -6
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
··· 163 163 new_clocks->dppclk_khz = 100000; 164 164 } 165 165 166 - if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { 167 - if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) 166 + /* 167 + * Temporarily ignore the 0 cases for disp and dpp clks. 168 + * We may have a new feature that requires 0 clks in the future. 169 + */ 170 + if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) { 171 + new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz; 172 + new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz; 173 + } 174 + 175 + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) { 176 + if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz) 168 177 dpp_clock_lowered = true; 169 178 clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; 170 179 update_dppclk = true; ··· 579 570 580 571 }; 581 572 582 - static struct wm_table ddr4_wm_table = { 573 + static struct wm_table ddr4_wm_table_gs = { 583 574 .entries = { 584 575 { 585 576 .wm_inst = WM_A, ··· 616 607 } 617 608 }; 618 609 619 - static struct wm_table lpddr4_wm_table = { 610 + static struct wm_table lpddr4_wm_table_gs = { 620 611 .entries = { 621 612 { 622 613 .wm_inst = WM_A, ··· 661 652 .pstate_latency_us = 11.65333, 662 653 .sr_exit_time_us = 8.32, 663 654 .sr_enter_plus_exit_time_us = 9.38, 655 + .valid = true, 656 + }, 657 + { 658 + .wm_inst = WM_B, 659 + .wm_type = WM_TYPE_PSTATE_CHG, 660 + .pstate_latency_us = 11.65333, 661 + .sr_exit_time_us = 9.82, 662 + .sr_enter_plus_exit_time_us = 11.196, 663 + .valid = true, 664 + }, 665 + { 666 + .wm_inst = WM_C, 667 + .wm_type = WM_TYPE_PSTATE_CHG, 668 + .pstate_latency_us = 11.65333, 669 + .sr_exit_time_us = 9.89, 670 + .sr_enter_plus_exit_time_us = 11.24, 671 + .valid = true, 672 + }, 673 + { 674 + .wm_inst = WM_D, 675 + .wm_type = WM_TYPE_PSTATE_CHG, 676 + .pstate_latency_us = 11.65333, 677 + .sr_exit_time_us = 9.748, 678 + .sr_enter_plus_exit_time_us = 11.102, 679 + .valid = true, 680 + }, 681 + } 682 + }; 683 + 684 + static struct wm_table ddr4_wm_table_rn = { 685 + .entries = { 686 + { 687 + .wm_inst = WM_A, 688 + .wm_type = WM_TYPE_PSTATE_CHG, 689 + .pstate_latency_us = 11.72, 690 + .sr_exit_time_us = 9.09, 691 + .sr_enter_plus_exit_time_us = 10.14, 692 + .valid = true, 693 + }, 694 + { 695 + .wm_inst = WM_B, 696 + .wm_type = WM_TYPE_PSTATE_CHG, 697 + .pstate_latency_us = 11.72, 698 + .sr_exit_time_us = 10.12, 699 + .sr_enter_plus_exit_time_us = 11.48, 700 + .valid = true, 701 + }, 702 + { 703 + .wm_inst = WM_C, 704 + .wm_type = WM_TYPE_PSTATE_CHG, 705 + .pstate_latency_us = 11.72, 706 + .sr_exit_time_us = 10.12, 707 + .sr_enter_plus_exit_time_us = 11.48, 708 + .valid = true, 709 + }, 710 + { 711 + .wm_inst = WM_D, 712 + .wm_type = WM_TYPE_PSTATE_CHG, 713 + .pstate_latency_us = 11.72, 714 + .sr_exit_time_us = 10.12, 715 + .sr_enter_plus_exit_time_us = 11.48, 716 + .valid = true, 717 + }, 718 + } 719 + }; 720 + 721 + static struct wm_table lpddr4_wm_table_rn = { 722 + .entries = { 723 + { 724 + .wm_inst = WM_A, 725 + .wm_type = WM_TYPE_PSTATE_CHG, 726 + .pstate_latency_us = 11.65333, 727 + .sr_exit_time_us = 7.32, 728 + .sr_enter_plus_exit_time_us = 8.38, 664 729 .valid = true, 665 730 }, 666 731 { ··· 845 762 struct dc_debug_options *debug = &ctx->dc->debug; 846 763 struct dpm_clocks clock_table = { 0 }; 847 764 enum pp_smu_status status = 0; 765 + int is_green_sardine = 0; 766 + 767 + #if defined(CONFIG_DRM_AMD_DC_DCN) 768 + is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev); 769 + #endif 848 770 849 771 clk_mgr->base.ctx = ctx; 850 772 clk_mgr->base.funcs = &dcn21_funcs; ··· 890 802 if (clk_mgr->periodic_retraining_disabled) { 891 803 rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; 892 804 } else { 893 - rn_bw_params.wm_table = lpddr4_wm_table; 805 + if (is_green_sardine) 806 + rn_bw_params.wm_table = lpddr4_wm_table_gs; 807 + else 808 + rn_bw_params.wm_table = lpddr4_wm_table_rn; 894 809 } 895 810 } else { 896 - rn_bw_params.wm_table = ddr4_wm_table; 811 + if (is_green_sardine) 812 + rn_bw_params.wm_table = ddr4_wm_table_gs; 813 + else 814 + rn_bw_params.wm_table = ddr4_wm_table_rn; 897 815 } 898 816 /* Saved clocks configured at boot for debug purposes */ 899 817 rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
+5 -2
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 3394 3394 { 3395 3395 uint32_t bits_per_channel = 0; 3396 3396 uint32_t kbps; 3397 + struct fixed31_32 link_bw_kbps; 3397 3398 3398 3399 if (timing->flags.DSC) { 3399 - kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); 3400 - kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); 3400 + link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz); 3401 + link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160); 3402 + link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel); 3403 + kbps = dc_fixpt_ceil(link_bw_kbps); 3401 3404 return kbps; 3402 3405 } 3403 3406
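
The fixed-point detour avoids a 32-bit overflow; illustrative numbers,
assuming DSC bits_per_pixel is expressed in 1/16-bpp units:

    /* A ~1188 MHz pixel clock at 12 bpp DSC:
     *   pix_clk_100hz  = 11880000
     *   bits_per_pixel = 12 * 16 = 192
     *   product        = 2280960000 > INT32_MAX (2147483647)
     * dc_fixpt keeps the intermediate wide, then divides by 160
     * (the 1/16 unit scale times the 100 Hz -> kHz factor):
     *   kbps = 2280960000 / 160 = 14256000 */
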
+6 -8
drivers/gpu/drm/amd/pm/inc/smu10.h
··· 136 136 #define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT) 137 137 138 138 /* Workload bits */ 139 - #define WORKLOAD_DEFAULT_BIT 0 140 - #define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 141 - #define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 142 - #define WORKLOAD_PPLIB_VIDEO_BIT 3 143 - #define WORKLOAD_PPLIB_VR_BIT 4 144 - #define WORKLOAD_PPLIB_COMPUTE_BIT 5 145 - #define WORKLOAD_PPLIB_CUSTOM_BIT 6 146 - #define WORKLOAD_PPLIB_COUNT 7 139 + #define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 140 + #define WORKLOAD_PPLIB_VIDEO_BIT 2 141 + #define WORKLOAD_PPLIB_VR_BIT 3 142 + #define WORKLOAD_PPLIB_COMPUTE_BIT 4 143 + #define WORKLOAD_PPLIB_CUSTOM_BIT 5 144 + #define WORKLOAD_PPLIB_COUNT 6 147 145 148 146 typedef struct { 149 147 /* MP1_EXT_SCRATCH0 */
+102 -1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
··· 24 24 #include <linux/types.h> 25 25 #include <linux/kernel.h> 26 26 #include <linux/slab.h> 27 + #include <linux/pci.h> 28 + 27 29 #include <drm/amdgpu_drm.h> 28 30 #include "processpptables.h" 29 31 #include <atom-types.h> ··· 986 984 struct pp_hwmgr *hwmgr, 987 985 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) 988 986 { 987 + struct amdgpu_device *adev = hwmgr->adev; 988 + 989 989 hwmgr->thermal_controller.ucType = 990 990 powerplay_table->sThermalController.ucType; 991 991 hwmgr->thermal_controller.ucI2cLine = ··· 1012 1008 ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, 1013 1009 PHM_PlatformCaps_ThermalController); 1014 1010 1015 - hwmgr->thermal_controller.use_hw_fan_control = 1; 1011 + if (powerplay_table->usTableSize >= sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) { 1012 + const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 = 1013 + (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; 1014 + 1015 + if (0 == le16_to_cpu(powerplay_table3->usFanTableOffset)) { 1016 + hwmgr->thermal_controller.use_hw_fan_control = 1; 1017 + return 0; 1018 + } else { 1019 + const ATOM_PPLIB_FANTABLE *fan_table = 1020 + (const ATOM_PPLIB_FANTABLE *)(((unsigned long)powerplay_table) + 1021 + le16_to_cpu(powerplay_table3->usFanTableOffset)); 1022 + 1023 + if (1 <= fan_table->ucFanTableFormat) { 1024 + hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst = 1025 + fan_table->ucTHyst; 1026 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin = 1027 + le16_to_cpu(fan_table->usTMin); 1028 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed = 1029 + le16_to_cpu(fan_table->usTMed); 1030 + hwmgr->thermal_controller.advanceFanControlParameters.usTHigh = 1031 + le16_to_cpu(fan_table->usTHigh); 1032 + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = 1033 + le16_to_cpu(fan_table->usPWMMin); 1034 + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed = 1035 + le16_to_cpu(fan_table->usPWMMed); 1036 + hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh = 1037 + le16_to_cpu(fan_table->usPWMHigh); 1038 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 10900; 1039 + hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay = 100000; 1040 + 1041 + phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1042 + PHM_PlatformCaps_MicrocodeFanControl); 1043 + } 1044 + 1045 + if (2 <= fan_table->ucFanTableFormat) { 1046 + const ATOM_PPLIB_FANTABLE2 *fan_table2 = 1047 + (const ATOM_PPLIB_FANTABLE2 *)(((unsigned long)powerplay_table) + 1048 + le16_to_cpu(powerplay_table3->usFanTableOffset)); 1049 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 1050 + le16_to_cpu(fan_table2->usTMax); 1051 + } 1052 + 1053 + if (3 <= fan_table->ucFanTableFormat) { 1054 + const ATOM_PPLIB_FANTABLE3 *fan_table3 = 1055 + (const ATOM_PPLIB_FANTABLE3 *) (((unsigned long)powerplay_table) + 1056 + le16_to_cpu(powerplay_table3->usFanTableOffset)); 1057 + 1058 + hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode = 1059 + fan_table3->ucFanControlMode; 1060 + 1061 + if ((3 == fan_table->ucFanTableFormat) && 1062 + (0x67B1 == adev->pdev->device)) 1063 + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM = 1064 + 47; 1065 + else 1066 + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM = 1067 + le16_to_cpu(fan_table3->usFanPWMMax); 1068 + 1069 + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity = 1070 + 4836; 1071 + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = 1072 + le16_to_cpu(fan_table3->usFanOutputSensitivity); 1073 + } 1074 + 1075 + if (6 <= fan_table->ucFanTableFormat) { 1076 + const ATOM_PPLIB_FANTABLE4 *fan_table4 = 1077 + (const ATOM_PPLIB_FANTABLE4 *)(((unsigned long)powerplay_table) + 1078 + le16_to_cpu(powerplay_table3->usFanTableOffset)); 1079 + 1080 + phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1081 + PHM_PlatformCaps_FanSpeedInTableIsRPM); 1082 + 1083 + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM = 1084 + le16_to_cpu(fan_table4->usFanRPMMax); 1085 + } 1086 + 1087 + if (7 <= fan_table->ucFanTableFormat) { 1088 + const ATOM_PPLIB_FANTABLE5 *fan_table5 = 1089 + (const ATOM_PPLIB_FANTABLE5 *)(((unsigned long)powerplay_table) + 1090 + le16_to_cpu(powerplay_table3->usFanTableOffset)); 1091 + 1092 + if (0x67A2 == adev->pdev->device || 1093 + 0x67A9 == adev->pdev->device || 1094 + 0x67B9 == adev->pdev->device) { 1095 + phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1096 + PHM_PlatformCaps_GeminiRegulatorFanControlSupport); 1097 + hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentLow = 1098 + le16_to_cpu(fan_table5->usFanCurrentLow); 1099 + hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentHigh = 1100 + le16_to_cpu(fan_table5->usFanCurrentHigh); 1101 + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMLow = 1102 + le16_to_cpu(fan_table5->usFanRPMLow); 1103 + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMHigh = 1104 + le16_to_cpu(fan_table5->usFanRPMHigh); 1105 + } 1106 + } 1107 + } 1108 + } 1016 1109 1017 1110 return 0; 1018 1111 }
+3 -6
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
··· 1297 1297 int pplib_workload = 0; 1298 1298 1299 1299 switch (power_profile) { 1300 - case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: 1301 - pplib_workload = WORKLOAD_DEFAULT_BIT; 1302 - break; 1303 1300 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 1304 1301 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 1305 - break; 1306 - case PP_SMC_POWER_PROFILE_POWERSAVING: 1307 - pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; 1308 1302 break; 1309 1303 case PP_SMC_POWER_PROFILE_VIDEO: 1310 1304 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; ··· 1308 1314 break; 1309 1315 case PP_SMC_POWER_PROFILE_COMPUTE: 1310 1316 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; 1317 + break; 1318 + case PP_SMC_POWER_PROFILE_CUSTOM: 1319 + pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT; 1311 1320 break; 1312 1321 } 1313 1322
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 217 217 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), 218 218 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 219 219 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 220 - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), 220 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 221 221 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 222 222 }; 223 223
+6 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 1164 1164 if (ret) 1165 1165 return ret; 1166 1166 1167 - crystal_clock_freq = amdgpu_asic_get_xclk(adev); 1167 + /* 1168 + * crystal_clock_freq div by 4 is required since the fan control 1169 + * module refers to 25MHz 1170 + */ 1171 + 1172 + crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4; 1168 1173 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1169 1174 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL, 1170 1175 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
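Worked numbers for the divide-by-4, assuming a 100 MHz reference crystal and that amdgpu_asic_get_xclk() reports it in 10 kHz units (so 100 MHz reads as 10000); both are assumptions for illustration, not values taken from this diff:

#include <stdio.h>

int main(void)
{
    unsigned long long xclk = 10000;   /* 100 MHz in 10 kHz units (assumed) */
    unsigned long long rpm  = 3000;

    /* tach_period = 60 * f * 10000 / (8 * rpm), as in the hunk above */
    unsigned long long before = 60 * xclk * 10000 / (8 * rpm);
    unsigned long long after  = 60 * (xclk / 4) * 10000 / (8 * rpm);

    /* the tach block counts a 25 MHz reference, so periods derived
     * from the full 100 MHz value came out four times too large */
    printf("before=%llu after=%llu\n", before, after); /* 250000 vs 62500 */
    return 0;
}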
+12 -12
drivers/gpu/drm/i915/display/intel_display.c
··· 18021 18021 if (!HAS_GMCH(i915)) 18022 18022 sanitize_watermarks(i915); 18023 18023 18024 - /* 18025 - * Force all active planes to recompute their states. So that on 18026 - * mode_setcrtc after probe, all the intel_plane_state variables 18027 - * are already calculated and there is no assert_plane warnings 18028 - * during bootup. 18029 - */ 18030 - ret = intel_initial_commit(dev); 18031 - if (ret) 18032 - drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n"); 18033 - 18034 18024 return 0; 18035 18025 } 18036 18026 ··· 18029 18039 { 18030 18040 int ret; 18031 18041 18032 - intel_overlay_setup(i915); 18033 - 18034 18042 if (!HAS_DISPLAY(i915)) 18035 18043 return 0; 18044 + 18045 + /* 18046 + * Force all active planes to recompute their states. So that on 18047 + * mode_setcrtc after probe, all the intel_plane_state variables 18048 + * are already calculated and there is no assert_plane warnings 18049 + * during bootup. 18050 + */ 18051 + ret = intel_initial_commit(&i915->drm); 18052 + if (ret) 18053 + drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); 18054 + 18055 + intel_overlay_setup(i915); 18036 18056 18037 18057 ret = intel_fbdev_init(&i915->drm); 18038 18058 if (ret)
+1 -1
drivers/gpu/drm/i915/display/intel_dp.c
··· 573 573 return 0; 574 574 } 575 575 /* Also take into account max slice width */ 576 - min_slice_count = min_t(u8, min_slice_count, 576 + min_slice_count = max_t(u8, min_slice_count, 577 577 DIV_ROUND_UP(mode_hdisplay, 578 578 max_slice_width)); 579 579
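The one-character fix is easy to misread: the bandwidth-derived slice count and the count forced by the sink's maximum slice width are both lower bounds, and two lower bounds combine with max(), not min(). Schematically, in the shape of the surrounding code:

/* two independent lower bounds on the DSC slice count */
needed_for_bw    = min_slice_count;  /* from pixel rate, computed earlier */
needed_for_width = DIV_ROUND_UP(mode_hdisplay, max_slice_width);

/* min_t() could return a count whose slices are wider than the sink
 * allows; max_t() is the value that satisfies both constraints */
min_slice_count = max_t(u8, needed_for_bw, needed_for_width);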
+5 -2
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 3097 3097 break; 3098 3098 } 3099 3099 3100 - static void eb_request_add(struct i915_execbuffer *eb) 3100 + static int eb_request_add(struct i915_execbuffer *eb, int err) 3101 3101 { 3102 3102 struct i915_request *rq = eb->request; 3103 3103 struct intel_timeline * const tl = i915_request_timeline(rq); ··· 3118 3118 /* Serialise with context_close via the add_to_timeline */ 3119 3119 i915_request_set_error_once(rq, -ENOENT); 3120 3120 __i915_request_skip(rq); 3121 + err = -ENOENT; /* override any transient errors */ 3121 3122 } 3122 3123 3123 3124 __i915_request_queue(rq, &attr); ··· 3128 3127 retire_requests(tl, prev); 3129 3128 3130 3129 mutex_unlock(&tl->mutex); 3130 + 3131 + return err; 3131 3132 } 3132 3133 3133 3134 static const i915_user_extension_fn execbuf_extensions[] = { ··· 3335 3332 err = eb_submit(&eb, batch); 3336 3333 err_request: 3337 3334 i915_request_get(eb.request); 3338 - eb_request_add(&eb); 3335 + err = eb_request_add(&eb, err); 3339 3336 3340 3337 if (eb.fences) 3341 3338 signal_fence_array(&eb);
+74 -94
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
··· 101 101 intel_gt_pm_put_async(b->irq_engine->gt); 102 102 } 103 103 104 + static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) 105 + { 106 + spin_lock(&b->irq_lock); 107 + if (b->irq_armed) 108 + __intel_breadcrumbs_disarm_irq(b); 109 + spin_unlock(&b->irq_lock); 110 + } 111 + 104 112 static void add_signaling_context(struct intel_breadcrumbs *b, 105 113 struct intel_context *ce) 106 114 { 107 - intel_context_get(ce); 108 - list_add_tail(&ce->signal_link, &b->signalers); 115 + lockdep_assert_held(&ce->signal_lock); 116 + 117 + spin_lock(&b->signalers_lock); 118 + list_add_rcu(&ce->signal_link, &b->signalers); 119 + spin_unlock(&b->signalers_lock); 109 120 } 110 121 111 - static void remove_signaling_context(struct intel_breadcrumbs *b, 122 + static bool remove_signaling_context(struct intel_breadcrumbs *b, 112 123 struct intel_context *ce) 113 124 { 114 - list_del(&ce->signal_link); 115 - intel_context_put(ce); 125 + lockdep_assert_held(&ce->signal_lock); 126 + 127 + if (!list_empty(&ce->signals)) 128 + return false; 129 + 130 + spin_lock(&b->signalers_lock); 131 + list_del_rcu(&ce->signal_link); 132 + spin_unlock(&b->signalers_lock); 133 + 134 + return true; 116 135 } 117 136 118 137 static inline bool __request_completed(const struct i915_request *rq) ··· 194 175 195 176 static bool __signal_request(struct i915_request *rq) 196 177 { 178 + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); 179 + 197 180 if (!__dma_fence_signal(&rq->fence)) { 198 181 i915_request_put(rq); 199 182 return false; ··· 216 195 struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work); 217 196 const ktime_t timestamp = ktime_get(); 218 197 struct llist_node *signal, *sn; 219 - struct intel_context *ce, *cn; 220 - struct list_head *pos, *next; 198 + struct intel_context *ce; 221 199 222 200 signal = NULL; 223 201 if (unlikely(!llist_empty(&b->signaled_requests))) 224 202 signal = llist_del_all(&b->signaled_requests); 225 - 226 - spin_lock(&b->irq_lock); 227 203 228 204 /* 229 205 * Keep the irq armed until the interrupt after all listeners are gone. ··· 247 229 * interrupt draw less ire from other users of the system and tools 248 230 * like powertop. 249 231 */ 250 - if (!signal && b->irq_armed && list_empty(&b->signalers)) 251 - __intel_breadcrumbs_disarm_irq(b); 232 + if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers)) 233 + intel_breadcrumbs_disarm_irq(b); 252 234 253 - list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) { 254 - GEM_BUG_ON(list_empty(&ce->signals)); 235 + rcu_read_lock(); 236 + list_for_each_entry_rcu(ce, &b->signalers, signal_link) { 237 + struct i915_request *rq; 255 238 256 - list_for_each_safe(pos, next, &ce->signals) { 257 - struct i915_request *rq = 258 - list_entry(pos, typeof(*rq), signal_link); 239 + list_for_each_entry_rcu(rq, &ce->signals, signal_link) { 240 + bool release; 259 241 260 - GEM_BUG_ON(!check_signal_order(ce, rq)); 261 242 if (!__request_completed(rq)) 243 + break; 244 + 245 + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, 246 + &rq->fence.flags)) 262 247 break; 263 248 264 249 /* ··· 269 248 * spinlock as the callback chain may end up adding 270 249 * more signalers to the same context or engine. 
271 250 */ 272 - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); 251 + spin_lock(&ce->signal_lock); 252 + list_del_rcu(&rq->signal_link); 253 + release = remove_signaling_context(b, ce); 254 + spin_unlock(&ce->signal_lock); 255 + 273 256 if (__signal_request(rq)) 274 257 /* We own signal_node now, xfer to local list */ 275 258 signal = slist_add(&rq->signal_node, signal); 276 - } 277 259 278 - /* 279 - * We process the list deletion in bulk, only using a list_add 280 - * (not list_move) above but keeping the status of 281 - * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit. 282 - */ 283 - if (!list_is_first(pos, &ce->signals)) { 284 - /* Advance the list to the first incomplete request */ 285 - __list_del_many(&ce->signals, pos); 286 - if (&ce->signals == pos) { /* now empty */ 260 + if (release) { 287 261 add_retire(b, ce->timeline); 288 - remove_signaling_context(b, ce); 262 + intel_context_put(ce); 289 263 } 290 264 } 291 265 } 292 - 293 - spin_unlock(&b->irq_lock); 266 + rcu_read_unlock(); 294 267 295 268 llist_for_each_safe(signal, sn, signal) { 296 269 struct i915_request *rq = ··· 313 298 if (!b) 314 299 return NULL; 315 300 316 - spin_lock_init(&b->irq_lock); 301 + b->irq_engine = irq_engine; 302 + 303 + spin_lock_init(&b->signalers_lock); 317 304 INIT_LIST_HEAD(&b->signalers); 318 305 init_llist_head(&b->signaled_requests); 319 306 307 + spin_lock_init(&b->irq_lock); 320 308 init_irq_work(&b->irq_work, signal_irq_work); 321 - 322 - b->irq_engine = irq_engine; 323 309 324 310 return b; 325 311 } ··· 363 347 kfree(b); 364 348 } 365 349 366 - static void insert_breadcrumb(struct i915_request *rq, 367 - struct intel_breadcrumbs *b) 350 + static void insert_breadcrumb(struct i915_request *rq) 368 351 { 352 + struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; 369 353 struct intel_context *ce = rq->context; 370 354 struct list_head *pos; 371 355 ··· 387 371 } 388 372 389 373 if (list_empty(&ce->signals)) { 374 + intel_context_get(ce); 390 375 add_signaling_context(b, ce); 391 376 pos = &ce->signals; 392 377 } else { ··· 413 396 break; 414 397 } 415 398 } 416 - list_add(&rq->signal_link, pos); 399 + list_add_rcu(&rq->signal_link, pos); 417 400 GEM_BUG_ON(!check_signal_order(ce, rq)); 401 + GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)); 418 402 set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); 419 403 420 404 /* ··· 428 410 429 411 bool i915_request_enable_breadcrumb(struct i915_request *rq) 430 412 { 431 - struct intel_breadcrumbs *b; 413 + struct intel_context *ce = rq->context; 432 414 433 415 /* Serialises with i915_request_retire() using rq->lock */ 434 416 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) ··· 443 425 if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) 444 426 return true; 445 427 446 - /* 447 - * rq->engine is locked by rq->engine->active.lock. That however 448 - * is not known until after rq->engine has been dereferenced and 449 - * the lock acquired. Hence we acquire the lock and then validate 450 - * that rq->engine still matches the lock we hold for it. 451 - * 452 - * Here, we are using the breadcrumb lock as a proxy for the 453 - * rq->engine->active.lock, and we know that since the breadcrumb 454 - * will be serialised within i915_request_submit/i915_request_unsubmit, 455 - * the engine cannot change while active as long as we hold the 456 - * breadcrumb lock on that engine. 
457 - * 458 - * From the dma_fence_enable_signaling() path, we are outside of the 459 - * request submit/unsubmit path, and so we must be more careful to 460 - * acquire the right lock. 461 - */ 462 - b = READ_ONCE(rq->engine)->breadcrumbs; 463 - spin_lock(&b->irq_lock); 464 - while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) { 465 - spin_unlock(&b->irq_lock); 466 - b = READ_ONCE(rq->engine)->breadcrumbs; 467 - spin_lock(&b->irq_lock); 468 - } 469 - 470 - /* 471 - * Now that we are finally serialised with request submit/unsubmit, 472 - * [with b->irq_lock] and with i915_request_retire() [via checking 473 - * SIGNALED with rq->lock] confirm the request is indeed active. If 474 - * it is no longer active, the breadcrumb will be attached upon 475 - * i915_request_submit(). 476 - */ 428 + spin_lock(&ce->signal_lock); 477 429 if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) 478 - insert_breadcrumb(rq, b); 479 - 480 - spin_unlock(&b->irq_lock); 430 + insert_breadcrumb(rq); 431 + spin_unlock(&ce->signal_lock); 481 432 482 433 return true; 483 434 } 484 435 485 436 void i915_request_cancel_breadcrumb(struct i915_request *rq) 486 437 { 487 - struct intel_breadcrumbs *b = rq->engine->breadcrumbs; 438 + struct intel_context *ce = rq->context; 439 + bool release; 488 440 489 - /* 490 - * We must wait for b->irq_lock so that we know the interrupt handler 491 - * has released its reference to the intel_context and has completed 492 - * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if 493 - * required). 494 - */ 495 - spin_lock(&b->irq_lock); 496 - if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { 497 - struct intel_context *ce = rq->context; 441 + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) 442 + return; 498 443 499 - list_del(&rq->signal_link); 500 - if (list_empty(&ce->signals)) 501 - remove_signaling_context(b, ce); 444 + spin_lock(&ce->signal_lock); 445 + list_del_rcu(&rq->signal_link); 446 + release = remove_signaling_context(rq->engine->breadcrumbs, ce); 447 + spin_unlock(&ce->signal_lock); 448 + if (release) 449 + intel_context_put(ce); 502 450 503 - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); 504 - i915_request_put(rq); 505 - } 506 - spin_unlock(&b->irq_lock); 451 + i915_request_put(rq); 507 452 } 508 453 509 454 static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p) ··· 476 495 477 496 drm_printf(p, "Signals:\n"); 478 497 479 - spin_lock_irq(&b->irq_lock); 480 - list_for_each_entry(ce, &b->signalers, signal_link) { 481 - list_for_each_entry(rq, &ce->signals, signal_link) { 498 + rcu_read_lock(); 499 + list_for_each_entry_rcu(ce, &b->signalers, signal_link) { 500 + list_for_each_entry_rcu(rq, &ce->signals, signal_link) 482 501 drm_printf(p, "\t[%llx:%llx%s] @ %dms\n", 483 502 rq->fence.context, rq->fence.seqno, 484 503 i915_request_completed(rq) ? "!" : 485 504 i915_request_started(rq) ? "*" : 486 505 "", 487 506 jiffies_to_msecs(jiffies - rq->emitted_jiffies)); 488 - } 489 507 } 490 - spin_unlock_irq(&b->irq_lock); 508 + rcu_read_unlock(); 491 509 } 492 510 493 511 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
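Reduced to its locking skeleton, the rework replaces the single engine-wide irq_lock with three things: a per-context signal_lock for that context's request list, a signalers_lock taken only by writers of the context list, and RCU for the irq worker's traversal. A sketch under those assumptions, with generic names rather than the i915 types:

struct ctx {
    spinlock_t lock;             /* guards ->requests */
    struct list_head link;       /* on bc->signalers, RCU-traversed */
    struct list_head requests;
};

struct bc {
    spinlock_t signalers_lock;   /* serialises writers of ->signalers */
    struct list_head signalers;
};

static void ctx_attach(struct bc *bc, struct ctx *ce)
{
    lockdep_assert_held(&ce->lock);
    spin_lock(&bc->signalers_lock);
    list_add_rcu(&ce->link, &bc->signalers);
    spin_unlock(&bc->signalers_lock);
}

static void irq_worker_scan(struct bc *bc)
{
    struct ctx *ce;

    rcu_read_lock();             /* hot path takes no locks to scan */
    list_for_each_entry_rcu(ce, &bc->signalers, link) {
        /* completed requests are claimed via test_and_clear_bit()
         * and unlinked under ce->lock, as in the hunks above */
    }
    rcu_read_unlock();
}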
+2 -4
drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
··· 29 29 * the overhead of waking that client is much preferred. 30 30 */ 31 31 struct intel_breadcrumbs { 32 - spinlock_t irq_lock; /* protects the lists used in hardirq context */ 33 - 34 32 /* Not all breadcrumbs are attached to physical HW */ 35 33 struct intel_engine_cs *irq_engine; 36 34 35 + spinlock_t signalers_lock; /* protects the list of signalers */ 37 36 struct list_head signalers; 38 37 struct llist_head signaled_requests; 39 38 39 + spinlock_t irq_lock; /* protects the interrupt from hardirq context */ 40 40 struct irq_work irq_work; /* for use from inside irq_lock */ 41 - 42 41 unsigned int irq_enabled; 43 - 44 42 bool irq_armed; 45 43 }; 46 44
+11 -4
drivers/gpu/drm/i915/gt/intel_context.c
··· 25 25 return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL); 26 26 } 27 27 28 + static void rcu_context_free(struct rcu_head *rcu) 29 + { 30 + struct intel_context *ce = container_of(rcu, typeof(*ce), rcu); 31 + 32 + kmem_cache_free(global.slab_ce, ce); 33 + } 34 + 28 35 void intel_context_free(struct intel_context *ce) 29 36 { 30 - kmem_cache_free(global.slab_ce, ce); 37 + call_rcu(&ce->rcu, rcu_context_free); 31 38 } 32 39 33 40 struct intel_context * ··· 363 356 } 364 357 365 358 void 366 - intel_context_init(struct intel_context *ce, 367 - struct intel_engine_cs *engine) 359 + intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) 368 360 { 369 361 GEM_BUG_ON(!engine->cops); 370 362 GEM_BUG_ON(!engine->gt->vm); ··· 379 373 380 374 ce->vm = i915_vm_get(engine->gt->vm); 381 375 382 - INIT_LIST_HEAD(&ce->signal_link); 376 + /* NB ce->signal_link/lock is used under RCU */ 377 + spin_lock_init(&ce->signal_lock); 383 378 INIT_LIST_HEAD(&ce->signals); 384 379 385 380 mutex_init(&ce->pin_mutex);
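Freeing through call_rcu() is what makes the RCU traversal above safe: a reader inside rcu_read_lock() may still be inspecting a context whose last reference has just dropped, so the actual free is deferred past a grace period. The rcu_head shares storage with the kref (see the union added to intel_context_types.h just below, with its "no kref_get_unless_zero()!" warning), and the general pattern looks like this, with obj and slab as placeholders:

static void obj_free_rcu(struct rcu_head *rcu)
{
    struct obj *o = container_of(rcu, typeof(*o), rcu);

    kmem_cache_free(slab, o);    /* runs after a grace period */
}

static void obj_release(struct kref *ref)
{
    struct obj *o = container_of(ref, typeof(*o), ref);

    call_rcu(&o->rcu, obj_free_rcu);
}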
+20 -3
drivers/gpu/drm/i915/gt/intel_context_types.h
··· 25 25 struct i915_gem_context; 26 26 struct i915_gem_ww_ctx; 27 27 struct i915_vma; 28 + struct intel_breadcrumbs; 28 29 struct intel_context; 29 30 struct intel_ring; 30 31 ··· 45 44 }; 46 45 47 46 struct intel_context { 48 - struct kref ref; 47 + /* 48 + * Note: Some fields may be accessed under RCU. 49 + * 50 + * Unless otherwise noted a field can safely be assumed to be protected 51 + * by strong reference counting. 52 + */ 53 + union { 54 + struct kref ref; /* no kref_get_unless_zero()! */ 55 + struct rcu_head rcu; 56 + }; 49 57 50 58 struct intel_engine_cs *engine; 51 59 struct intel_engine_cs *inflight; ··· 64 54 struct i915_address_space *vm; 65 55 struct i915_gem_context __rcu *gem_context; 66 56 67 - struct list_head signal_link; 68 - struct list_head signals; 57 + /* 58 + * @signal_lock protects the list of requests that need signaling, 59 + * @signals. While there are any requests that need signaling, 60 + * we add the context to the breadcrumbs worker, and remove it 61 + * upon completion/cancellation of the last request. 62 + */ 63 + struct list_head signal_link; /* Accessed under RCU */ 64 + struct list_head signals; /* Guarded by signal_lock */ 65 + spinlock_t signal_lock; /* protects signals, the list of requests */ 69 66 70 67 struct i915_vma *state; 71 68 struct intel_ring *ring;
+6 -1
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 2788 2788 static bool execlists_hold(struct intel_engine_cs *engine, 2789 2789 struct i915_request *rq) 2790 2790 { 2791 + if (i915_request_on_hold(rq)) 2792 + return false; 2793 + 2791 2794 spin_lock_irq(&engine->active.lock); 2792 2795 2793 2796 if (i915_request_completed(rq)) { /* too late! */ ··· 3172 3169 spin_unlock_irqrestore(&engine->active.lock, flags); 3173 3170 3174 3171 /* Recheck after serialising with direct-submission */ 3175 - if (unlikely(timeout && preempt_timeout(engine))) 3172 + if (unlikely(timeout && preempt_timeout(engine))) { 3173 + cancel_timer(&engine->execlists.preempt); 3176 3174 execlists_reset(engine, "preemption time out"); 3175 + } 3177 3176 } 3178 3177 } 3179 3178
+16 -5
drivers/gpu/drm/i915/gt/intel_mocs.c
··· 59 59 #define _L3_CACHEABILITY(value) ((value) << 4) 60 60 61 61 /* Helper defines */ 62 - #define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */ 63 - #define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ 62 + #define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ 64 63 65 64 /* (e)LLC caching options */ 66 65 /* ··· 130 131 GEN9_MOCS_ENTRIES, 131 132 MOCS_ENTRY(I915_MOCS_CACHED, 132 133 LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), 133 - L3_3_WB) 134 + L3_3_WB), 135 + 136 + /* 137 + * mocs:63 138 + * - used by the L3 for all of its evictions. 139 + * Thus it is expected to allow LLC cacheability to enable coherent 140 + * flows to be maintained. 141 + * - used to force L3 uncachable cycles. 142 + * Thus it is expected to make the surface L3 uncacheable. 143 + */ 144 + MOCS_ENTRY(63, 145 + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), 146 + L3_1_UC) 134 147 }; 135 148 136 149 /* NOTE: the LE_TGT_CACHE is not used on Broxton */ ··· 327 316 if (INTEL_GEN(i915) >= 12) { 328 317 table->size = ARRAY_SIZE(tgl_mocs_table); 329 318 table->table = tgl_mocs_table; 330 - table->n_entries = GEN11_NUM_MOCS_ENTRIES; 319 + table->n_entries = GEN9_NUM_MOCS_ENTRIES; 331 320 } else if (IS_GEN(i915, 11)) { 332 321 table->size = ARRAY_SIZE(icl_mocs_table); 333 322 table->table = icl_mocs_table; 334 - table->n_entries = GEN11_NUM_MOCS_ENTRIES; 323 + table->n_entries = GEN9_NUM_MOCS_ENTRIES; 335 324 } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { 336 325 table->size = ARRAY_SIZE(skl_mocs_table); 337 326 table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+4
drivers/gpu/drm/i915/gt/intel_rps.c
··· 883 883 adj = -2; 884 884 rps->last_adj = adj; 885 885 rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); 886 + if (rps->cur_freq < rps->efficient_freq) { 887 + rps->cur_freq = rps->efficient_freq; 888 + rps->last_adj = 0; 889 + } 886 890 887 891 GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); 888 892 }
+6 -3
drivers/gpu/drm/i915/gt/shmem_utils.c
··· 73 73 mapping_set_unevictable(file->f_mapping); 74 74 return vaddr; 75 75 err_page: 76 - while (--i >= 0) 76 + while (i--) 77 77 put_page(pages[i]); 78 78 kvfree(pages); 79 79 return NULL; ··· 103 103 return PTR_ERR(page); 104 104 105 105 vaddr = kmap(page); 106 - if (write) 106 + if (write) { 107 107 memcpy(vaddr + offset_in_page(off), ptr, this); 108 - else 108 + set_page_dirty(page); 109 + } else { 109 110 memcpy(ptr, vaddr + offset_in_page(off), this); 111 + } 112 + mark_page_accessed(page); 110 113 kunmap(page); 111 114 put_page(page); 112 115
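The unwind-loop change is the classic fix for a backwards cleanup with an unsigned index, which is presumably the motivation here: `--i >= 0` can never become false for an unsigned type (the first decrement past zero wraps), while `i--` tests before decrementing and still visits i-1 down to 0.

/* broken when i is unsigned: the condition is always true, and the
 * wrapped index reads far past the array */
while (--i >= 0)
    put_page(pages[i]);

/* correct for any integer type: test first, then decrement */
while (i--)
    put_page(pages[i]);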
+2 -4
drivers/gpu/drm/i915/i915_request.h
··· 177 177 struct intel_ring *ring; 178 178 struct intel_timeline __rcu *timeline; 179 179 180 - union { 181 - struct list_head signal_link; 182 - struct llist_node signal_node; 183 - }; 180 + struct list_head signal_link; 181 + struct llist_node signal_node; 184 182 185 183 /* 186 184 * The rcu epoch of when this request was allocated. Used to judiciously
+2 -2
drivers/gpu/drm/i915/selftests/i915_gem.c
··· 211 211 return PTR_ERR(obj); 212 212 213 213 obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE); 214 - if (IS_ERR(obj)) { 215 - err = PTR_ERR(obj); 214 + if (IS_ERR(obj2)) { 215 + err = PTR_ERR(obj2); 216 216 goto put1; 217 217 } 218 218
+11
drivers/gpu/drm/mxsfb/mxsfb_kms.c
··· 22 22 #include <drm/drm_fb_cma_helper.h> 23 23 #include <drm/drm_fourcc.h> 24 24 #include <drm/drm_gem_cma_helper.h> 25 + #include <drm/drm_gem_framebuffer_helper.h> 25 26 #include <drm/drm_plane.h> 26 27 #include <drm/drm_plane_helper.h> 27 28 #include <drm/drm_vblank.h> ··· 485 484 writel(ctrl, mxsfb->base + LCDC_AS_CTRL); 486 485 } 487 486 487 + static bool mxsfb_format_mod_supported(struct drm_plane *plane, 488 + uint32_t format, 489 + uint64_t modifier) 490 + { 491 + return modifier == DRM_FORMAT_MOD_LINEAR; 492 + } 493 + 488 494 static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = { 495 + .prepare_fb = drm_gem_fb_prepare_fb, 489 496 .atomic_check = mxsfb_plane_atomic_check, 490 497 .atomic_update = mxsfb_plane_primary_atomic_update, 491 498 }; 492 499 493 500 static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = { 501 + .prepare_fb = drm_gem_fb_prepare_fb, 494 502 .atomic_check = mxsfb_plane_atomic_check, 495 503 .atomic_update = mxsfb_plane_overlay_atomic_update, 496 504 }; 497 505 498 506 static const struct drm_plane_funcs mxsfb_plane_funcs = { 507 + .format_mod_supported = mxsfb_format_mod_supported, 499 508 .update_plane = drm_atomic_helper_update_plane, 500 509 .disable_plane = drm_atomic_helper_disable_plane, 501 510 .destroy = drm_plane_cleanup,
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1214 1214 } 1215 1215 1216 1216 reg->bus.offset = handle; 1217 - ret = 0; 1218 1217 } 1218 + ret = 0; 1219 1219 break; 1220 1220 default: 1221 1221 ret = -EINVAL;
+4 -6
drivers/gpu/drm/omapdrm/dss/sdi.c
··· 195 195 sdi->pixelclock = adjusted_mode->clock * 1000; 196 196 } 197 197 198 - static void sdi_bridge_enable(struct drm_bridge *bridge, 199 - struct drm_bridge_state *bridge_state) 198 + static void sdi_bridge_enable(struct drm_bridge *bridge) 200 199 { 201 200 struct sdi_device *sdi = drm_bridge_to_sdi(bridge); 202 201 struct dispc_clock_info dispc_cinfo; ··· 258 259 regulator_disable(sdi->vdds_sdi_reg); 259 260 } 260 261 261 - static void sdi_bridge_disable(struct drm_bridge *bridge, 262 - struct drm_bridge_state *bridge_state) 262 + static void sdi_bridge_disable(struct drm_bridge *bridge) 263 263 { 264 264 struct sdi_device *sdi = drm_bridge_to_sdi(bridge); 265 265 ··· 276 278 .mode_valid = sdi_bridge_mode_valid, 277 279 .mode_fixup = sdi_bridge_mode_fixup, 278 280 .mode_set = sdi_bridge_mode_set, 279 - .atomic_enable = sdi_bridge_enable, 280 - .atomic_disable = sdi_bridge_disable, 281 + .enable = sdi_bridge_enable, 282 + .disable = sdi_bridge_disable, 281 283 }; 282 284 283 285 static void sdi_bridge_init(struct sdi_device *sdi)
+1 -1
drivers/gpu/drm/panel/panel-sony-acx565akm.c
··· 629 629 lcd->spi = spi; 630 630 mutex_init(&lcd->mutex); 631 631 632 - lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); 632 + lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); 633 633 if (IS_ERR(lcd->reset_gpio)) { 634 634 dev_err(&spi->dev, "failed to get reset GPIO\n"); 635 635 return PTR_ERR(lcd->reset_gpio);
+1 -1
drivers/gpu/drm/rockchip/rockchip_lvds.c
··· 544 544 struct device_node *port, *endpoint; 545 545 int ret = 0, child_count = 0; 546 546 const char *name; 547 - u32 endpoint_id; 547 + u32 endpoint_id = 0; 548 548 549 549 lvds->drm_dev = drm_dev; 550 550 port = of_graph_get_port_by_id(dev->of_node, 1);
+1 -1
drivers/gpu/drm/tegra/drm.c
··· 90 90 if (!fpriv) 91 91 return -ENOMEM; 92 92 93 - idr_init(&fpriv->contexts); 93 + idr_init_base(&fpriv->contexts, 1); 94 94 mutex_init(&fpriv->lock); 95 95 filp->driver_priv = fpriv; 96 96
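idr_init_base() is a drop-in for idr_init() that records the lowest id the caller will ever hand out; Tegra context handles start at 1, with 0 doubling as "no context", so declaring base 1 spares every allocation a wasted probe of id 0. A sketch of the pairing, assuming the usual idr_alloc() call shape:

idr_init_base(&fpriv->contexts, 1);

/* later, when a context is created */
id = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
if (id < 0)
    return id;        /* ids handed back are always >= 1 */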
-1
drivers/gpu/drm/tegra/output.c
··· 129 129 130 130 if (!output->ddc) { 131 131 err = -EPROBE_DEFER; 132 - of_node_put(ddc); 133 132 return err; 134 133 } 135 134 }
+35 -41
drivers/gpu/drm/tegra/sor.c
··· 397 397 struct tegra_sor_ops { 398 398 const char *name; 399 399 int (*probe)(struct tegra_sor *sor); 400 - int (*remove)(struct tegra_sor *sor); 401 400 void (*audio_enable)(struct tegra_sor *sor); 402 401 void (*audio_disable)(struct tegra_sor *sor); 403 402 }; ··· 2941 2942 .atomic_check = tegra_sor_encoder_atomic_check, 2942 2943 }; 2943 2944 2945 + static void tegra_sor_disable_regulator(void *data) 2946 + { 2947 + struct regulator *reg = data; 2948 + 2949 + regulator_disable(reg); 2950 + } 2951 + 2952 + static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg) 2953 + { 2954 + int err; 2955 + 2956 + err = regulator_enable(reg); 2957 + if (err) 2958 + return err; 2959 + 2960 + return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg); 2961 + } 2962 + 2944 2963 static int tegra_sor_hdmi_probe(struct tegra_sor *sor) 2945 2964 { 2946 2965 int err; ··· 2970 2953 return PTR_ERR(sor->avdd_io_supply); 2971 2954 } 2972 2955 2973 - err = regulator_enable(sor->avdd_io_supply); 2956 + err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply); 2974 2957 if (err < 0) { 2975 2958 dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", 2976 2959 err); ··· 2984 2967 return PTR_ERR(sor->vdd_pll_supply); 2985 2968 } 2986 2969 2987 - err = regulator_enable(sor->vdd_pll_supply); 2970 + err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply); 2988 2971 if (err < 0) { 2989 2972 dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n", 2990 2973 err); ··· 2998 2981 return PTR_ERR(sor->hdmi_supply); 2999 2982 } 3000 2983 3001 - err = regulator_enable(sor->hdmi_supply); 2984 + err = tegra_sor_enable_regulator(sor, sor->hdmi_supply); 3002 2985 if (err < 0) { 3003 2986 dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); 3004 2987 return err; ··· 3009 2992 return 0; 3010 2993 } 3011 2994 3012 - static int tegra_sor_hdmi_remove(struct tegra_sor *sor) 3013 - { 3014 - regulator_disable(sor->hdmi_supply); 3015 - regulator_disable(sor->vdd_pll_supply); 3016 - regulator_disable(sor->avdd_io_supply); 3017 - 3018 - return 0; 3019 - } 3020 - 3021 2995 static const struct tegra_sor_ops tegra_sor_hdmi_ops = { 3022 2996 .name = "HDMI", 3023 2997 .probe = tegra_sor_hdmi_probe, 3024 - .remove = tegra_sor_hdmi_remove, 3025 2998 .audio_enable = tegra_sor_hdmi_audio_enable, 3026 2999 .audio_disable = tegra_sor_hdmi_audio_disable, 3027 3000 }; ··· 3024 3017 if (IS_ERR(sor->avdd_io_supply)) 3025 3018 return PTR_ERR(sor->avdd_io_supply); 3026 3019 3027 - err = regulator_enable(sor->avdd_io_supply); 3020 + err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply); 3028 3021 if (err < 0) 3029 3022 return err; 3030 3023 ··· 3032 3025 if (IS_ERR(sor->vdd_pll_supply)) 3033 3026 return PTR_ERR(sor->vdd_pll_supply); 3034 3027 3035 - err = regulator_enable(sor->vdd_pll_supply); 3028 + err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply); 3036 3029 if (err < 0) 3037 3030 return err; 3038 - 3039 - return 0; 3040 - } 3041 - 3042 - static int tegra_sor_dp_remove(struct tegra_sor *sor) 3043 - { 3044 - regulator_disable(sor->vdd_pll_supply); 3045 - regulator_disable(sor->avdd_io_supply); 3046 3031 3047 3032 return 0; 3048 3033 } ··· 3042 3043 static const struct tegra_sor_ops tegra_sor_dp_ops = { 3043 3044 .name = "DP", 3044 3045 .probe = tegra_sor_dp_probe, 3045 - .remove = tegra_sor_dp_remove, 3046 3046 }; 3047 3047 3048 3048 static int tegra_sor_init(struct host1x_client *client) ··· 3143 3145 if (err < 0) { 3144 3146 dev_err(sor->dev, "failed to deassert SOR reset: 
%d\n", 3145 3147 err); 3148 + clk_disable_unprepare(sor->clk); 3146 3149 return err; 3147 3150 } 3148 3151 ··· 3151 3152 } 3152 3153 3153 3154 err = clk_prepare_enable(sor->clk_safe); 3154 - if (err < 0) 3155 + if (err < 0) { 3156 + clk_disable_unprepare(sor->clk); 3155 3157 return err; 3158 + } 3156 3159 3157 3160 err = clk_prepare_enable(sor->clk_dp); 3158 - if (err < 0) 3161 + if (err < 0) { 3162 + clk_disable_unprepare(sor->clk_safe); 3163 + clk_disable_unprepare(sor->clk); 3159 3164 return err; 3165 + } 3160 3166 3161 3167 return 0; 3162 3168 } ··· 3768 3764 return err; 3769 3765 3770 3766 err = tegra_output_probe(&sor->output); 3771 - if (err < 0) { 3772 - dev_err(&pdev->dev, "failed to probe output: %d\n", err); 3773 - return err; 3774 - } 3767 + if (err < 0) 3768 + return dev_err_probe(&pdev->dev, err, 3769 + "failed to probe output\n"); 3775 3770 3776 3771 if (sor->ops && sor->ops->probe) { 3777 3772 err = sor->ops->probe(sor); 3778 3773 if (err < 0) { 3779 3774 dev_err(&pdev->dev, "failed to probe %s: %d\n", 3780 3775 sor->ops->name, err); 3781 - goto output; 3776 + goto remove; 3782 3777 } 3783 3778 } 3784 3779 ··· 3958 3955 rpm_disable: 3959 3956 pm_runtime_disable(&pdev->dev); 3960 3957 remove: 3961 - if (sor->ops && sor->ops->remove) 3962 - sor->ops->remove(sor); 3963 - output: 3964 3958 tegra_output_remove(&sor->output); 3965 3959 return err; 3966 3960 } ··· 3975 3975 } 3976 3976 3977 3977 pm_runtime_disable(&pdev->dev); 3978 - 3979 - if (sor->ops && sor->ops->remove) { 3980 - err = sor->ops->remove(sor); 3981 - if (err < 0) 3982 - dev_err(&pdev->dev, "failed to remove SOR: %d\n", err); 3983 - } 3984 3978 3985 3979 tegra_output_remove(&sor->output); 3986 3980
+1
drivers/i2c/busses/Kconfig
··· 734 734 config I2C_MLXBF 735 735 tristate "Mellanox BlueField I2C controller" 736 736 depends on MELLANOX_PLATFORM && ARM64 737 + select I2C_SLAVE 737 738 help 738 739 Enabling this option will add I2C SMBus support for Mellanox BlueField 739 740 system.
+36 -8
drivers/i2c/busses/i2c-imx.c
··· 412 412 dma->chan_using = NULL; 413 413 } 414 414 415 + static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits) 416 + { 417 + unsigned int temp; 418 + 419 + /* 420 + * i2sr_clr_opcode is the value to clear all interrupts. Here we want to 421 + * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits> 422 + * toggled. This is required because i.MX needs W0C and Vybrid uses W1C. 423 + */ 424 + temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits; 425 + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); 426 + } 427 + 415 428 static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic) 416 429 { 417 430 unsigned long orig_jiffies = jiffies; ··· 437 424 438 425 /* check for arbitration lost */ 439 426 if (temp & I2SR_IAL) { 440 - temp &= ~I2SR_IAL; 441 - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); 427 + i2c_imx_clear_irq(i2c_imx, I2SR_IAL); 442 428 return -EAGAIN; 443 429 } 444 430 ··· 481 469 */ 482 470 readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100); 483 471 i2c_imx->i2csr = regval; 484 - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR); 472 + i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL); 485 473 } else { 486 474 wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10); 487 475 } ··· 490 478 dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); 491 479 return -ETIMEDOUT; 492 480 } 481 + 482 + /* check for arbitration lost */ 483 + if (i2c_imx->i2csr & I2SR_IAL) { 484 + dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__); 485 + i2c_imx_clear_irq(i2c_imx, I2SR_IAL); 486 + 487 + i2c_imx->i2csr = 0; 488 + return -EAGAIN; 489 + } 490 + 493 491 dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__); 494 492 i2c_imx->i2csr = 0; 495 493 return 0; ··· 615 593 /* Stop I2C transaction */ 616 594 dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); 617 595 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); 596 + if (!(temp & I2CR_MSTA)) 597 + i2c_imx->stopped = 1; 618 598 temp &= ~(I2CR_MSTA | I2CR_MTX); 619 599 if (i2c_imx->dma) 620 600 temp &= ~I2CR_DMAEN; ··· 647 623 if (temp & I2SR_IIF) { 648 624 /* save status register */ 649 625 i2c_imx->i2csr = temp; 650 - temp &= ~I2SR_IIF; 651 - temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF); 652 - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); 626 + i2c_imx_clear_irq(i2c_imx, I2SR_IIF); 653 627 wake_up(&i2c_imx->queue); 654 628 return IRQ_HANDLED; 655 629 } ··· 780 758 */ 781 759 dev_dbg(dev, "<%s> clear MSTA\n", __func__); 782 760 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); 761 + if (!(temp & I2CR_MSTA)) 762 + i2c_imx->stopped = 1; 783 763 temp &= ~(I2CR_MSTA | I2CR_MTX); 784 764 imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); 785 - i2c_imx_bus_busy(i2c_imx, 0, false); 765 + if (!i2c_imx->stopped) 766 + i2c_imx_bus_busy(i2c_imx, 0, false); 786 767 } else { 787 768 /* 788 769 * For i2c master receiver repeat restart operation like: ··· 910 885 dev_dbg(&i2c_imx->adapter.dev, 911 886 "<%s> clear MSTA\n", __func__); 912 887 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); 888 + if (!(temp & I2CR_MSTA)) 889 + i2c_imx->stopped = 1; 913 890 temp &= ~(I2CR_MSTA | I2CR_MTX); 914 891 imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); 915 - i2c_imx_bus_busy(i2c_imx, 0, atomic); 892 + if (!i2c_imx->stopped) 893 + i2c_imx_bus_busy(i2c_imx, 0, atomic); 916 894 } else { 917 895 /* 918 896 * For i2c master receiver repeat restart operation like:
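The `~i2sr_clr_opcode ^ bits` expression is worth unpacking, since it folds both clearing conventions into one line. A standalone demo with a hypothetical status register where bit 4 must be cleared and bit 1 preserved (the real I2SR bit positions are not assumed here):

#include <stdio.h>

int main(void)
{
    unsigned bits = 0x10;                   /* the flag to clear */

    /* write-0-to-clear (opcode 0x00): value 0xef, only the target
     * bit is 0 and every other flag sees a harmless 1 */
    unsigned w0c = 0xffu & (~0x00u ^ bits);

    /* write-1-to-clear (opcode 0x12, i.e. both flags clearable):
     * value 0xfd, the target bit is 1 and the other clearable
     * flag (bit 1) sees a harmless 0 */
    unsigned w1c = 0xffu & (~0x12u ^ bits);

    printf("w0c=0x%02x w1c=0x%02x\n", w0c, w1c);
    return 0;
}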
+6 -6
drivers/i2c/busses/i2c-mlxbf.c
··· 1258 1258 return -EFAULT; 1259 1259 1260 1260 gpio_res->io = devm_ioremap(dev, params->start, size); 1261 - if (IS_ERR(gpio_res->io)) { 1261 + if (!gpio_res->io) { 1262 1262 devm_release_mem_region(dev, params->start, size); 1263 - return PTR_ERR(gpio_res->io); 1263 + return -ENOMEM; 1264 1264 } 1265 1265 1266 1266 return 0; ··· 1323 1323 return -EFAULT; 1324 1324 1325 1325 corepll_res->io = devm_ioremap(dev, params->start, size); 1326 - if (IS_ERR(corepll_res->io)) { 1326 + if (!corepll_res->io) { 1327 1327 devm_release_mem_region(dev, params->start, size); 1328 - return PTR_ERR(corepll_res->io); 1328 + return -ENOMEM; 1329 1329 } 1330 1330 1331 1331 return 0; ··· 1717 1717 return -EFAULT; 1718 1718 1719 1719 coalesce_res->io = ioremap(params->start, size); 1720 - if (IS_ERR(coalesce_res->io)) { 1720 + if (!coalesce_res->io) { 1721 1721 release_mem_region(params->start, size); 1722 - return PTR_ERR(coalesce_res->io); 1722 + return -ENOMEM; 1723 1723 } 1724 1724 1725 1725 priv->coalesce = coalesce_res;
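All three hunks fix the same mismatch: ioremap() and devm_ioremap() report failure with NULL rather than an ERR_PTR, so the IS_ERR() tests could never fire and a failed mapping was treated as success. The check that matches the API:

void __iomem *io = devm_ioremap(dev, start, size);
if (!io)            /* NULL on failure; IS_ERR(io) is always false here */
    return -ENOMEM;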
+2 -2
drivers/i2c/busses/i2c-qcom-cci.c
··· 194 194 if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) { 195 195 if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR || 196 196 val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR) 197 - cci->master[0].status = -ENXIO; 197 + cci->master[1].status = -ENXIO; 198 198 else 199 - cci->master[0].status = -EIO; 199 + cci->master[1].status = -EIO; 200 200 201 201 writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ); 202 202 ret = IRQ_HANDLED;
+2 -1
drivers/i2c/busses/i2c-qup.c
··· 801 801 if (ret || qup->bus_err || qup->qup_err) { 802 802 reinit_completion(&qup->xfer); 803 803 804 - if (qup_i2c_change_state(qup, QUP_RUN_STATE)) { 804 + ret = qup_i2c_change_state(qup, QUP_RUN_STATE); 805 + if (ret) { 805 806 dev_err(qup->dev, "change to run state timed out"); 806 807 goto desc_err; 807 808 }
+14 -14
drivers/idle/intel_idle.c
··· 1140 1140 return false; 1141 1141 } 1142 1142 1143 + static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state) 1144 + { 1145 + unsigned long eax = flg2MWAIT(state->flags); 1146 + 1147 + if (boot_cpu_has(X86_FEATURE_ARAT)) 1148 + return false; 1149 + 1150 + /* 1151 + * Switch over to one-shot tick broadcast if the target C-state 1152 + * is deeper than C1. 1153 + */ 1154 + return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK); 1155 + } 1156 + 1143 1157 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE 1144 1158 #include <acpi/processor.h> 1145 1159 ··· 1222 1208 acpi_state_table.count = 0; 1223 1209 pr_debug("ACPI _CST not found or not usable\n"); 1224 1210 return false; 1225 - } 1226 - 1227 - static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state) 1228 - { 1229 - unsigned long eax = flg2MWAIT(state->flags); 1230 - 1231 - if (boot_cpu_has(X86_FEATURE_ARAT)) 1232 - return false; 1233 - 1234 - /* 1235 - * Switch over to one-shot tick broadcast if the target C-state 1236 - * is deeper than C1. 1237 - */ 1238 - return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK); 1239 1211 } 1240 1212 1241 1213 static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
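The relocated helper reads the C-state out of the MWAIT hint: the upper nibble is 0 for the C1 family and nonzero for deeper states, and only the latter force one-shot tick broadcast on CPUs without ARAT (the always-running APIC timer). A quick decode using the mask values from asm/mwait.h; the sample hints are illustrative, since exact encodings vary by CPU:

#include <stdio.h>

#define MWAIT_SUBSTATE_SIZE 4
#define MWAIT_CSTATE_MASK   0xf

int main(void)
{
    unsigned hints[] = { 0x00, 0x01, 0x20, 0x60 }; /* ~C1, C1E, C6, deeper */

    for (unsigned i = 0; i < 4; i++) {
        unsigned cstate = (hints[i] >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK;

        printf("hint 0x%02x: %s\n", hints[i],
               cstate ? "deeper than C1, stop tick and use broadcast"
                      : "C1 family, local timer keeps running");
    }
    return 0;
}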
-3
drivers/infiniband/core/cache.c
··· 1269 1269 unsigned long flags; 1270 1270 1271 1271 rdma_for_each_port(device, port_num) { 1272 - if (!rdma_ib_or_roce(device, port_num)) 1273 - continue; 1274 - 1275 1272 table = rdma_gid_table(device, port_num); 1276 1273 read_lock_irqsave(&table->rwlock, flags); 1277 1274 for (i = 0; i < table->sz; i++) {
+2
drivers/infiniband/core/cm.c
··· 1522 1522 id.local_id); 1523 1523 if (IS_ERR(cm_id_priv->timewait_info)) { 1524 1524 ret = PTR_ERR(cm_id_priv->timewait_info); 1525 + cm_id_priv->timewait_info = NULL; 1525 1526 goto out; 1526 1527 } 1527 1528 ··· 2115 2114 id.local_id); 2116 2115 if (IS_ERR(cm_id_priv->timewait_info)) { 2117 2116 ret = PTR_ERR(cm_id_priv->timewait_info); 2117 + cm_id_priv->timewait_info = NULL; 2118 2118 goto destroy; 2119 2119 } 2120 2120 cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
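Both hunks apply the same defensive idiom: after harvesting PTR_ERR() from a failed allocation the field still holds an ERR_PTR, and a shared error path that frees whatever is non-NULL would then operate on a poison pointer, so the field is reset to NULL, which the teardown code already tolerates. In the abstract (names hypothetical):

priv->thing = alloc_thing();
if (IS_ERR(priv->thing)) {
    ret = PTR_ERR(priv->thing);
    priv->thing = NULL;    /* shared teardown frees priv->thing if set */
    goto out;
}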
+2 -2
drivers/infiniband/hw/efa/efa_verbs.c
··· 940 940 1); 941 941 EFA_SET(&params.modify_mask, 942 942 EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1); 943 - params.cur_qp_state = qp_attr->cur_qp_state; 944 - params.qp_state = qp_attr->qp_state; 943 + params.cur_qp_state = cur_state; 944 + params.qp_state = new_state; 945 945 } 946 946 947 947 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
+9
drivers/infiniband/hw/qedr/verbs.c
··· 1936 1936 } 1937 1937 1938 1938 if (rdma_protocol_iwarp(&dev->ibdev, 1)) { 1939 + qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset; 1940 + 1941 + /* calculate the db_rec_db2 data since it is constant so no 1942 + * need to reflect from user 1943 + */ 1944 + qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid); 1945 + qp->urq.db_rec_db2_data.data.value = 1946 + cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD); 1947 + 1939 1948 rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr, 1940 1949 &qp->urq.db_rec_db2_data, 1941 1950 DB_REC_WIDTH_32B,
+2
drivers/input/joystick/xpad.c
··· 241 241 { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, 242 242 { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, 243 243 { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, 244 + { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, 244 245 { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, 245 246 { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, 246 247 { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ··· 419 418 XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ 420 419 XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ 421 420 XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ 421 + XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ 422 422 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ 423 423 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 424 424 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
+1
drivers/input/misc/soc_button_array.c
··· 9 9 #include <linux/module.h> 10 10 #include <linux/input.h> 11 11 #include <linux/init.h> 12 + #include <linux/irq.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/acpi.h> 14 15 #include <linux/dmi.h>
+4
drivers/input/serio/i8042-x86ia64io.h
··· 219 219 DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), 220 220 DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), 221 221 }, 222 + .matches = { 223 + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), 224 + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), 225 + }, 222 226 }, 223 227 { } 224 228 };
+2 -1
drivers/input/serio/i8042.c
··· 1471 1471 if (error) 1472 1472 goto err_free_ports; 1473 1473 1474 - if (aux_enable()) 1474 + error = aux_enable(); 1475 + if (error) 1475 1476 goto err_free_irq; 1476 1477 1477 1478 i8042_aux_irq_registered = true;
+2 -2
drivers/input/touchscreen/atmel_mxt_ts.c
··· 2183 2183 msleep(MXT_FW_RESET_TIME); 2184 2184 } 2185 2185 2186 - error = mxt_acquire_irq(data); 2186 + error = mxt_check_retrigen(data); 2187 2187 if (error) 2188 2188 return error; 2189 2189 2190 - error = mxt_check_retrigen(data); 2190 + error = mxt_acquire_irq(data); 2191 2191 if (error) 2192 2192 return error; 2193 2193
+1 -1
drivers/iommu/amd/amd_iommu_types.h
··· 257 257 #define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60) 258 258 #define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1) 259 259 #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) 260 - #define DTE_IRQ_TABLE_LEN (8ULL << 1) 260 + #define DTE_IRQ_TABLE_LEN (9ULL << 1) 261 261 #define DTE_IRQ_REMAP_ENABLE 1ULL 262 262 263 263 #define PAGE_MODE_NONE 0x00
-4
drivers/md/dm-cache-target.c
··· 712 712 return cache->sectors_per_block_shift >= 0; 713 713 } 714 714 715 - /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */ 716 - #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 717 - __always_inline 718 - #endif 719 715 static dm_block_t block_div(dm_block_t b, uint32_t n) 720 716 { 721 717 do_div(b, n);
+2 -2
drivers/md/dm-integrity.c
··· 3462 3462 int r; 3463 3463 3464 3464 if (a->alg_string) { 3465 - *hash = crypto_alloc_shash(a->alg_string, 0, 0); 3465 + *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); 3466 3466 if (IS_ERR(*hash)) { 3467 3467 *error = error_alg; 3468 3468 r = PTR_ERR(*hash); ··· 3519 3519 struct journal_completion comp; 3520 3520 3521 3521 comp.ic = ic; 3522 - ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0); 3522 + ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); 3523 3523 if (IS_ERR(ic->journal_crypt)) { 3524 3524 *error = "Invalid journal cipher"; 3525 3525 r = PTR_ERR(ic->journal_crypt);
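In the crypto API the third argument is a mask: a candidate implementation matches only when its masked flag bits equal the type argument, which is 0 here, so passing CRYPTO_ALG_ALLOCATES_MEMORY selects only implementations that do not allocate memory per request, something dm-integrity cannot risk on the I/O path. Usage stays a one-liner:

/* match only implementations that never allocate in the request path */
tfm = crypto_alloc_shash(alg_name, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(tfm))
    return PTR_ERR(tfm);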
-11
drivers/md/dm-table.c
··· 18 18 #include <linux/mutex.h> 19 19 #include <linux/delay.h> 20 20 #include <linux/atomic.h> 21 - #include <linux/lcm.h> 22 21 #include <linux/blk-mq.h> 23 22 #include <linux/mount.h> 24 23 #include <linux/dax.h> ··· 1246 1247 1247 1248 void dm_table_event(struct dm_table *t) 1248 1249 { 1249 - /* 1250 - * You can no longer call dm_table_event() from interrupt 1251 - * context, use a bottom half instead. 1252 - */ 1253 - BUG_ON(in_interrupt()); 1254 - 1255 1250 mutex_lock(&_event_lock); 1256 1251 if (t->event_fn) 1257 1252 t->event_fn(t->event_context); ··· 1448 1455 zone_sectors = ti_limits.chunk_sectors; 1449 1456 } 1450 1457 1451 - /* Stack chunk_sectors if target-specific splitting is required */ 1452 - if (ti->max_io_len) 1453 - ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len, 1454 - ti_limits.chunk_sectors); 1455 1458 /* Set I/O hints portion of queue limits */ 1456 1459 if (ti->type->io_hints) 1457 1460 ti->type->io_hints(ti, &ti_limits);
+4 -2
drivers/md/dm-writecache.c
··· 319 319 #else 320 320 static int persistent_memory_claim(struct dm_writecache *wc) 321 321 { 322 - BUG(); 322 + return -EOPNOTSUPP; 323 323 } 324 324 #endif 325 325 ··· 2041 2041 struct wc_memory_superblock s; 2042 2042 2043 2043 static struct dm_arg _args[] = { 2044 - {0, 10, "Invalid number of feature args"}, 2044 + {0, 16, "Invalid number of feature args"}, 2045 2045 }; 2046 2046 2047 2047 as.argc = argc; ··· 2478 2478 if (wc->autocommit_blocks_set) 2479 2479 extra_args += 2; 2480 2480 if (wc->autocommit_time_set) 2481 + extra_args += 2; 2482 + if (wc->max_age != MAX_AGE_UNSPECIFIED) 2481 2483 extra_args += 2; 2482 2484 if (wc->cleaner) 2483 2485 extra_args++;
+15 -14
drivers/md/dm.c
··· 476 476 return -EAGAIN; 477 477 478 478 map = dm_get_live_table(md, &srcu_idx); 479 - if (!map) 480 - return -EIO; 479 + if (!map) { 480 + ret = -EIO; 481 + goto out; 482 + } 481 483 482 484 do { 483 485 struct dm_target *tgt; ··· 509 507 510 508 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 511 509 struct block_device **bdev) 512 - __acquires(md->io_barrier) 513 510 { 514 511 struct dm_target *tgt; 515 512 struct dm_table *map; ··· 542 541 } 543 542 544 543 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) 545 - __releases(md->io_barrier) 546 544 { 547 545 dm_put_live_table(md, srcu_idx); 548 546 } ··· 1037 1037 sector_t max_len; 1038 1038 1039 1039 /* 1040 - * Does the target need to split even further? 1041 - * - q->limits.chunk_sectors reflects ti->max_io_len so 1042 - * blk_max_size_offset() provides required splitting. 1043 - * - blk_max_size_offset() also respects q->limits.max_sectors 1040 + * Does the target need to split IO even further? 1041 + * - varied (per target) IO splitting is a tenet of DM; this 1042 + * explains why stacked chunk_sectors based splitting via 1043 + * blk_max_size_offset() isn't possible here. So pass in 1044 + * ti->max_io_len to override stacked chunk_sectors. 1044 1045 */ 1045 - max_len = blk_max_size_offset(ti->table->md->queue, 1046 - target_offset); 1047 - if (len > max_len) 1048 - len = max_len; 1046 + if (ti->max_io_len) { 1047 + max_len = blk_max_size_offset(ti->table->md->queue, 1048 + target_offset, ti->max_io_len); 1049 + if (len > max_len) 1050 + len = max_len; 1051 + } 1049 1052 1050 1053 return len; 1051 1054 } ··· 1199 1196 * ->zero_page_range() is mandatory dax operation. If we are 1200 1197 * here, something is wrong. 1201 1198 */ 1202 - dm_put_live_table(md, srcu_idx); 1203 1199 goto out; 1204 1200 } 1205 1201 ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); 1206 - 1207 1202 out: 1208 1203 dm_put_live_table(md, srcu_idx); 1209 1204
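blk_max_size_offset() caps an I/O so it never crosses a chunk boundary, and the new third argument lets DM substitute ti->max_io_len for the queue-wide chunk_sectors on a per-target basis. For a power-of-two chunk the arithmetic is just the distance to the next boundary; this sketch ignores max_sectors and non-power-of-two chunks, which the real helper also handles:

#include <stdio.h>

static unsigned cap_to_chunk(unsigned long long offset, unsigned chunk,
                             unsigned len)
{
    unsigned left = chunk - (offset & (chunk - 1));

    return len < left ? len : left;
}

int main(void)
{
    /* chunk of 128 sectors, I/O starting at sector 100: only 28
     * sectors remain before the boundary, so a 256-sector I/O is
     * split there */
    printf("%u\n", cap_to_chunk(100, 128, 256));  /* prints 28 */
    return 0;
}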
+34 -18
drivers/media/cec/usb/pulse8/pulse8-cec.c
··· 88 88 MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */ 89 89 MSGCODE_GET_DEVICE_TYPE, 90 90 MSGCODE_SET_DEVICE_TYPE, 91 - MSGCODE_GET_HDMI_VERSION, 91 + MSGCODE_GET_HDMI_VERSION, /* Removed in FW >= 10 */ 92 92 MSGCODE_SET_HDMI_VERSION, 93 93 MSGCODE_GET_OSD_NAME, 94 94 MSGCODE_SET_OSD_NAME, 95 95 MSGCODE_WRITE_EEPROM, 96 96 MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */ 97 97 MSGCODE_SET_ACTIVE_SOURCE, 98 + MSGCODE_GET_AUTO_POWER_ON, /* New for FW >= 10 */ 99 + MSGCODE_SET_AUTO_POWER_ON, 98 100 99 101 MSGCODE_FRAME_EOM = 0x80, 100 102 MSGCODE_FRAME_ACK = 0x40, ··· 145 143 "WRITE_EEPROM", 146 144 "GET_ADAPTER_TYPE", 147 145 "SET_ACTIVE_SOURCE", 146 + "GET_AUTO_POWER_ON", 147 + "SET_AUTO_POWER_ON", 148 148 }; 149 149 150 150 static const char *pulse8_msgname(u8 cmd) ··· 583 579 if (err) 584 580 goto unlock; 585 581 586 - cmd[0] = MSGCODE_SET_HDMI_VERSION; 587 - cmd[1] = adap->log_addrs.cec_version; 588 - err = pulse8_send_and_wait(pulse8, cmd, 2, 589 - MSGCODE_COMMAND_ACCEPTED, 0); 590 - if (err) 591 - goto unlock; 582 + if (pulse8->vers < 10) { 583 + cmd[0] = MSGCODE_SET_HDMI_VERSION; 584 + cmd[1] = adap->log_addrs.cec_version; 585 + err = pulse8_send_and_wait(pulse8, cmd, 2, 586 + MSGCODE_COMMAND_ACCEPTED, 0); 587 + if (err) 588 + goto unlock; 589 + } 592 590 593 591 if (adap->log_addrs.osd_name[0]) { 594 592 size_t osd_len = strlen(adap->log_addrs.osd_name); ··· 656 650 struct pulse8 *pulse8 = serio_get_drvdata(serio); 657 651 658 652 cec_unregister_adapter(pulse8->adap); 659 - pulse8->serio = NULL; 660 653 serio_set_drvdata(serio, NULL); 661 654 serio_close(serio); 662 655 } ··· 696 691 pulse8->autonomous = data[0]; 697 692 dev_dbg(pulse8->dev, "Autonomous mode: %s", 698 693 data[0] ? "on" : "off"); 694 + 695 + if (pulse8->vers >= 10) { 696 + cmd[0] = MSGCODE_GET_AUTO_POWER_ON; 697 + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); 698 + if (!err) 699 + dev_dbg(pulse8->dev, "Auto Power On: %s", 700 + data[0] ? "on" : "off"); 701 + } 699 702 700 703 cmd[0] = MSGCODE_GET_DEVICE_TYPE; 701 704 err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); ··· 766 753 dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n", 767 754 cec_phys_addr_exp(*pa)); 768 755 769 - cmd[0] = MSGCODE_GET_HDMI_VERSION; 770 - err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); 771 - if (err) 772 - return err; 773 - log_addrs->cec_version = data[0]; 774 - dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version); 756 + log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4; 757 + if (pulse8->vers < 10) { 758 + cmd[0] = MSGCODE_GET_HDMI_VERSION; 759 + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); 760 + if (err) 761 + return err; 762 + log_addrs->cec_version = data[0]; 763 + dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version); 764 + } 775 765 776 766 cmd[0] = MSGCODE_GET_OSD_NAME; 777 767 err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0); ··· 846 830 pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8, 847 831 dev_name(&serio->dev), caps, 1); 848 832 err = PTR_ERR_OR_ZERO(pulse8->adap); 849 - if (err < 0) 850 - goto free_device; 833 + if (err < 0) { 834 + kfree(pulse8); 835 + return err; 836 + } 851 837 852 838 pulse8->dev = &serio->dev; 853 839 serio_set_drvdata(serio, pulse8); ··· 892 874 serio_close(serio); 893 875 delete_adap: 894 876 cec_delete_adapter(pulse8->adap); 895 - free_device: 896 - kfree(pulse8); 897 877 return err; 898 878 } 899 879
+11
drivers/media/common/videobuf2/videobuf2-core.c
··· 414 414 vb->index = q->num_buffers + buffer; 415 415 vb->type = q->type; 416 416 vb->memory = memory; 417 + /* 418 + * We need to set these flags here so that the videobuf2 core 419 + * will call ->prepare()/->finish() cache sync/flush on vb2 420 + * buffers when appropriate. However, we can avoid explicit 421 + * ->prepare() and ->finish() cache sync for DMABUF buffers, 422 + * because DMA exporter takes care of it. 423 + */ 424 + if (q->memory != VB2_MEMORY_DMABUF) { 425 + vb->need_cache_sync_on_prepare = 1; 426 + vb->need_cache_sync_on_finish = 1; 427 + } 417 428 for (plane = 0; plane < num_planes; ++plane) { 418 429 vb->planes[plane].length = plane_sizes[plane]; 419 430 vb->planes[plane].min_length = plane_sizes[plane];
+3 -6
drivers/media/rc/mtk-cir.c
··· 151 151 { 152 152 u32 val; 153 153 154 - /* Period of raw software sampling in ns */ 155 - val = DIV_ROUND_CLOSEST(1000000000ul, 156 - clk_get_rate(ir->bus) / ir->data->div); 157 - 158 154 /* 159 155 * Period for software decoder used in the 160 156 * unit of raw software sampling 161 157 */ 162 - val = DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, val); 158 + val = DIV_ROUND_CLOSEST(clk_get_rate(ir->bus), 159 + USEC_PER_SEC * ir->data->div / MTK_IR_SAMPLE); 163 160 164 161 dev_dbg(ir->dev, "@pwm clk = \t%lu\n", 165 162 clk_get_rate(ir->bus) / ir->data->div); ··· 409 412 mtk_irq_enable(ir, MTK_IRINT_EN); 410 413 411 414 dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n", 412 - DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000)); 415 + MTK_IR_SAMPLE); 413 416 414 417 return 0; 415 418
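The rewrite removes a double rounding: converting the clock to nanoseconds per tick first and then dividing the sample window by that already-rounded figure compounds the error, while a single DIV_ROUND_CLOSEST over the raw clock keeps full precision. A demonstration with illustrative numbers (a 25.5 MHz clock and a 46 us window, not values taken from the driver):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
    unsigned long clk = 25500000, sample_us = 46;

    unsigned long ns  = DIV_ROUND_CLOSEST(1000000000ul, clk);        /* 39 */
    unsigned long two = DIV_ROUND_CLOSEST(sample_us * 1000, ns);     /* 1179 */
    unsigned long one = DIV_ROUND_CLOSEST(clk, 1000000 / sample_us); /* 1173 */

    /* exact value is 25.5e6 * 46e-6 = 1173.0: the two-step version
     * drifts by six counts, the one-step version lands on it */
    printf("two-step=%lu one-step=%lu\n", two, one);
    return 0;
}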
+2 -2
drivers/media/test-drivers/vidtv/vidtv_channel.c
··· 504 504 { 505 505 u32 i; 506 506 507 - vidtv_psi_pat_table_destroy(m->si.pat); 508 - 509 507 for (i = 0; i < m->si.pat->num_pmt; ++i) 510 508 vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]); 509 + 510 + vidtv_psi_pat_table_destroy(m->si.pat); 511 511 512 512 kfree(m->si.pmt_secs); 513 513 vidtv_psi_sdt_table_destroy(m->si.sdt);
+4 -4
drivers/media/test-drivers/vidtv/vidtv_psi.h
··· 420 420 struct vidtv_psi_desc *desc); 421 421 422 422 /** 423 - * vidtv_psi_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section. 423 + * vidtv_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section. 424 424 * @pmt: The PMT section that will contain the descriptor loop 425 425 * @to: Where in the PMT to assign this descriptor loop to 426 426 * @desc: The descriptor loop that will be assigned. ··· 434 434 struct vidtv_psi_desc *desc); 435 435 436 436 /** 437 - * vidtv_psi_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT. 437 + * vidtv_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT. 438 438 * @sdt: The SDT that will contain the descriptor loop 439 439 * @to: Where in the PMT to assign this descriptor loop to 440 440 * @desc: The descriptor loop that will be assigned. ··· 474 474 struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc); 475 475 476 476 /** 477 - * vidtv_psi_create_sec_for_each_pat_entry - Create a PMT section for each 477 + * vidtv_psi_pmt_create_sec_for_each_pat_entry - Create a PMT section for each 478 478 * program found in the PAT 479 479 * @pat: The PAT to look for programs. 480 480 * @pcr_pid: packet ID for the PCR to be used for the program described in this ··· 743 743 struct vidtv_psi_table_eit 744 744 *vidtv_psi_eit_table_init(u16 network_id, 745 745 u16 transport_stream_id, 746 - u16 service_id); 746 + __be16 service_id); 747 747 748 748 /** 749 749 * struct vidtv_psi_eit_write_args - Arguments for writing an EIT section
+3 -1
drivers/media/test-drivers/vidtv/vidtv_s302m.c
··· 467 467 e->is_video_encoder = false; 468 468 469 469 ctx = kzalloc(priv_sz, GFP_KERNEL); 470 - if (!ctx) 470 + if (!ctx) { 471 + kfree(e); 471 472 return NULL; 473 + } 472 474 473 475 e->ctx = ctx; 474 476 ctx->last_duration = 0;
+1 -1
drivers/media/test-drivers/vidtv/vidtv_ts.h
··· 44 44 u8 adaptation_field:1; 45 45 u8 scrambling:2; 46 46 } __packed; 47 - struct vidtv_mpeg_ts_adaption adaption[]; 47 + struct vidtv_mpeg_ts_adaption *adaption; 48 48 } __packed; 49 49 50 50 /**
+8 -8
drivers/misc/habanalabs/common/device.c
··· 231 231 232 232 static void device_cdev_sysfs_del(struct hl_device *hdev) 233 233 { 234 - /* device_release() won't be called so must free devices explicitly */ 235 - if (!hdev->cdev_sysfs_created) { 236 - kfree(hdev->dev_ctrl); 237 - kfree(hdev->dev); 238 - return; 239 - } 234 + if (!hdev->cdev_sysfs_created) 235 + goto put_devices; 240 236 241 237 hl_sysfs_fini(hdev); 242 238 cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); 243 239 cdev_device_del(&hdev->cdev, hdev->dev); 240 + 241 + put_devices: 242 + put_device(hdev->dev); 243 + put_device(hdev->dev_ctrl); 244 244 } 245 245 246 246 /* ··· 1371 1371 early_fini: 1372 1372 device_early_fini(hdev); 1373 1373 free_dev_ctrl: 1374 - kfree(hdev->dev_ctrl); 1374 + put_device(hdev->dev_ctrl); 1375 1375 free_dev: 1376 - kfree(hdev->dev); 1376 + put_device(hdev->dev); 1377 1377 out_disabled: 1378 1378 hdev->disabled = true; 1379 1379 if (add_cdev_sysfs_on_err)
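The habanalabs change follows the standard rule for embedded struct device lifetimes: once the device has been initialized and handed to the driver core, its memory belongs to the refcount, so error paths must drop a reference instead of kfree()ing and let ->release() do the freeing exactly once. Skeleton of the rule, matching the error path above:

err = cdev_device_add(&hdev->cdev, hdev->dev);
if (err) {
    put_device(hdev->dev);  /* not kfree(): ->release() frees it */
    return err;
}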
+1
drivers/misc/habanalabs/common/memory.c
··· 1626 1626 goto host_hpage_range_err; 1627 1627 } 1628 1628 } else { 1629 + kfree(ctx->host_huge_va_range); 1629 1630 ctx->host_huge_va_range = ctx->host_va_range; 1630 1631 } 1631 1632
-10
drivers/misc/mei/Kconfig
··· 46 46 Supported SoCs: 47 47 Intel Bay Trail 48 48 49 - config INTEL_MEI_VIRTIO 50 - tristate "Intel MEI interface emulation with virtio framework" 51 - select INTEL_MEI 52 - depends on X86 && PCI && VIRTIO_PCI 53 - help 54 - This module implements mei hw emulation over virtio transport. 55 - The module will be called mei_virtio. 56 - Enable this if your virtual machine supports virtual mei 57 - device over virtio. 58 - 59 49 source "drivers/misc/mei/hdcp/Kconfig"
-3
drivers/misc/mei/Makefile
··· 22 22 mei-txe-objs := pci-txe.o 23 23 mei-txe-objs += hw-txe.o 24 24 25 - obj-$(CONFIG_INTEL_MEI_VIRTIO) += mei-virtio.o 26 - mei-virtio-objs := hw-virtio.o 27 - 28 25 mei-$(CONFIG_EVENT_TRACING) += mei-trace.o 29 26 CFLAGS_mei-trace.o = -I$(src) 30 27
-874
drivers/misc/mei/hw-virtio.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Intel Management Engine Interface (Intel MEI) Linux driver 4 - * Copyright (c) 2018-2020, Intel Corporation. 5 - */ 6 - #include <linux/err.h> 7 - #include <linux/module.h> 8 - #include <linux/pm_runtime.h> 9 - #include <linux/scatterlist.h> 10 - #include <linux/spinlock.h> 11 - #include <linux/slab.h> 12 - #include <linux/virtio.h> 13 - #include <linux/virtio_config.h> 14 - #include <linux/virtio_ids.h> 15 - #include <linux/atomic.h> 16 - 17 - #include "mei_dev.h" 18 - #include "hbm.h" 19 - #include "client.h" 20 - 21 - #define MEI_VIRTIO_RPM_TIMEOUT 500 22 - /* ACRN virtio device types */ 23 - #ifndef VIRTIO_ID_MEI 24 - #define VIRTIO_ID_MEI 0xFFFE /* virtio mei */ 25 - #endif 26 - 27 - /** 28 - * struct mei_virtio_cfg - settings passed from the virtio backend 29 - * @buf_depth: read buffer depth in slots (4bytes) 30 - * @hw_ready: hw is ready for operation 31 - * @host_reset: synchronize reset with virtio backend 32 - * @reserved: reserved for alignment 33 - * @fw_status: FW status 34 - */ 35 - struct mei_virtio_cfg { 36 - u32 buf_depth; 37 - u8 hw_ready; 38 - u8 host_reset; 39 - u8 reserved[2]; 40 - u32 fw_status[MEI_FW_STATUS_MAX]; 41 - } __packed; 42 - 43 - struct mei_virtio_hw { 44 - struct mei_device mdev; 45 - char name[32]; 46 - 47 - struct virtqueue *in; 48 - struct virtqueue *out; 49 - 50 - bool host_ready; 51 - struct work_struct intr_handler; 52 - 53 - u32 *recv_buf; 54 - u8 recv_rdy; 55 - size_t recv_sz; 56 - u32 recv_idx; 57 - u32 recv_len; 58 - 59 - /* send buffer */ 60 - atomic_t hbuf_ready; 61 - const void *send_hdr; 62 - const void *send_buf; 63 - 64 - struct mei_virtio_cfg cfg; 65 - }; 66 - 67 - #define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev) 68 - 69 - /** 70 - * mei_virtio_fw_status() - read status register of mei 71 - * @dev: mei device 72 - * @fw_status: fw status register values 73 - * 74 - * Return: always 0 75 - */ 76 - static int mei_virtio_fw_status(struct mei_device *dev, 77 - struct mei_fw_status *fw_status) 78 - { 79 - struct virtio_device *vdev = dev_to_virtio(dev->dev); 80 - 81 - fw_status->count = MEI_FW_STATUS_MAX; 82 - virtio_cread_bytes(vdev, offsetof(struct mei_virtio_cfg, fw_status), 83 - fw_status->status, sizeof(fw_status->status)); 84 - return 0; 85 - } 86 - 87 - /** 88 - * mei_virtio_pg_state() - translate internal pg state 89 - * to the mei power gating state 90 - * There is no power management in ACRN mode always return OFF 91 - * @dev: mei device 92 - * 93 - * Return: 94 - * * MEI_PG_OFF - if aliveness is on (always) 95 - * * MEI_PG_ON - (never) 96 - */ 97 - static inline enum mei_pg_state mei_virtio_pg_state(struct mei_device *dev) 98 - { 99 - return MEI_PG_OFF; 100 - } 101 - 102 - /** 103 - * mei_virtio_hw_config() - configure hw dependent settings 104 - * 105 - * @dev: mei device 106 - * 107 - * Return: always 0 108 - */ 109 - static int mei_virtio_hw_config(struct mei_device *dev) 110 - { 111 - return 0; 112 - } 113 - 114 - /** 115 - * mei_virtio_hbuf_empty_slots() - counts write empty slots. 116 - * @dev: the device structure 117 - * 118 - * Return: always return frontend buf size if buffer is ready, 0 otherwise 119 - */ 120 - static int mei_virtio_hbuf_empty_slots(struct mei_device *dev) 121 - { 122 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 123 - 124 - return (atomic_read(&hw->hbuf_ready) == 1) ? 
hw->cfg.buf_depth : 0; 125 - } 126 - 127 - /** 128 - * mei_virtio_hbuf_is_ready() - checks if write buffer is ready 129 - * @dev: the device structure 130 - * 131 - * Return: true if hbuf is ready 132 - */ 133 - static bool mei_virtio_hbuf_is_ready(struct mei_device *dev) 134 - { 135 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 136 - 137 - return atomic_read(&hw->hbuf_ready) == 1; 138 - } 139 - 140 - /** 141 - * mei_virtio_hbuf_max_depth() - returns depth of FE write buffer. 142 - * @dev: the device structure 143 - * 144 - * Return: size of frontend write buffer in bytes 145 - */ 146 - static u32 mei_virtio_hbuf_depth(const struct mei_device *dev) 147 - { 148 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 149 - 150 - return hw->cfg.buf_depth; 151 - } 152 - 153 - /** 154 - * mei_virtio_intr_clear() - clear and stop interrupts 155 - * @dev: the device structure 156 - */ 157 - static void mei_virtio_intr_clear(struct mei_device *dev) 158 - { 159 - /* 160 - * In our virtio solution, there are two types of interrupts, 161 - * vq interrupt and config change interrupt. 162 - * 1) start/reset rely on virtio config changed interrupt; 163 - * 2) send/recv rely on virtio virtqueue interrupts. 164 - * They are all virtual interrupts. So, we don't have corresponding 165 - * operation to do here. 166 - */ 167 - } 168 - 169 - /** 170 - * mei_virtio_intr_enable() - enables mei BE virtqueues callbacks 171 - * @dev: the device structure 172 - */ 173 - static void mei_virtio_intr_enable(struct mei_device *dev) 174 - { 175 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 176 - struct virtio_device *vdev = dev_to_virtio(dev->dev); 177 - 178 - virtio_config_enable(vdev); 179 - 180 - virtqueue_enable_cb(hw->in); 181 - virtqueue_enable_cb(hw->out); 182 - } 183 - 184 - /** 185 - * mei_virtio_intr_disable() - disables mei BE virtqueues callbacks 186 - * 187 - * @dev: the device structure 188 - */ 189 - static void mei_virtio_intr_disable(struct mei_device *dev) 190 - { 191 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 192 - struct virtio_device *vdev = dev_to_virtio(dev->dev); 193 - 194 - virtio_config_disable(vdev); 195 - 196 - virtqueue_disable_cb(hw->in); 197 - virtqueue_disable_cb(hw->out); 198 - } 199 - 200 - /** 201 - * mei_virtio_synchronize_irq() - wait for pending IRQ handlers for all 202 - * virtqueue 203 - * @dev: the device structure 204 - */ 205 - static void mei_virtio_synchronize_irq(struct mei_device *dev) 206 - { 207 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 208 - 209 - /* 210 - * Now, all IRQ handlers are converted to workqueue. 211 - * Change synchronize irq to flush this work. 212 - */ 213 - flush_work(&hw->intr_handler); 214 - } 215 - 216 - static void mei_virtio_free_outbufs(struct mei_virtio_hw *hw) 217 - { 218 - kfree(hw->send_hdr); 219 - kfree(hw->send_buf); 220 - hw->send_hdr = NULL; 221 - hw->send_buf = NULL; 222 - } 223 - 224 - /** 225 - * mei_virtio_write_message() - writes a message to mei virtio back-end service. 
226 - * @dev: the device structure 227 - * @hdr: mei header of message 228 - * @hdr_len: header length 229 - * @data: message payload will be written 230 - * @data_len: message payload length 231 - * 232 - * Return: 233 - * * 0: on success 234 - * * -EIO: if write has failed 235 - * * -ENOMEM: on memory allocation failure 236 - */ 237 - static int mei_virtio_write_message(struct mei_device *dev, 238 - const void *hdr, size_t hdr_len, 239 - const void *data, size_t data_len) 240 - { 241 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 242 - struct scatterlist sg[2]; 243 - const void *hbuf, *dbuf; 244 - int ret; 245 - 246 - if (WARN_ON(!atomic_add_unless(&hw->hbuf_ready, -1, 0))) 247 - return -EIO; 248 - 249 - hbuf = kmemdup(hdr, hdr_len, GFP_KERNEL); 250 - hw->send_hdr = hbuf; 251 - 252 - dbuf = kmemdup(data, data_len, GFP_KERNEL); 253 - hw->send_buf = dbuf; 254 - 255 - if (!hbuf || !dbuf) { 256 - ret = -ENOMEM; 257 - goto fail; 258 - } 259 - 260 - sg_init_table(sg, 2); 261 - sg_set_buf(&sg[0], hbuf, hdr_len); 262 - sg_set_buf(&sg[1], dbuf, data_len); 263 - 264 - ret = virtqueue_add_outbuf(hw->out, sg, 2, hw, GFP_KERNEL); 265 - if (ret) { 266 - dev_err(dev->dev, "failed to add outbuf\n"); 267 - goto fail; 268 - } 269 - 270 - virtqueue_kick(hw->out); 271 - return 0; 272 - fail: 273 - 274 - mei_virtio_free_outbufs(hw); 275 - 276 - return ret; 277 - } 278 - 279 - /** 280 - * mei_virtio_count_full_read_slots() - counts read full slots. 281 - * @dev: the device structure 282 - * 283 - * Return: -EOVERFLOW if overflow, otherwise filled slots count 284 - */ 285 - static int mei_virtio_count_full_read_slots(struct mei_device *dev) 286 - { 287 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 288 - 289 - if (hw->recv_idx > hw->recv_len) 290 - return -EOVERFLOW; 291 - 292 - return hw->recv_len - hw->recv_idx; 293 - } 294 - 295 - /** 296 - * mei_virtio_read_hdr() - Reads 32bit dword from mei virtio receive buffer 297 - * 298 - * @dev: the device structure 299 - * 300 - * Return: 32bit dword of receive buffer (u32) 301 - */ 302 - static inline u32 mei_virtio_read_hdr(const struct mei_device *dev) 303 - { 304 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 305 - 306 - WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1); 307 - 308 - return hw->recv_buf[hw->recv_idx++]; 309 - } 310 - 311 - static int mei_virtio_read(struct mei_device *dev, unsigned char *buffer, 312 - unsigned long len) 313 - { 314 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 315 - u32 slots = mei_data2slots(len); 316 - 317 - if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots)) 318 - return -EOVERFLOW; 319 - 320 - /* 321 - * Assumption: There is only one MEI message in recv_buf each time. 322 - * Backend service need follow this rule too. 
323 - */ 324 - memcpy(buffer, hw->recv_buf + hw->recv_idx, len); 325 - hw->recv_idx += slots; 326 - 327 - return 0; 328 - } 329 - 330 - static bool mei_virtio_pg_is_enabled(struct mei_device *dev) 331 - { 332 - return false; 333 - } 334 - 335 - static bool mei_virtio_pg_in_transition(struct mei_device *dev) 336 - { 337 - return false; 338 - } 339 - 340 - static void mei_virtio_add_recv_buf(struct mei_virtio_hw *hw) 341 - { 342 - struct scatterlist sg; 343 - 344 - if (hw->recv_rdy) /* not needed */ 345 - return; 346 - 347 - /* refill the recv_buf to IN virtqueue to get next message */ 348 - sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth)); 349 - hw->recv_len = 0; 350 - hw->recv_idx = 0; 351 - hw->recv_rdy = 1; 352 - virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL); 353 - virtqueue_kick(hw->in); 354 - } 355 - 356 - /** 357 - * mei_virtio_hw_is_ready() - check whether the BE(hw) has turned ready 358 - * @dev: mei device 359 - * Return: bool 360 - */ 361 - static bool mei_virtio_hw_is_ready(struct mei_device *dev) 362 - { 363 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 364 - struct virtio_device *vdev = dev_to_virtio(dev->dev); 365 - 366 - virtio_cread(vdev, struct mei_virtio_cfg, 367 - hw_ready, &hw->cfg.hw_ready); 368 - 369 - dev_dbg(dev->dev, "hw ready %d\n", hw->cfg.hw_ready); 370 - 371 - return hw->cfg.hw_ready; 372 - } 373 - 374 - /** 375 - * mei_virtio_hw_reset - resets virtio hw. 376 - * 377 - * @dev: the device structure 378 - * @intr_enable: virtio use data/config callbacks 379 - * 380 - * Return: 0 on success an error code otherwise 381 - */ 382 - static int mei_virtio_hw_reset(struct mei_device *dev, bool intr_enable) 383 - { 384 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 385 - struct virtio_device *vdev = dev_to_virtio(dev->dev); 386 - 387 - dev_dbg(dev->dev, "hw reset\n"); 388 - 389 - dev->recvd_hw_ready = false; 390 - hw->host_ready = false; 391 - atomic_set(&hw->hbuf_ready, 0); 392 - hw->recv_len = 0; 393 - hw->recv_idx = 0; 394 - 395 - hw->cfg.host_reset = 1; 396 - virtio_cwrite(vdev, struct mei_virtio_cfg, 397 - host_reset, &hw->cfg.host_reset); 398 - 399 - mei_virtio_hw_is_ready(dev); 400 - 401 - if (intr_enable) 402 - mei_virtio_intr_enable(dev); 403 - 404 - return 0; 405 - } 406 - 407 - /** 408 - * mei_virtio_hw_reset_release() - release device from the reset 409 - * @dev: the device structure 410 - */ 411 - static void mei_virtio_hw_reset_release(struct mei_device *dev) 412 - { 413 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 414 - struct virtio_device *vdev = dev_to_virtio(dev->dev); 415 - 416 - dev_dbg(dev->dev, "hw reset release\n"); 417 - hw->cfg.host_reset = 0; 418 - virtio_cwrite(vdev, struct mei_virtio_cfg, 419 - host_reset, &hw->cfg.host_reset); 420 - } 421 - 422 - /** 423 - * mei_virtio_hw_ready_wait() - wait until the virtio(hw) has turned ready 424 - * or timeout is reached 425 - * @dev: mei device 426 - * 427 - * Return: 0 on success, error otherwise 428 - */ 429 - static int mei_virtio_hw_ready_wait(struct mei_device *dev) 430 - { 431 - mutex_unlock(&dev->device_lock); 432 - wait_event_timeout(dev->wait_hw_ready, 433 - dev->recvd_hw_ready, 434 - mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); 435 - mutex_lock(&dev->device_lock); 436 - if (!dev->recvd_hw_ready) { 437 - dev_err(dev->dev, "wait hw ready failed\n"); 438 - return -ETIMEDOUT; 439 - } 440 - 441 - dev->recvd_hw_ready = false; 442 - return 0; 443 - } 444 - 445 - /** 446 - * mei_virtio_hw_start() - hw start routine 447 - * @dev: mei device 448 - * 449 - * Return: 0 
on success, error otherwise 450 - */ 451 - static int mei_virtio_hw_start(struct mei_device *dev) 452 - { 453 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 454 - int ret; 455 - 456 - dev_dbg(dev->dev, "hw start\n"); 457 - mei_virtio_hw_reset_release(dev); 458 - 459 - ret = mei_virtio_hw_ready_wait(dev); 460 - if (ret) 461 - return ret; 462 - 463 - mei_virtio_add_recv_buf(hw); 464 - atomic_set(&hw->hbuf_ready, 1); 465 - dev_dbg(dev->dev, "hw is ready\n"); 466 - hw->host_ready = true; 467 - 468 - return 0; 469 - } 470 - 471 - /** 472 - * mei_virtio_host_is_ready() - check whether the FE has turned ready 473 - * @dev: mei device 474 - * 475 - * Return: bool 476 - */ 477 - static bool mei_virtio_host_is_ready(struct mei_device *dev) 478 - { 479 - struct mei_virtio_hw *hw = to_virtio_hw(dev); 480 - 481 - dev_dbg(dev->dev, "host ready %d\n", hw->host_ready); 482 - 483 - return hw->host_ready; 484 - } 485 - 486 - /** 487 - * mei_virtio_data_in() - The callback of recv virtqueue of virtio mei 488 - * @vq: receiving virtqueue 489 - */ 490 - static void mei_virtio_data_in(struct virtqueue *vq) 491 - { 492 - struct mei_virtio_hw *hw = vq->vdev->priv; 493 - 494 - /* disable interrupts (enabled again from in the interrupt worker) */ 495 - virtqueue_disable_cb(hw->in); 496 - 497 - schedule_work(&hw->intr_handler); 498 - } 499 - 500 - /** 501 - * mei_virtio_data_out() - The callback of send virtqueue of virtio mei 502 - * @vq: transmitting virtqueue 503 - */ 504 - static void mei_virtio_data_out(struct virtqueue *vq) 505 - { 506 - struct mei_virtio_hw *hw = vq->vdev->priv; 507 - 508 - schedule_work(&hw->intr_handler); 509 - } 510 - 511 - static void mei_virtio_intr_handler(struct work_struct *work) 512 - { 513 - struct mei_virtio_hw *hw = 514 - container_of(work, struct mei_virtio_hw, intr_handler); 515 - struct mei_device *dev = &hw->mdev; 516 - LIST_HEAD(complete_list); 517 - s32 slots; 518 - int rets = 0; 519 - void *data; 520 - unsigned int len; 521 - 522 - mutex_lock(&dev->device_lock); 523 - 524 - if (dev->dev_state == MEI_DEV_DISABLED) { 525 - dev_warn(dev->dev, "Interrupt in disabled state.\n"); 526 - mei_virtio_intr_disable(dev); 527 - goto end; 528 - } 529 - 530 - /* check if ME wants a reset */ 531 - if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { 532 - dev_warn(dev->dev, "BE service not ready: resetting.\n"); 533 - schedule_work(&dev->reset_work); 534 - goto end; 535 - } 536 - 537 - /* check if we need to start the dev */ 538 - if (!mei_host_is_ready(dev)) { 539 - if (mei_hw_is_ready(dev)) { 540 - dev_dbg(dev->dev, "we need to start the dev.\n"); 541 - dev->recvd_hw_ready = true; 542 - wake_up(&dev->wait_hw_ready); 543 - } else { 544 - dev_warn(dev->dev, "Spurious Interrupt\n"); 545 - } 546 - goto end; 547 - } 548 - 549 - /* read */ 550 - if (hw->recv_rdy) { 551 - data = virtqueue_get_buf(hw->in, &len); 552 - if (!data || !len) { 553 - dev_dbg(dev->dev, "No data %d", len); 554 - } else { 555 - dev_dbg(dev->dev, "data_in %d\n", len); 556 - WARN_ON(data != hw->recv_buf); 557 - hw->recv_len = mei_data2slots(len); 558 - hw->recv_rdy = 0; 559 - } 560 - } 561 - 562 - /* write */ 563 - if (!atomic_read(&hw->hbuf_ready)) { 564 - if (!virtqueue_get_buf(hw->out, &len)) { 565 - dev_warn(dev->dev, "Failed to getbuf\n"); 566 - } else { 567 - mei_virtio_free_outbufs(hw); 568 - atomic_inc(&hw->hbuf_ready); 569 - } 570 - } 571 - 572 - /* check slots available for reading */ 573 - slots = mei_count_full_read_slots(dev); 574 - while (slots > 0) { 575 - dev_dbg(dev->dev, "slots to read = 
%08x\n", slots); 576 - rets = mei_irq_read_handler(dev, &complete_list, &slots); 577 - 578 - if (rets && 579 - (dev->dev_state != MEI_DEV_RESETTING && 580 - dev->dev_state != MEI_DEV_POWER_DOWN)) { 581 - dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", 582 - rets); 583 - schedule_work(&dev->reset_work); 584 - goto end; 585 - } 586 - } 587 - 588 - dev->hbuf_is_ready = mei_hbuf_is_ready(dev); 589 - 590 - mei_irq_write_handler(dev, &complete_list); 591 - 592 - dev->hbuf_is_ready = mei_hbuf_is_ready(dev); 593 - 594 - mei_irq_compl_handler(dev, &complete_list); 595 - 596 - mei_virtio_add_recv_buf(hw); 597 - 598 - end: 599 - if (dev->dev_state != MEI_DEV_DISABLED) { 600 - if (!virtqueue_enable_cb(hw->in)) 601 - schedule_work(&hw->intr_handler); 602 - } 603 - 604 - mutex_unlock(&dev->device_lock); 605 - } 606 - 607 - static void mei_virtio_config_changed(struct virtio_device *vdev) 608 - { 609 - struct mei_virtio_hw *hw = vdev->priv; 610 - struct mei_device *dev = &hw->mdev; 611 - 612 - virtio_cread(vdev, struct mei_virtio_cfg, 613 - hw_ready, &hw->cfg.hw_ready); 614 - 615 - if (dev->dev_state == MEI_DEV_DISABLED) { 616 - dev_dbg(dev->dev, "disabled state don't start\n"); 617 - return; 618 - } 619 - 620 - /* Run intr handler once to handle reset notify */ 621 - schedule_work(&hw->intr_handler); 622 - } 623 - 624 - static void mei_virtio_remove_vqs(struct virtio_device *vdev) 625 - { 626 - struct mei_virtio_hw *hw = vdev->priv; 627 - 628 - virtqueue_detach_unused_buf(hw->in); 629 - hw->recv_len = 0; 630 - hw->recv_idx = 0; 631 - hw->recv_rdy = 0; 632 - 633 - virtqueue_detach_unused_buf(hw->out); 634 - 635 - mei_virtio_free_outbufs(hw); 636 - 637 - vdev->config->del_vqs(vdev); 638 - } 639 - 640 - /* 641 - * There are two virtqueues, one is for send and another is for recv. 
642 - */ 643 - static int mei_virtio_init_vqs(struct mei_virtio_hw *hw, 644 - struct virtio_device *vdev) 645 - { 646 - struct virtqueue *vqs[2]; 647 - 648 - vq_callback_t *cbs[] = { 649 - mei_virtio_data_in, 650 - mei_virtio_data_out, 651 - }; 652 - static const char * const names[] = { 653 - "in", 654 - "out", 655 - }; 656 - int ret; 657 - 658 - ret = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL); 659 - if (ret) 660 - return ret; 661 - 662 - hw->in = vqs[0]; 663 - hw->out = vqs[1]; 664 - 665 - return 0; 666 - } 667 - 668 - static const struct mei_hw_ops mei_virtio_ops = { 669 - .fw_status = mei_virtio_fw_status, 670 - .pg_state = mei_virtio_pg_state, 671 - 672 - .host_is_ready = mei_virtio_host_is_ready, 673 - 674 - .hw_is_ready = mei_virtio_hw_is_ready, 675 - .hw_reset = mei_virtio_hw_reset, 676 - .hw_config = mei_virtio_hw_config, 677 - .hw_start = mei_virtio_hw_start, 678 - 679 - .pg_in_transition = mei_virtio_pg_in_transition, 680 - .pg_is_enabled = mei_virtio_pg_is_enabled, 681 - 682 - .intr_clear = mei_virtio_intr_clear, 683 - .intr_enable = mei_virtio_intr_enable, 684 - .intr_disable = mei_virtio_intr_disable, 685 - .synchronize_irq = mei_virtio_synchronize_irq, 686 - 687 - .hbuf_free_slots = mei_virtio_hbuf_empty_slots, 688 - .hbuf_is_ready = mei_virtio_hbuf_is_ready, 689 - .hbuf_depth = mei_virtio_hbuf_depth, 690 - 691 - .write = mei_virtio_write_message, 692 - 693 - .rdbuf_full_slots = mei_virtio_count_full_read_slots, 694 - .read_hdr = mei_virtio_read_hdr, 695 - .read = mei_virtio_read, 696 - }; 697 - 698 - static int mei_virtio_probe(struct virtio_device *vdev) 699 - { 700 - struct mei_virtio_hw *hw; 701 - int ret; 702 - 703 - hw = devm_kzalloc(&vdev->dev, sizeof(*hw), GFP_KERNEL); 704 - if (!hw) 705 - return -ENOMEM; 706 - 707 - vdev->priv = hw; 708 - 709 - INIT_WORK(&hw->intr_handler, mei_virtio_intr_handler); 710 - 711 - ret = mei_virtio_init_vqs(hw, vdev); 712 - if (ret) 713 - goto vqs_failed; 714 - 715 - virtio_cread(vdev, struct mei_virtio_cfg, 716 - buf_depth, &hw->cfg.buf_depth); 717 - 718 - hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL); 719 - if (!hw->recv_buf) { 720 - ret = -ENOMEM; 721 - goto hbuf_failed; 722 - } 723 - atomic_set(&hw->hbuf_ready, 0); 724 - 725 - virtio_device_ready(vdev); 726 - 727 - mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops); 728 - 729 - pm_runtime_get_noresume(&vdev->dev); 730 - pm_runtime_set_active(&vdev->dev); 731 - pm_runtime_enable(&vdev->dev); 732 - 733 - ret = mei_start(&hw->mdev); 734 - if (ret) 735 - goto mei_start_failed; 736 - 737 - pm_runtime_set_autosuspend_delay(&vdev->dev, MEI_VIRTIO_RPM_TIMEOUT); 738 - pm_runtime_use_autosuspend(&vdev->dev); 739 - 740 - ret = mei_register(&hw->mdev, &vdev->dev); 741 - if (ret) 742 - goto mei_failed; 743 - 744 - pm_runtime_put(&vdev->dev); 745 - 746 - return 0; 747 - 748 - mei_failed: 749 - mei_stop(&hw->mdev); 750 - mei_start_failed: 751 - mei_cancel_work(&hw->mdev); 752 - mei_disable_interrupts(&hw->mdev); 753 - kfree(hw->recv_buf); 754 - hbuf_failed: 755 - vdev->config->del_vqs(vdev); 756 - vqs_failed: 757 - return ret; 758 - } 759 - 760 - static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device) 761 - { 762 - struct virtio_device *vdev = dev_to_virtio(device); 763 - struct mei_virtio_hw *hw = vdev->priv; 764 - 765 - dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n"); 766 - 767 - if (!hw) 768 - return -ENODEV; 769 - 770 - if (mei_write_is_idle(&hw->mdev)) 771 - pm_runtime_autosuspend(device); 772 - 773 - return -EBUSY; 774 - } 775 - 
776 - static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device) 777 - { 778 - return 0; 779 - } 780 - 781 - static int __maybe_unused mei_virtio_pm_runtime_resume(struct device *device) 782 - { 783 - return 0; 784 - } 785 - 786 - static int __maybe_unused mei_virtio_freeze(struct virtio_device *vdev) 787 - { 788 - struct mei_virtio_hw *hw = vdev->priv; 789 - 790 - dev_dbg(&vdev->dev, "freeze\n"); 791 - 792 - if (!hw) 793 - return -ENODEV; 794 - 795 - mei_stop(&hw->mdev); 796 - mei_disable_interrupts(&hw->mdev); 797 - cancel_work_sync(&hw->intr_handler); 798 - vdev->config->reset(vdev); 799 - mei_virtio_remove_vqs(vdev); 800 - 801 - return 0; 802 - } 803 - 804 - static int __maybe_unused mei_virtio_restore(struct virtio_device *vdev) 805 - { 806 - struct mei_virtio_hw *hw = vdev->priv; 807 - int ret; 808 - 809 - dev_dbg(&vdev->dev, "restore\n"); 810 - 811 - if (!hw) 812 - return -ENODEV; 813 - 814 - ret = mei_virtio_init_vqs(hw, vdev); 815 - if (ret) 816 - return ret; 817 - 818 - virtio_device_ready(vdev); 819 - 820 - ret = mei_restart(&hw->mdev); 821 - if (ret) 822 - return ret; 823 - 824 - /* Start timer if stopped in suspend */ 825 - schedule_delayed_work(&hw->mdev.timer_work, HZ); 826 - 827 - return 0; 828 - } 829 - 830 - static const struct dev_pm_ops mei_virtio_pm_ops = { 831 - SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend, 832 - mei_virtio_pm_runtime_resume, 833 - mei_virtio_pm_runtime_idle) 834 - }; 835 - 836 - static void mei_virtio_remove(struct virtio_device *vdev) 837 - { 838 - struct mei_virtio_hw *hw = vdev->priv; 839 - 840 - mei_stop(&hw->mdev); 841 - mei_disable_interrupts(&hw->mdev); 842 - cancel_work_sync(&hw->intr_handler); 843 - mei_deregister(&hw->mdev); 844 - vdev->config->reset(vdev); 845 - mei_virtio_remove_vqs(vdev); 846 - kfree(hw->recv_buf); 847 - pm_runtime_disable(&vdev->dev); 848 - } 849 - 850 - static struct virtio_device_id id_table[] = { 851 - { VIRTIO_ID_MEI, VIRTIO_DEV_ANY_ID }, 852 - { } 853 - }; 854 - 855 - static struct virtio_driver mei_virtio_driver = { 856 - .id_table = id_table, 857 - .probe = mei_virtio_probe, 858 - .remove = mei_virtio_remove, 859 - .config_changed = mei_virtio_config_changed, 860 - .driver = { 861 - .name = KBUILD_MODNAME, 862 - .owner = THIS_MODULE, 863 - .pm = &mei_virtio_pm_ops, 864 - }, 865 - #ifdef CONFIG_PM_SLEEP 866 - .freeze = mei_virtio_freeze, 867 - .restore = mei_virtio_restore, 868 - #endif 869 - }; 870 - 871 - module_virtio_driver(mei_virtio_driver); 872 - MODULE_DEVICE_TABLE(virtio, id_table); 873 - MODULE_DESCRIPTION("Virtio MEI frontend driver"); 874 - MODULE_LICENSE("GPL v2");
+1 -1
drivers/mmc/core/block.c
··· 580 580 581 581 memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); 582 582 583 - if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) { 583 + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { 584 584 /* 585 585 * Ensure RPMB/R1B command has completed by polling CMD13 586 586 * "Send Status".
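A note on the R1B check above: MMC_RSP_R1B is a composite constant (the response-present, CRC, opcode and busy bits OR'd together), so the old test `cmd.flags & MMC_RSP_R1B` was true for any response type sharing even one of those bits, plain R1 included. Comparing the masked value against the full constant demands that all R1B bits be set. A standalone sketch of the idiom; the flag values are invented for the demo and are not the real linux/mmc/mmc.h definitions:

    #include <stdio.h>

    /* Illustrative composite flags in the MMC_RSP_* style. */
    #define RSP_PRESENT (1u << 0)
    #define RSP_CRC     (1u << 2)
    #define RSP_BUSY    (1u << 3)
    #define RSP_OPCODE  (1u << 4)

    #define RSP_R1  (RSP_PRESENT | RSP_CRC | RSP_OPCODE)
    #define RSP_R1B (RSP_PRESENT | RSP_CRC | RSP_OPCODE | RSP_BUSY)

    int main(void)
    {
            unsigned int flags = RSP_R1;    /* an R1 (non-busy) response */

            /* Naive test: prints 1, because R1 shares bits with R1B. */
            printf("naive:  %d\n", !!(flags & RSP_R1B));

            /* Fixed test: prints 0, the busy bit is missing. */
            printf("masked: %d\n", (flags & RSP_R1B) == RSP_R1B);
            return 0;
    }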
+28 -11
drivers/mmc/host/mtk-sd.c
··· 446 446 447 447 static const struct mtk_mmc_compatible mt8135_compat = { 448 448 .clk_div_bits = 8, 449 - .recheck_sdio_irq = false, 449 + .recheck_sdio_irq = true, 450 450 .hs400_tune = false, 451 451 .pad_tune_reg = MSDC_PAD_TUNE, 452 452 .async_fifo = false, ··· 485 485 486 486 static const struct mtk_mmc_compatible mt2701_compat = { 487 487 .clk_div_bits = 12, 488 - .recheck_sdio_irq = false, 488 + .recheck_sdio_irq = true, 489 489 .hs400_tune = false, 490 490 .pad_tune_reg = MSDC_PAD_TUNE0, 491 491 .async_fifo = true, ··· 511 511 512 512 static const struct mtk_mmc_compatible mt7622_compat = { 513 513 .clk_div_bits = 12, 514 - .recheck_sdio_irq = false, 514 + .recheck_sdio_irq = true, 515 515 .hs400_tune = false, 516 516 .pad_tune_reg = MSDC_PAD_TUNE0, 517 517 .async_fifo = true, ··· 524 524 525 525 static const struct mtk_mmc_compatible mt8516_compat = { 526 526 .clk_div_bits = 12, 527 - .recheck_sdio_irq = false, 527 + .recheck_sdio_irq = true, 528 528 .hs400_tune = false, 529 529 .pad_tune_reg = MSDC_PAD_TUNE0, 530 530 .async_fifo = true, ··· 535 535 536 536 static const struct mtk_mmc_compatible mt7620_compat = { 537 537 .clk_div_bits = 8, 538 - .recheck_sdio_irq = false, 538 + .recheck_sdio_irq = true, 539 539 .hs400_tune = false, 540 540 .pad_tune_reg = MSDC_PAD_TUNE, 541 541 .async_fifo = false, ··· 548 548 549 549 static const struct mtk_mmc_compatible mt6779_compat = { 550 550 .clk_div_bits = 12, 551 + .recheck_sdio_irq = false, 551 552 .hs400_tune = false, 552 553 .pad_tune_reg = MSDC_PAD_TUNE0, 553 554 .async_fifo = true, ··· 2604 2603 return 0; 2605 2604 } 2606 2605 2607 - #ifdef CONFIG_PM 2608 2606 static void msdc_save_reg(struct msdc_host *host) 2609 2607 { 2610 2608 u32 tune_reg = host->dev_comp->pad_tune_reg; ··· 2662 2662 __msdc_enable_sdio_irq(host, 1); 2663 2663 } 2664 2664 2665 - static int msdc_runtime_suspend(struct device *dev) 2665 + static int __maybe_unused msdc_runtime_suspend(struct device *dev) 2666 2666 { 2667 2667 struct mmc_host *mmc = dev_get_drvdata(dev); 2668 2668 struct msdc_host *host = mmc_priv(mmc); ··· 2672 2672 return 0; 2673 2673 } 2674 2674 2675 - static int msdc_runtime_resume(struct device *dev) 2675 + static int __maybe_unused msdc_runtime_resume(struct device *dev) 2676 2676 { 2677 2677 struct mmc_host *mmc = dev_get_drvdata(dev); 2678 2678 struct msdc_host *host = mmc_priv(mmc); ··· 2681 2681 msdc_restore_reg(host); 2682 2682 return 0; 2683 2683 } 2684 - #endif 2684 + 2685 + static int __maybe_unused msdc_suspend(struct device *dev) 2686 + { 2687 + struct mmc_host *mmc = dev_get_drvdata(dev); 2688 + int ret; 2689 + 2690 + if (mmc->caps2 & MMC_CAP2_CQE) { 2691 + ret = cqhci_suspend(mmc); 2692 + if (ret) 2693 + return ret; 2694 + } 2695 + 2696 + return pm_runtime_force_suspend(dev); 2697 + } 2698 + 2699 + static int __maybe_unused msdc_resume(struct device *dev) 2700 + { 2701 + return pm_runtime_force_resume(dev); 2702 + } 2685 2703 2686 2704 static const struct dev_pm_ops msdc_dev_pm_ops = { 2687 - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 2688 - pm_runtime_force_resume) 2705 + SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume) 2689 2706 SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL) 2690 2707 }; 2691 2708
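Beyond the recheck_sdio_irq and CQE-suspend changes, the mtk-sd hunk above trades the `#ifdef CONFIG_PM` guard for `__maybe_unused`, so the PM callbacks are compile-tested in every configuration and simply discarded when the SET_*_PM_OPS macros expand to nothing. A minimal sketch of that pattern, assuming hypothetical foo_* names (linux/pm.h and linux/pm_runtime.h supply the macros and helpers):

    static int __maybe_unused foo_runtime_suspend(struct device *dev)
    {
            /* save state, gate clocks ... */
            return 0;
    }

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            /* quiesce whatever runtime PM does not cover, then: */
            return pm_runtime_force_suspend(dev);
    }

    static const struct dev_pm_ops foo_pm_ops = {
            /* Both macros compile away without CONFIG_PM_SLEEP/CONFIG_PM;
             * __maybe_unused silences the resulting unused warnings. */
            SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, NULL, NULL)
    };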
+3
drivers/mmc/host/sdhci-of-arasan.c
··· 1186 1186 static struct sdhci_arasan_of_data intel_keembay_emmc_data = { 1187 1187 .soc_ctl_map = &intel_keembay_soc_ctl_map, 1188 1188 .pdata = &sdhci_keembay_emmc_pdata, 1189 + .clk_ops = &arasan_clk_ops, 1189 1190 }; 1190 1191 1191 1192 static struct sdhci_arasan_of_data intel_keembay_sd_data = { 1192 1193 .soc_ctl_map = &intel_keembay_soc_ctl_map, 1193 1194 .pdata = &sdhci_keembay_sd_pdata, 1195 + .clk_ops = &arasan_clk_ops, 1194 1196 }; 1195 1197 1196 1198 static struct sdhci_arasan_of_data intel_keembay_sdio_data = { 1197 1199 .soc_ctl_map = &intel_keembay_soc_ctl_map, 1198 1200 .pdata = &sdhci_keembay_sdio_pdata, 1201 + .clk_ops = &arasan_clk_ops, 1199 1202 }; 1200 1203 1201 1204 static const struct of_device_id sdhci_arasan_of_match[] = {
+3 -3
drivers/mmc/host/tmio_mmc_core.c
··· 927 927 switch (ios->power_mode) { 928 928 case MMC_POWER_OFF: 929 929 tmio_mmc_power_off(host); 930 - /* Downgrade ensures a sane state for tuning HW (e.g. SCC) */ 931 - if (host->mmc->ops->hs400_downgrade) 932 - host->mmc->ops->hs400_downgrade(host->mmc); 930 + /* For R-Car Gen2+, we need to reset SDHI specific SCC */ 931 + if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) 932 + host->reset(host); 933 933 host->set_clock(host, 0); 934 934 break; 935 935 case MMC_POWER_UP:
+3 -1
drivers/mtd/nand/raw/ams-delta.c
··· 218 218 static int gpio_nand_attach_chip(struct nand_chip *chip) 219 219 { 220 220 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 221 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 221 + 222 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 223 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 222 224 223 225 return 0; 224 226 }
+3 -1
drivers/mtd/nand/raw/au1550nd.c
··· 239 239 static int au1550nd_attach_chip(struct nand_chip *chip) 240 240 { 241 241 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 242 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 242 + 243 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 244 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 243 245 244 246 return 0; 245 247 }
+3 -1
drivers/mtd/nand/raw/gpio.c
··· 164 164 static int gpio_nand_attach_chip(struct nand_chip *chip) 165 165 { 166 166 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 167 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 167 + 168 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 169 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 168 170 169 171 return 0; 170 172 }
+3 -1
drivers/mtd/nand/raw/mpc5121_nfc.c
··· 606 606 static int mpc5121_nfc_attach_chip(struct nand_chip *chip) 607 607 { 608 608 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 609 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 609 + 610 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 611 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 610 612 611 613 return 0; 612 614 }
+3 -1
drivers/mtd/nand/raw/orion_nand.c
··· 86 86 static int orion_nand_attach_chip(struct nand_chip *chip) 87 87 { 88 88 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 89 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 89 + 90 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 91 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 90 92 91 93 return 0; 92 94 }
+3 -1
drivers/mtd/nand/raw/pasemi_nand.c
··· 77 77 static int pasemi_attach_chip(struct nand_chip *chip) 78 78 { 79 79 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 80 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 80 + 81 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 82 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 81 83 82 84 return 0; 83 85 }
+3 -1
drivers/mtd/nand/raw/plat_nand.c
··· 22 22 static int plat_nand_attach_chip(struct nand_chip *chip) 23 23 { 24 24 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 25 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 25 + 26 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 27 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 26 28 27 29 return 0; 28 30 }
+3 -1
drivers/mtd/nand/raw/socrates_nand.c
··· 120 120 static int socrates_attach_chip(struct nand_chip *chip) 121 121 { 122 122 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 123 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 123 + 124 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 125 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 124 126 125 127 return 0; 126 128 }
+3 -1
drivers/mtd/nand/raw/xway_nand.c
··· 149 149 static int xway_attach_chip(struct nand_chip *chip) 150 150 { 151 151 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 152 - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 152 + 153 + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) 154 + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; 153 155 154 156 return 0; 155 157 }
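The nine raw-NAND hunks above (ams-delta, au1550nd, gpio, mpc5121_nfc, orion_nand, pasemi_nand, plat_nand, socrates_nand, xway_nand) apply one and the same fix: stop clobbering an ECC algorithm the core may already have chosen, typically from the nand-ecc-algo devicetree property, and default to software Hamming only when none was set. Distilled into a single generic attach_chip:

    static int example_attach_chip(struct nand_chip *chip)
    {
            chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;

            /* NAND_ECC_ALGO_UNKNOWN means nothing picked an algorithm
             * yet; only then is Hamming a safe default. */
            if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
                    chip->ecc.algo = NAND_ECC_ALGO_HAMMING;

            return 0;
    }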
+15 -7
drivers/net/bonding/bond_options.c
··· 745 745 return &bond_opts[option]; 746 746 } 747 747 748 + static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode) 749 + { 750 + if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD)) 751 + return; 752 + 753 + if (mode == BOND_MODE_ACTIVEBACKUP) 754 + bond_dev->wanted_features |= BOND_XFRM_FEATURES; 755 + else 756 + bond_dev->wanted_features &= ~BOND_XFRM_FEATURES; 757 + 758 + netdev_update_features(bond_dev); 759 + } 760 + 748 761 static int bond_option_mode_set(struct bonding *bond, 749 762 const struct bond_opt_value *newval) 750 763 { ··· 780 767 if (newval->value == BOND_MODE_ALB) 781 768 bond->params.tlb_dynamic_lb = 1; 782 769 783 - #ifdef CONFIG_XFRM_OFFLOAD 784 - if (newval->value == BOND_MODE_ACTIVEBACKUP) 785 - bond->dev->wanted_features |= BOND_XFRM_FEATURES; 786 - else 787 - bond->dev->wanted_features &= ~BOND_XFRM_FEATURES; 788 - netdev_change_features(bond->dev); 789 - #endif /* CONFIG_XFRM_OFFLOAD */ 770 + if (bond->dev->reg_state == NETREG_REGISTERED) 771 + bond_set_xfrm_features(bond->dev, newval->value); 790 772 791 773 /* don't cache arp_validate between modes */ 792 774 bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
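Two things happen in the bonding hunk above: the `#ifdef CONFIG_XFRM_OFFLOAD` block becomes an IS_ENABLED() check inside a helper, so the code is always compiled and merely dead-code-eliminated when the option is off, and the helper now runs only for an already-registered device. A generic sketch of the IS_ENABLED() idiom; the NETIF_F_HW_ESP flag here is just an illustration, the real code uses the BOND_XFRM_FEATURES mask:

    static void example_set_offload(struct net_device *dev, bool enable)
    {
            /* Compile-time constant: the branch folds away when the
             * option is off, but the body is still type-checked. */
            if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
                    return;

            if (enable)
                    dev->wanted_features |= NETIF_F_HW_ESP;
            else
                    dev->wanted_features &= ~NETIF_F_HW_ESP;

            netdev_update_features(dev);
    }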
+7 -2
drivers/net/can/softing/softing_main.c
··· 382 382 383 383 /* check or determine and set bittime */ 384 384 ret = open_candev(ndev); 385 - if (!ret) 386 - ret = softing_startstop(ndev, 1); 385 + if (ret) 386 + return ret; 387 + 388 + ret = softing_startstop(ndev, 1); 389 + if (ret < 0) 390 + close_candev(ndev); 391 + 387 392 return ret; 388 393 } 389 394
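The softing hunk above replaces a combined success check with explicit unwinding: once open_candev() has succeeded, a failure in the later start step must close the device again instead of leaving it half-open. The shape of the pattern, with a hypothetical foo_start_hw() standing in for softing_startstop():

    static int foo_start_hw(struct net_device *ndev);      /* hypothetical */

    static int example_ndo_open(struct net_device *ndev)
    {
            int ret;

            ret = open_candev(ndev);        /* step 1 */
            if (ret)
                    return ret;

            ret = foo_start_hw(ndev);       /* step 2 */
            if (ret < 0)
                    close_candev(ndev);     /* unwind step 1 */

            return ret;
    }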
-7
drivers/net/dsa/ocelot/felix.c
··· 588 588 struct ocelot *ocelot = ds->priv; 589 589 struct felix *felix = ocelot_to_felix(ocelot); 590 590 int port, err; 591 - int tc; 592 591 593 592 err = felix_init_structs(felix, ds->num_ports); 594 593 if (err) ··· 626 627 ocelot_write_rix(ocelot, 627 628 ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)), 628 629 ANA_PGID_PGID, PGID_UC); 629 - /* Setup the per-traffic class flooding PGIDs */ 630 - for (tc = 0; tc < FELIX_NUM_TC; tc++) 631 - ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | 632 - ANA_FLOODING_FLD_BROADCAST(PGID_MC) | 633 - ANA_FLOODING_FLD_UNICAST(PGID_UC), 634 - ANA_FLOODING, tc); 635 630 636 631 ds->mtu_enforcement_ingress = true; 637 632 ds->configure_vlan_while_not_filtering = true;
+1
drivers/net/dsa/ocelot/felix_vsc9959.c
··· 1429 1429 pci_set_drvdata(pdev, felix); 1430 1430 ocelot = &felix->ocelot; 1431 1431 ocelot->dev = &pdev->dev; 1432 + ocelot->num_flooding_pgids = FELIX_NUM_TC; 1432 1433 felix->info = &felix_info_vsc9959; 1433 1434 felix->switch_base = pci_resource_start(pdev, 1434 1435 felix->info->switch_pci_bar);
+1
drivers/net/dsa/ocelot/seville_vsc9953.c
··· 1210 1210 1211 1211 ocelot = &felix->ocelot; 1212 1212 ocelot->dev = &pdev->dev; 1213 + ocelot->num_flooding_pgids = 1; 1213 1214 felix->info = &seville_info_vsc9953; 1214 1215 1215 1216 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
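The three ocelot hunks above belong together: VSC9959 (felix) has one flooding PGID per traffic class, FELIX_NUM_TC of them, while VSC9953 (seville) has a single one, so the hard-coded per-TC loop leaves the felix driver and the count travels to common code as ocelot->num_flooding_pgids. Presumably the shared ocelot init then runs the deleted loop parameterized by that field; a sketch under that assumption:

    int tc;

    /* One flooding-configuration word per flooding PGID the
     * switch core actually provides. */
    for (tc = 0; tc < ocelot->num_flooding_pgids; tc++)
            ocelot_write_rix(ocelot,
                             ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
                             ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
                             ANA_FLOODING_FLD_UNICAST(PGID_UC),
                             ANA_FLOODING, tc);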
+1
drivers/net/ethernet/agere/Kconfig
··· 21 21 tristate "Agere ET-1310 Gigabit Ethernet support" 22 22 depends on PCI 23 23 select PHYLIB 24 + select CRC32 24 25 help 25 26 This driver supports Agere ET-1310 ethernet adapters. 26 27
+1
drivers/net/ethernet/cadence/Kconfig
··· 23 23 tristate "Cadence MACB/GEM support" 24 24 depends on HAS_DMA && COMMON_CLK 25 25 select PHYLINK 26 + select CRC32 26 27 help 27 28 The Cadence MACB ethernet interface is found on many Atmel AT32 and 28 29 AT91 parts. This driver also supports the Cadence GEM (Gigabit
+1 -5
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
··· 987 987 struct fw_eth_tx_pkt_wr *wr; 988 988 struct cpl_tx_pkt_core *cpl; 989 989 u32 ctrl, iplen, maclen; 990 - #if IS_ENABLED(CONFIG_IPV6) 991 990 struct ipv6hdr *ip6; 992 - #endif 993 991 unsigned int ndesc; 994 992 struct tcphdr *tcp; 995 993 int len16, pktlen; ··· 1041 1043 cpl->len = htons(pktlen); 1042 1044 1043 1045 memcpy(buf, skb->data, pktlen); 1044 - if (tx_info->ip_family == AF_INET) { 1046 + if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) { 1045 1047 /* we need to correct ip header len */ 1046 1048 ip = (struct iphdr *)(buf + maclen); 1047 1049 ip->tot_len = htons(pktlen - maclen); 1048 1050 cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP); 1049 - #if IS_ENABLED(CONFIG_IPV6) 1050 1051 } else { 1051 1052 ip6 = (struct ipv6hdr *)(buf + maclen); 1052 1053 ip6->payload_len = htons(pktlen - maclen - iplen); 1053 1054 cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6); 1054 - #endif 1055 1055 } 1056 1056 1057 1057 cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+1
drivers/net/ethernet/faraday/Kconfig
··· 33 33 depends on !64BIT || BROKEN 34 34 select PHYLIB 35 35 select MDIO_ASPEED if MACH_ASPEED_G6 36 + select CRC32 36 37 help 37 38 This driver supports the FTGMAC100 Gigabit Ethernet controller 38 39 from Faraday. It is used on Faraday A369, Andes AG102 and some
+1
drivers/net/ethernet/freescale/Kconfig
··· 25 25 depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ 26 26 ARCH_MXC || SOC_IMX28 || COMPILE_TEST) 27 27 default ARCH_MXC || SOC_IMX28 if ARM 28 + select CRC32 28 29 select PHYLIB 29 30 imply PTP_1588_CLOCK 30 31 help
+1
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
··· 269 269 270 270 if (!of_device_is_available(node)) { 271 271 netdev_err(mac->net_dev, "pcs-handle node not available\n"); 272 + of_node_put(node); 272 273 return -ENODEV; 273 274 } 274 275
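The dpaa2-mac one-liner above plugs a device-tree node leak: a lookup such as of_parse_phandle() returns the node with its refcount raised, and the early -ENODEV return skipped the matching of_node_put(). The rule in sketch form (the surrounding function is invented for illustration):

    static int example_get_pcs(struct device_node *dpmac_node)
    {
            struct device_node *node;

            node = of_parse_phandle(dpmac_node, "pcs-handle", 0);
            if (!node)
                    return 0;               /* optional property, nothing held */

            if (!of_device_is_available(node)) {
                    of_node_put(node);      /* the reference the fix releases */
                    return -ENODEV;
            }

            /* ... resolve the PCS device from the node ... */
            of_node_put(node);
            return 0;
    }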
+7 -3
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
··· 143 143 { ENETC_PM0_R255, "MAC rx 128-255 byte packets" }, 144 144 { ENETC_PM0_R511, "MAC rx 256-511 byte packets" }, 145 145 { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" }, 146 - { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" }, 147 - { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" }, 146 + { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" }, 147 + { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" }, 148 148 { ENETC_PM0_ROVR, "MAC rx oversized packets" }, 149 149 { ENETC_PM0_RJBR, "MAC rx jabber packets" }, 150 150 { ENETC_PM0_RFRG, "MAC rx fragment packets" }, ··· 163 163 { ENETC_PM0_TBCA, "MAC tx broadcast frames" }, 164 164 { ENETC_PM0_TPKT, "MAC tx packets" }, 165 165 { ENETC_PM0_TUND, "MAC tx undersized packets" }, 166 + { ENETC_PM0_T64, "MAC tx 64 byte packets" }, 166 167 { ENETC_PM0_T127, "MAC tx 65-127 byte packets" }, 168 + { ENETC_PM0_T255, "MAC tx 128-255 byte packets" }, 169 + { ENETC_PM0_T511, "MAC tx 256-511 byte packets" }, 167 170 { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" }, 168 - { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" }, 171 + { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" }, 172 + { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" }, 169 173 { ENETC_PM0_TCNP, "MAC tx control packets" }, 170 174 { ENETC_PM0_TDFR, "MAC tx deferred packets" }, 171 175 { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
+7 -3
drivers/net/ethernet/freescale/enetc/enetc_hw.h
··· 267 267 #define ENETC_PM0_R255 0x8180 268 268 #define ENETC_PM0_R511 0x8188 269 269 #define ENETC_PM0_R1023 0x8190 270 - #define ENETC_PM0_R1518 0x8198 271 - #define ENETC_PM0_R1519X 0x81A0 270 + #define ENETC_PM0_R1522 0x8198 271 + #define ENETC_PM0_R1523X 0x81A0 272 272 #define ENETC_PM0_ROVR 0x81A8 273 273 #define ENETC_PM0_RJBR 0x81B0 274 274 #define ENETC_PM0_RFRG 0x81B8 ··· 287 287 #define ENETC_PM0_TBCA 0x8250 288 288 #define ENETC_PM0_TPKT 0x8260 289 289 #define ENETC_PM0_TUND 0x8268 290 + #define ENETC_PM0_T64 0x8270 290 291 #define ENETC_PM0_T127 0x8278 292 + #define ENETC_PM0_T255 0x8280 293 + #define ENETC_PM0_T511 0x8288 291 294 #define ENETC_PM0_T1023 0x8290 292 - #define ENETC_PM0_T1518 0x8298 295 + #define ENETC_PM0_T1522 0x8298 296 + #define ENETC_PM0_T1523X 0x82A0 293 297 #define ENETC_PM0_TCNP 0x82C0 294 298 #define ENETC_PM0_TDFR 0x82D0 295 299 #define ENETC_PM0_TMCOL 0x82D8
+1
drivers/net/ethernet/freescale/fman/Kconfig
··· 4 4 depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST 5 5 select GENERIC_ALLOCATOR 6 6 select PHYLIB 7 + select CRC32 7 8 default n 8 9 help 9 10 Freescale Data-Path Acceleration Architecture Frame Manager
-4
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
··· 35 35 36 36 #define HCLGE_DBG_DFX_SSU_2_OFFSET 12 37 37 38 - #pragma pack(1) 39 - 40 38 struct hclge_qos_pri_map_cmd { 41 39 u8 pri0_tc : 4, 42 40 pri1_tc : 4; ··· 82 84 const struct hclge_dbg_dfx_message *dfx_msg; 83 85 struct hclge_dbg_reg_common_msg reg_msg; 84 86 }; 85 - 86 - #pragma pack() 87 87 88 88 static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { 89 89 {false, "Reserved"},
+4 -4
drivers/net/ethernet/intel/e1000e/netdev.c
··· 6475 6475 6476 6476 /* Ungate PGCB clock */ 6477 6477 mac_data = er32(FEXTNVM9); 6478 - mac_data |= BIT(28); 6478 + mac_data &= ~BIT(28); 6479 6479 ew32(FEXTNVM9, mac_data); 6480 6480 6481 6481 /* Enable K1 off to enable mPHY Power Gating */ 6482 6482 mac_data = er32(FEXTNVM6); 6483 6483 mac_data |= BIT(31); 6484 - ew32(FEXTNVM12, mac_data); 6484 + ew32(FEXTNVM6, mac_data); 6485 6485 6486 6486 /* Enable mPHY power gating for any link and speed */ 6487 6487 mac_data = er32(FEXTNVM8); ··· 6525 6525 /* Disable K1 off */ 6526 6526 mac_data = er32(FEXTNVM6); 6527 6527 mac_data &= ~BIT(31); 6528 - ew32(FEXTNVM12, mac_data); 6528 + ew32(FEXTNVM6, mac_data); 6529 6529 6530 6530 /* Disable Ungate PGCB clock */ 6531 6531 mac_data = er32(FEXTNVM9); 6532 - mac_data &= ~BIT(28); 6532 + mac_data |= BIT(28); 6533 6533 ew32(FEXTNVM9, mac_data); 6534 6534 6535 6535 /* Cancel not waking from dynamic
+20 -7
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 1861 1861 * the adapter for another receive 1862 1862 * 1863 1863 * @rx_buffer: buffer containing the page 1864 + * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call 1864 1865 * 1865 1866 * If page is reusable, rx_buffer->page_offset is adjusted to point to 1866 1867 * an unused region in the page. ··· 1884 1883 * 1885 1884 * In either case, if the page is reusable its refcount is increased. 1886 1885 **/ 1887 - static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) 1886 + static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, 1887 + int rx_buffer_pgcnt) 1888 1888 { 1889 1889 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1890 1890 struct page *page = rx_buffer->page; ··· 1896 1894 1897 1895 #if (PAGE_SIZE < 8192) 1898 1896 /* if we are only owner of page we can reuse it */ 1899 - if (unlikely((page_count(page) - pagecnt_bias) > 1)) 1897 + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 1900 1898 return false; 1901 1899 #else 1902 1900 #define I40E_LAST_OFFSET \ ··· 1955 1953 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use 1956 1954 * @rx_ring: rx descriptor ring to transact packets on 1957 1955 * @size: size of buffer to add to skb 1956 + * @rx_buffer_pgcnt: buffer page refcount 1958 1957 * 1959 1958 * This function will pull an Rx buffer from the ring and synchronize it 1960 1959 * for use by the CPU. 1961 1960 */ 1962 1961 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, 1963 - const unsigned int size) 1962 + const unsigned int size, 1963 + int *rx_buffer_pgcnt) 1964 1964 { 1965 1965 struct i40e_rx_buffer *rx_buffer; 1966 1966 1967 1967 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 1968 + *rx_buffer_pgcnt = 1969 + #if (PAGE_SIZE < 8192) 1970 + page_count(rx_buffer->page); 1971 + #else 1972 + 0; 1973 + #endif 1968 1974 prefetch_page_address(rx_buffer->page); 1969 1975 1970 1976 /* we are reusing so sync this buffer for CPU use */ ··· 2123 2113 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free 2124 2114 * @rx_ring: rx descriptor ring to transact packets on 2125 2115 * @rx_buffer: rx buffer to pull data from 2116 + * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call 2126 2117 * 2127 2118 * This function will clean up the contents of the rx_buffer. It will 2128 2119 * either recycle the buffer or unmap it and free the associated resources. 
2129 2120 */ 2130 2121 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, 2131 - struct i40e_rx_buffer *rx_buffer) 2122 + struct i40e_rx_buffer *rx_buffer, 2123 + int rx_buffer_pgcnt) 2132 2124 { 2133 - if (i40e_can_reuse_rx_page(rx_buffer)) { 2125 + if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 2134 2126 /* hand second half of page back to the ring */ 2135 2127 i40e_reuse_rx_page(rx_ring, rx_buffer); 2136 2128 } else { ··· 2359 2347 while (likely(total_rx_packets < (unsigned int)budget)) { 2360 2348 struct i40e_rx_buffer *rx_buffer; 2361 2349 union i40e_rx_desc *rx_desc; 2350 + int rx_buffer_pgcnt; 2362 2351 unsigned int size; 2363 2352 u64 qword; 2364 2353 ··· 2402 2389 break; 2403 2390 2404 2391 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); 2405 - rx_buffer = i40e_get_rx_buffer(rx_ring, size); 2392 + rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2406 2393 2407 2394 /* retrieve a buffer from the ring */ 2408 2395 if (!skb) { ··· 2445 2432 break; 2446 2433 } 2447 2434 2448 - i40e_put_rx_buffer(rx_ring, rx_buffer); 2435 + i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2449 2436 cleaned_count++; 2450 2437 2451 2438 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
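This i40e change, and the matching ice and ixgbe hunks below, close a race in the page-recycling heuristic: once a buffer has been handed to an XDP program, xdp_do_redirect() may drop its page reference from another context, so reading page_count() afterwards can make a still-in-flight page look exclusively owned and let it be recycled prematurely. The drivers therefore snapshot the count in the get_rx_buffer path (only when PAGE_SIZE < 8192, where the heuristic is used) and compare against the snapshot later. The decision in isolation, as a sketch:

    /* Reuse test against a refcount snapshot taken before the XDP
     * program ran, instead of a racy late page_count() read. */
    static bool example_can_reuse(struct page *page, int pgcnt_snapshot,
                                  unsigned int pagecnt_bias)
    {
            if (page_is_pfmemalloc(page))
                    return false;   /* never keep emergency-reserve pages */

            /* Exclusive owner iff nothing beyond our bias holds it. */
            return (pgcnt_snapshot - pagecnt_bias) <= 1;
    }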
+22 -9
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 762 762 /** 763 763 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx 764 764 * @rx_buf: buffer containing the page 765 + * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call 765 766 * 766 767 * If page is reusable, we have a green light for calling ice_reuse_rx_page, 767 768 * which will assign the current buffer to the buffer that next_to_alloc is 768 769 * pointing to; otherwise, the DMA mapping needs to be destroyed and 769 770 * page freed 770 771 */ 771 - static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) 772 + static bool 773 + ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) 772 774 { 773 775 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; 774 776 struct page *page = rx_buf->page; ··· 781 779 782 780 #if (PAGE_SIZE < 8192) 783 781 /* if we are only owner of page we can reuse it */ 784 - if (unlikely((page_count(page) - pagecnt_bias) > 1)) 782 + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) 785 783 return false; 786 784 #else 787 785 #define ICE_LAST_OFFSET \ ··· 866 864 * @rx_ring: Rx descriptor ring to transact packets on 867 865 * @skb: skb to be used 868 866 * @size: size of buffer to add to skb 867 + * @rx_buf_pgcnt: rx_buf page refcount 869 868 * 870 869 * This function will pull an Rx buffer from the ring and synchronize it 871 870 * for use by the CPU. 872 871 */ 873 872 static struct ice_rx_buf * 874 873 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, 875 - const unsigned int size) 874 + const unsigned int size, int *rx_buf_pgcnt) 876 875 { 877 876 struct ice_rx_buf *rx_buf; 878 877 879 878 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; 879 + *rx_buf_pgcnt = 880 + #if (PAGE_SIZE < 8192) 881 + page_count(rx_buf->page); 882 + #else 883 + 0; 884 + #endif 880 885 prefetchw(rx_buf->page); 881 886 *skb = rx_buf->skb; 882 887 ··· 1015 1006 * ice_put_rx_buf - Clean up used buffer and either recycle or free 1016 1007 * @rx_ring: Rx descriptor ring to transact packets on 1017 1008 * @rx_buf: Rx buffer to pull data from 1009 + * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect() 1018 1010 * 1019 1011 * This function will update next_to_clean and then clean up the contents 1020 1012 * of the rx_buf. It will either recycle the buffer or unmap it and free 1021 1013 * the associated resources. 
1022 1014 */ 1023 - static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) 1015 + static void 1016 + ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 1017 + int rx_buf_pgcnt) 1024 1018 { 1025 1019 u16 ntc = rx_ring->next_to_clean + 1; 1026 1020 ··· 1034 1022 if (!rx_buf) 1035 1023 return; 1036 1024 1037 - if (ice_can_reuse_rx_page(rx_buf)) { 1025 + if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { 1038 1026 /* hand second half of page back to the ring */ 1039 1027 ice_reuse_rx_page(rx_ring, rx_buf); 1040 1028 } else { ··· 1109 1097 struct sk_buff *skb; 1110 1098 unsigned int size; 1111 1099 u16 stat_err_bits; 1100 + int rx_buf_pgcnt; 1112 1101 u16 vlan_tag = 0; 1113 1102 u8 rx_ptype; 1114 1103 ··· 1132 1119 dma_rmb(); 1133 1120 1134 1121 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 1135 - ice_put_rx_buf(rx_ring, NULL); 1122 + ice_put_rx_buf(rx_ring, NULL, 0); 1136 1123 cleaned_count++; 1137 1124 continue; 1138 1125 } ··· 1141 1128 ICE_RX_FLX_DESC_PKT_LEN_M; 1142 1129 1143 1130 /* retrieve a buffer from the ring */ 1144 - rx_buf = ice_get_rx_buf(rx_ring, &skb, size); 1131 + rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt); 1145 1132 1146 1133 if (!size) { 1147 1134 xdp.data = NULL; ··· 1181 1168 total_rx_pkts++; 1182 1169 1183 1170 cleaned_count++; 1184 - ice_put_rx_buf(rx_ring, rx_buf); 1171 + ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1185 1172 continue; 1186 1173 construct_skb: 1187 1174 if (skb) { ··· 1200 1187 break; 1201 1188 } 1202 1189 1203 - ice_put_rx_buf(rx_ring, rx_buf); 1190 + ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1204 1191 cleaned_count++; 1205 1192 1206 1193 /* skip if it is NOP desc */
+5
drivers/net/ethernet/intel/igb/igb.h
··· 138 138 /* this is the size past which hardware will drop packets when setting LPE=0 */ 139 139 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 140 140 141 + #define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) 142 + 141 143 /* Supported Rx Buffer Sizes */ 142 144 #define IGB_RXBUFFER_256 256 143 145 #define IGB_RXBUFFER_1536 1536 ··· 249 247 #define IGB_SFF_ADDRESSING_MODE 0x4 250 248 #define IGB_SFF_8472_UNSUP 0x00 251 249 250 + /* TX resources are shared between XDP and netstack 251 + * and we need to tag the buffer type to distinguish them 252 + */ 252 253 enum igb_tx_buf_type { 253 254 IGB_TYPE_SKB = 0, 254 255 IGB_TYPE_XDP,
+26 -11
drivers/net/ethernet/intel/igb/igb_main.c
··· 2824 2824 } 2825 2825 } 2826 2826 2827 - static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog) 2827 + static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) 2828 2828 { 2829 - int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 2829 + int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; 2830 2830 struct igb_adapter *adapter = netdev_priv(dev); 2831 + struct bpf_prog *prog = bpf->prog, *old_prog; 2831 2832 bool running = netif_running(dev); 2832 - struct bpf_prog *old_prog; 2833 2833 bool need_reset; 2834 2834 2835 2835 /* verify igb ring attributes are sufficient for XDP */ 2836 2836 for (i = 0; i < adapter->num_rx_queues; i++) { 2837 2837 struct igb_ring *ring = adapter->rx_ring[i]; 2838 2838 2839 - if (frame_size > igb_rx_bufsz(ring)) 2839 + if (frame_size > igb_rx_bufsz(ring)) { 2840 + NL_SET_ERR_MSG_MOD(bpf->extack, 2841 + "The RX buffer size is too small for the frame size"); 2842 + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", 2843 + igb_rx_bufsz(ring), frame_size); 2840 2844 return -EINVAL; 2845 + } 2841 2846 } 2842 2847 2843 2848 old_prog = xchg(&adapter->xdp_prog, prog); ··· 2874 2869 { 2875 2870 switch (xdp->command) { 2876 2871 case XDP_SETUP_PROG: 2877 - return igb_xdp_setup(dev, xdp->prog); 2872 + return igb_xdp_setup(dev, xdp); 2878 2873 default: 2879 2874 return -EINVAL; 2880 2875 } ··· 2915 2910 */ 2916 2911 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; 2917 2912 if (unlikely(!tx_ring)) 2918 - return -ENXIO; 2913 + return IGB_XDP_CONSUMED; 2919 2914 2920 2915 nq = txring_txq(tx_ring); 2921 2916 __netif_tx_lock(nq, cpu); 2917 + /* Avoid transmit queue timeout since we share it with the slow path */ 2918 + nq->trans_start = jiffies; 2922 2919 ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); 2923 2920 __netif_tx_unlock(nq); 2924 2921 ··· 2952 2945 2953 2946 nq = txring_txq(tx_ring); 2954 2947 __netif_tx_lock(nq, cpu); 2948 + 2949 + /* Avoid transmit queue timeout since we share it with the slow path */ 2950 + nq->trans_start = jiffies; 2955 2951 2956 2952 for (i = 0; i < n; i++) { 2957 2953 struct xdp_frame *xdpf = frames[i]; ··· 3960 3950 /* set default work limits */ 3961 3951 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; 3962 3952 3963 - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 3964 - VLAN_HLEN; 3953 + adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; 3965 3954 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3966 3955 3967 3956 spin_lock_init(&adapter->nfc_lock); ··· 6500 6491 static int igb_change_mtu(struct net_device *netdev, int new_mtu) 6501 6492 { 6502 6493 struct igb_adapter *adapter = netdev_priv(netdev); 6503 - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 6494 + int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; 6504 6495 6505 6496 if (adapter->xdp_prog) { 6506 6497 int i; ··· 6509 6500 struct igb_ring *ring = adapter->rx_ring[i]; 6510 6501 6511 6502 if (max_frame > igb_rx_bufsz(ring)) { 6512 - netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n"); 6503 + netdev_warn(adapter->netdev, 6504 + "Requested MTU size is not supported with XDP. 
Max frame size is %d\n", 6505 + max_frame); 6513 6506 return -EINVAL; 6514 6507 } 6515 6508 } ··· 8362 8351 SKB_DATA_ALIGN(xdp->data_end - 8363 8352 xdp->data_hard_start); 8364 8353 #endif 8354 + unsigned int metasize = xdp->data - xdp->data_meta; 8365 8355 struct sk_buff *skb; 8366 8356 8367 8357 /* prefetch first cache line of first page */ ··· 8376 8364 /* update pointers within the skb to store the data */ 8377 8365 skb_reserve(skb, xdp->data - xdp->data_hard_start); 8378 8366 __skb_put(skb, xdp->data_end - xdp->data); 8367 + 8368 + if (metasize) 8369 + skb_metadata_set(skb, metasize); 8379 8370 8380 8371 /* pull timestamp out of packet data */ 8381 8372 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { ··· 8786 8771 rx_ring->skb = skb; 8787 8772 8788 8773 if (xdp_xmit & IGB_XDP_REDIR) 8789 - xdp_do_flush_map(); 8774 + xdp_do_flush(); 8790 8775 8791 8776 if (xdp_xmit & IGB_XDP_TX) { 8792 8777 struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
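Several threads run through the igb hunk above: igb_xdp_setup() now receives the whole struct netdev_bpf so refusals can be explained through netlink extack; the TX queues shared between XDP and the stack touch nq->trans_start so the watchdog never sees a stale timestamp; and the recurring header arithmetic is folded into IGB_ETH_PKT_HDR_PAD. The extack part in sketch form, with hypothetical foo_* names and a placeholder size check:

    static int foo_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
    {
            /* Placeholder limit standing in for the per-ring test. */
            if (dev->mtu + 18 > 2048) {
                    NL_SET_ERR_MSG_MOD(bpf->extack,
                                       "MTU too large for the XDP buffers");
                    return -EINVAL; /* userspace still sees the message */
            }

            /* ... swap bpf->prog into the driver ... */
            return 0;
    }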
+17 -7
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 1945 1945 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 1946 1946 } 1947 1947 1948 - static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) 1948 + static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer, 1949 + int rx_buffer_pgcnt) 1949 1950 { 1950 1951 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1951 1952 struct page *page = rx_buffer->page; ··· 1957 1956 1958 1957 #if (PAGE_SIZE < 8192) 1959 1958 /* if we are only owner of page we can reuse it */ 1960 - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) 1959 + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 1961 1960 return false; 1962 1961 #else 1963 1962 /* The last offset is a bit aggressive in that we assume the ··· 2022 2021 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, 2023 2022 union ixgbe_adv_rx_desc *rx_desc, 2024 2023 struct sk_buff **skb, 2025 - const unsigned int size) 2024 + const unsigned int size, 2025 + int *rx_buffer_pgcnt) 2026 2026 { 2027 2027 struct ixgbe_rx_buffer *rx_buffer; 2028 2028 2029 2029 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 2030 + *rx_buffer_pgcnt = 2031 + #if (PAGE_SIZE < 8192) 2032 + page_count(rx_buffer->page); 2033 + #else 2034 + 0; 2035 + #endif 2030 2036 prefetchw(rx_buffer->page); 2031 2037 *skb = rx_buffer->skb; 2032 2038 ··· 2063 2055 2064 2056 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, 2065 2057 struct ixgbe_rx_buffer *rx_buffer, 2066 - struct sk_buff *skb) 2058 + struct sk_buff *skb, 2059 + int rx_buffer_pgcnt) 2067 2060 { 2068 - if (ixgbe_can_reuse_rx_page(rx_buffer)) { 2061 + if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 2069 2062 /* hand second half of page back to the ring */ 2070 2063 ixgbe_reuse_rx_page(rx_ring, rx_buffer); 2071 2064 } else { ··· 2312 2303 union ixgbe_adv_rx_desc *rx_desc; 2313 2304 struct ixgbe_rx_buffer *rx_buffer; 2314 2305 struct sk_buff *skb; 2306 + int rx_buffer_pgcnt; 2315 2307 unsigned int size; 2316 2308 2317 2309 /* return some buffers to hardware, one at a time is too slow */ ··· 2332 2322 */ 2333 2323 dma_rmb(); 2334 2324 2335 - rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); 2325 + rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt); 2336 2326 2337 2327 /* retrieve a buffer from the ring */ 2338 2328 if (!skb) { ··· 2377 2367 break; 2378 2368 } 2379 2369 2380 - ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); 2370 + ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); 2381 2371 cleaned_count++; 2382 2372 2383 2373 /* place incomplete frames back on ring for completion */
+3 -1
drivers/net/ethernet/marvell/prestera/prestera_main.c
··· 313 313 goto err_port_init; 314 314 } 315 315 316 - if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) 316 + if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) { 317 + err = -EINVAL; 317 318 goto err_port_init; 319 + } 318 320 319 321 /* firmware requires that port's MAC address consist of the first 320 322 * 5 bytes of the base MAC address
+14 -7
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 1378 1378 tx_ring->cons, tx_ring->prod); 1379 1379 1380 1380 priv->port_stats.tx_timeout++; 1381 - en_dbg(DRV, priv, "Scheduling watchdog\n"); 1382 - queue_work(mdev->workqueue, &priv->watchdog_task); 1381 + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) { 1382 + en_dbg(DRV, priv, "Scheduling port restart\n"); 1383 + queue_work(mdev->workqueue, &priv->restart_task); 1384 + } 1383 1385 } 1384 1386 1385 1387 ··· 1735 1733 mlx4_en_deactivate_cq(priv, cq); 1736 1734 goto tx_err; 1737 1735 } 1736 + clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state); 1738 1737 if (t != TX_XDP) { 1739 1738 tx_ring->tx_queue = netdev_get_tx_queue(dev, i); 1740 1739 tx_ring->recycle_ring = NULL; ··· 1832 1829 local_bh_enable(); 1833 1830 } 1834 1831 1832 + clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state); 1835 1833 netif_tx_start_all_queues(dev); 1836 1834 netif_device_attach(dev); 1837 1835 ··· 2003 1999 static void mlx4_en_restart(struct work_struct *work) 2004 2000 { 2005 2001 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2006 - watchdog_task); 2002 + restart_task); 2007 2003 struct mlx4_en_dev *mdev = priv->mdev; 2008 2004 struct net_device *dev = priv->dev; 2009 2005 ··· 2380 2376 if (netif_running(dev)) { 2381 2377 mutex_lock(&mdev->state_lock); 2382 2378 if (!mdev->device_up) { 2383 - /* NIC is probably restarting - let watchdog task reset 2379 + /* NIC is probably restarting - let restart task reset 2384 2380 * the port */ 2385 2381 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 2386 2382 } else { ··· 2389 2385 if (err) { 2390 2386 en_err(priv, "Failed restarting port:%d\n", 2391 2387 priv->port); 2392 - queue_work(mdev->workqueue, &priv->watchdog_task); 2388 + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, 2389 + &priv->state)) 2390 + queue_work(mdev->workqueue, &priv->restart_task); 2393 2391 } 2394 2392 } 2395 2393 mutex_unlock(&mdev->state_lock); ··· 2797 2791 if (err) { 2798 2792 en_err(priv, "Failed starting port %d for XDP change\n", 2799 2793 priv->port); 2800 - queue_work(mdev->workqueue, &priv->watchdog_task); 2794 + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) 2795 + queue_work(mdev->workqueue, &priv->restart_task); 2801 2796 } 2802 2797 } 2803 2798 ··· 3171 3164 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 3172 3165 spin_lock_init(&priv->stats_lock); 3173 3166 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 3174 - INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 3167 + INIT_WORK(&priv->restart_task, mlx4_en_restart); 3175 3168 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 3176 3169 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 3177 3170 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
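The mlx4 rename from watchdog_task to restart_task comes with a single-flight guarantee: every scheduling site first claims MLX4_EN_STATE_FLAG_RESTARTING via test_and_set_bit(), so concurrent error paths queue at most one restart, and the bit is cleared once the port is up again. The pattern reduced to its essentials, with example_* placeholders:

    struct example_priv {
            unsigned long state;            /* bit array for flags */
            struct workqueue_struct *wq;
            struct work_struct restart_task;
    };

    #define EXAMPLE_RESTARTING 0            /* bit number in ->state */

    static void example_schedule_restart(struct example_priv *priv)
    {
            /* First caller wins; later callers find the bit set. */
            if (!test_and_set_bit(EXAMPLE_RESTARTING, &priv->state))
                    queue_work(priv->wq, &priv->restart_task);
    }

    /* The work handler clears the bit once the device is running:
     *         clear_bit(EXAMPLE_RESTARTING, &priv->state);
     */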
+33 -7
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 392 392 return cnt; 393 393 } 394 394 395 + static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe, 396 + u16 cqe_index, struct mlx4_en_tx_ring *ring) 397 + { 398 + struct mlx4_en_dev *mdev = priv->mdev; 399 + struct mlx4_en_tx_info *tx_info; 400 + struct mlx4_en_tx_desc *tx_desc; 401 + u16 wqe_index; 402 + int desc_size; 403 + 404 + en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n", 405 + ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome); 406 + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe), 407 + false); 408 + 409 + wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask; 410 + tx_info = &ring->tx_info[wqe_index]; 411 + desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE; 412 + en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn, 413 + wqe_index, desc_size); 414 + tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE); 415 + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false); 416 + 417 + if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) 418 + return; 419 + 420 + en_err(priv, "Scheduling port restart\n"); 421 + queue_work(mdev->workqueue, &priv->restart_task); 422 + } 423 + 395 424 int mlx4_en_process_tx_cq(struct net_device *dev, 396 425 struct mlx4_en_cq *cq, int napi_budget) 397 426 { ··· 467 438 dma_rmb(); 468 439 469 440 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == 470 - MLX4_CQE_OPCODE_ERROR)) { 471 - struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; 472 - 473 - en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n", 474 - cqe_err->vendor_err_syndrome, 475 - cqe_err->syndrome); 476 - } 441 + MLX4_CQE_OPCODE_ERROR)) 442 + if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state)) 443 + mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index, 444 + ring); 477 445 478 446 /* Skip over last polled CQE */ 479 447 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+11 -1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 251 251 } buf[MLX4_EN_CACHE_SIZE]; 252 252 }; 253 253 254 + enum { 255 + MLX4_EN_TX_RING_STATE_RECOVERING, 256 + }; 257 + 254 258 struct mlx4_en_priv; 255 259 256 260 struct mlx4_en_tx_ring { ··· 301 297 * Only queue_stopped might be used if BQL is not properly working. 302 298 */ 303 299 unsigned long queue_stopped; 300 + unsigned long state; 304 301 struct mlx4_hwq_resources sp_wqres; 305 302 struct mlx4_qp sp_qp; 306 303 struct mlx4_qp_context sp_context; ··· 515 510 struct mutex mutex; /* for mutual access to stats bitmap */ 516 511 }; 517 512 513 + enum { 514 + MLX4_EN_STATE_FLAG_RESTARTING, 515 + }; 516 + 518 517 struct mlx4_en_priv { 519 518 struct mlx4_en_dev *mdev; 520 519 struct mlx4_en_port_profile *prof; ··· 584 575 struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; 585 576 struct mlx4_qp drop_qp; 586 577 struct work_struct rx_mode_task; 587 - struct work_struct watchdog_task; 578 + struct work_struct restart_task; 588 579 struct work_struct linkstate_task; 589 580 struct delayed_work stats_task; 590 581 struct delayed_work service_task; ··· 629 620 u32 pflags; 630 621 u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; 631 622 u8 rss_hash_fn; 623 + unsigned long state; 632 624 }; 633 625 634 626 enum mlx4_en_wol {
+1
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 199 199 config MLX5_SW_STEERING 200 200 bool "Mellanox Technologies software-managed steering" 201 201 depends on MLX5_CORE_EN && MLX5_ESWITCH 202 + select CRC32 202 203 default y 203 204 help 204 205 Build support for software-managed steering in the NIC.
+1
drivers/net/ethernet/microchip/Kconfig
··· 47 47 depends on PCI 48 48 select PHYLIB 49 49 select CRC16 50 + select CRC32 50 51 help 51 52 Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip 52 53
+5 -4
drivers/net/ethernet/mscc/ocelot.c
··· 1551 1551 SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING); 1552 1552 1553 1553 /* Setup flooding PGIDs */ 1554 - ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | 1555 - ANA_FLOODING_FLD_BROADCAST(PGID_MC) | 1556 - ANA_FLOODING_FLD_UNICAST(PGID_UC), 1557 - ANA_FLOODING, 0); 1554 + for (i = 0; i < ocelot->num_flooding_pgids; i++) 1555 + ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | 1556 + ANA_FLOODING_FLD_BROADCAST(PGID_MC) | 1557 + ANA_FLOODING_FLD_UNICAST(PGID_UC), 1558 + ANA_FLOODING, i); 1558 1559 ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) | 1559 1560 ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) | 1560 1561 ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
+1
drivers/net/ethernet/mscc/ocelot_vsc7514.c
··· 1254 1254 } 1255 1255 1256 1256 ocelot->num_phys_ports = of_get_child_count(ports); 1257 + ocelot->num_flooding_pgids = 1; 1257 1258 1258 1259 ocelot->vcap = vsc7514_vcap_props; 1259 1260 ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
+1
drivers/net/ethernet/netronome/Kconfig
··· 22 22 depends on VXLAN || VXLAN=n 23 23 depends on TLS && TLS_DEVICE || TLS_DEVICE=n 24 24 select NET_DEVLINK 25 + select CRC32 25 26 help 26 27 This driver supports the Netronome(R) NFP4000/NFP6000 based 27 28 cards working as an advanced Ethernet NIC. It works with both
-6
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 3562 3562 struct nfp_net_dp *dp; 3563 3563 int err; 3564 3564 3565 - if (!xdp_attachment_flags_ok(&nn->xdp, bpf)) 3566 - return -EBUSY; 3567 - 3568 3565 if (!prog == !nn->dp.xdp_prog) { 3569 3566 WRITE_ONCE(nn->dp.xdp_prog, prog); 3570 3567 xdp_attachment_setup(&nn->xdp, bpf); ··· 3589 3592 static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf) 3590 3593 { 3591 3594 int err; 3592 - 3593 - if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf)) 3594 - return -EBUSY; 3595 3595 3596 3596 err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack); 3597 3597 if (err)
+1
drivers/net/ethernet/nxp/Kconfig
··· 3 3 tristate "NXP ethernet MAC on LPC devices" 4 4 depends on ARCH_LPC32XX || COMPILE_TEST 5 5 select PHYLIB 6 + select CRC32 6 7 help 7 8 Say Y or M here if you want to use the NXP ethernet MAC included on 8 9 some NXP LPC devices. You can safely enable this option for LPC32xx
+1
drivers/net/ethernet/rocker/Kconfig
··· 19 19 config ROCKER 20 20 tristate "Rocker switch driver (EXPERIMENTAL)" 21 21 depends on PCI && NET_SWITCHDEV && BRIDGE 22 + select CRC32 22 23 help 23 24 This driver supports the Rocker switch device. 24 25
+1 -8
drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
··· 246 246 goto err_parse_dt; 247 247 } 248 248 249 - ret = dma_set_mask_and_coherent(&pdev->dev, 250 - DMA_BIT_MASK(dwmac->ops->addr_width)); 251 - if (ret) { 252 - dev_err(&pdev->dev, "DMA mask set failed\n"); 253 - goto err_dma_mask; 254 - } 255 - 249 + plat_dat->addr64 = dwmac->ops->addr_width; 256 250 plat_dat->init = imx_dwmac_init; 257 251 plat_dat->exit = imx_dwmac_exit; 258 252 plat_dat->fix_mac_speed = imx_dwmac_fix_speed; ··· 266 272 err_dwmac_init: 267 273 err_drv_probe: 268 274 imx_dwmac_exit(pdev, plat_dat->bsp_priv); 269 - err_dma_mask: 270 275 err_parse_dt: 271 276 err_match_data: 272 277 stmmac_remove_config_dt(pdev, plat_dat);
+3 -3
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
··· 30 30 #define PRG_ETH0_EXT_RMII_MODE 4 31 31 32 32 /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ 33 - #define PRG_ETH0_CLK_M250_SEL_SHIFT 4 34 33 #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) 35 34 36 35 /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one ··· 154 155 return -ENOMEM; 155 156 156 157 clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0; 157 - clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; 158 - clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; 158 + clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK); 159 + clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >> 160 + clk_configs->m250_mux.shift; 159 161 clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents, 160 162 ARRAY_SIZE(mux_parents), &clk_mux_ops, 161 163 &clk_configs->m250_mux.hw);
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
··· 22 22 23 23 return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, 24 24 !(value & DMA_BUS_MODE_SFT_RESET), 25 - 10000, 100000); 25 + 10000, 1000000); 26 26 } 27 27 28 28 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+40 -11
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1558 1558 } 1559 1559 1560 1560 /** 1561 + * stmmac_free_tx_skbufs - free TX skb buffers 1562 + * @priv: private structure 1563 + */ 1564 + static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 1565 + { 1566 + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1567 + u32 queue; 1568 + 1569 + for (queue = 0; queue < tx_queue_cnt; queue++) 1570 + dma_free_tx_skbufs(priv, queue); 1571 + } 1572 + 1573 + /** 1561 1574 * free_dma_rx_desc_resources - free RX dma desc resources 1562 1575 * @priv: private structure 1563 1576 */ ··· 2938 2925 struct stmmac_priv *priv = netdev_priv(dev); 2939 2926 u32 chan; 2940 2927 2941 - if (priv->eee_enabled) 2942 - del_timer_sync(&priv->eee_ctrl_timer); 2943 - 2944 2928 if (device_may_wakeup(priv->device)) 2945 2929 phylink_speed_down(priv->phylink, false); 2946 2930 /* Stop and disconnect the PHY */ ··· 2955 2945 free_irq(priv->wol_irq, dev); 2956 2946 if (priv->lpi_irq > 0) 2957 2947 free_irq(priv->lpi_irq, dev); 2948 + 2949 + if (priv->eee_enabled) { 2950 + priv->tx_path_in_lpi_mode = false; 2951 + del_timer_sync(&priv->eee_ctrl_timer); 2952 + } 2958 2953 2959 2954 /* Stop TX/RX DMA and clear the descriptors */ 2960 2955 stmmac_stop_all_dma(priv); ··· 4975 4960 dev_info(priv->device, "SPH feature enabled\n"); 4976 4961 } 4977 4962 4963 + /* The IP register MAC_HW_Feature1[ADDR64] only defines the 4964 + * 32/40/64 bit widths, but some SoCs support other widths: the i.MX8MP, 4965 + * for example, supports 34 bits, which maps to the 40 bit width in MAC_HW_Feature1[ADDR64]. 4966 + * So overwrite dma_cap.addr64 according to the real HW design. 4967 + */ 4968 + if (priv->plat->addr64) 4969 + priv->dma_cap.addr64 = priv->plat->addr64; 4970 + 4978 4971 if (priv->dma_cap.addr64) { 4979 4972 ret = dma_set_mask_and_coherent(device, 4980 4973 DMA_BIT_MASK(priv->dma_cap.addr64)); ··· 5195 5172 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 5196 5173 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 5197 5174 5175 + if (priv->eee_enabled) { 5176 + priv->tx_path_in_lpi_mode = false; 5177 + del_timer_sync(&priv->eee_ctrl_timer); 5178 + } 5179 + 5198 5180 /* Stop TX/RX DMA */ 5199 5181 stmmac_stop_all_dma(priv); ··· 5305 5277 return ret; 5306 5278 } 5307 5279 5280 + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { 5281 + rtnl_lock(); 5282 + phylink_start(priv->phylink); 5283 + /* We may have called phylink_speed_down before */ 5284 + phylink_speed_up(priv->phylink); 5285 + rtnl_unlock(); 5286 + } 5287 + 5308 5288 rtnl_lock(); 5309 5289 mutex_lock(&priv->lock); 5310 5290 5311 5291 stmmac_reset_queues_param(priv); 5312 5292 5293 + stmmac_free_tx_skbufs(priv); 5313 5294 stmmac_clear_descriptors(priv); 5314 5295 5315 5296 stmmac_hw_setup(ndev, false); ··· 5331 5294 5332 5295 mutex_unlock(&priv->lock); 5333 5296 rtnl_unlock(); 5334 - 5335 - if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { 5336 - rtnl_lock(); 5337 - phylink_start(priv->phylink); 5338 - /* We may have called phylink_speed_down before */ 5339 - phylink_speed_up(priv->phylink); 5340 - rtnl_unlock(); 5341 - } 5342 5297 5343 5298 phylink_mac_change(priv->phylink, true); 5344 5299
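The addr64 override above ends in a dma_set_mask_and_coherent(dev, DMA_BIT_MASK(width)) call. A runnable sketch of how such a width-to-mask conversion behaves, with hypothetical values (the kernel's DMA_BIT_MASK() likewise special-cases 64 to avoid an undefined 64-bit shift):

#include <stdint.h>
#include <stdio.h>

static uint64_t bit_mask(unsigned int width)
{
        /* shifting a 64-bit value by 64 is undefined behaviour */
        return (width >= 64) ? ~0ULL : ((1ULL << width) - 1);
}

int main(void)
{
        unsigned int hw_width = 40;    /* what MAC_HW_Feature1[ADDR64] reports */
        unsigned int plat_width = 34;  /* what the SoC actually wires up */
        unsigned int width = plat_width ? plat_width : hw_width;

        printf("DMA mask for %u bits: %#llx\n",
               width, (unsigned long long)bit_mask(width));
        return 0;
}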
-3
drivers/net/ethernet/ti/cpsw_priv.c
··· 1265 1265 if (!priv->xdpi.prog && !prog) 1266 1266 return 0; 1267 1267 1268 - if (!xdp_attachment_flags_ok(&priv->xdpi, bpf)) 1269 - return -EBUSY; 1270 - 1271 1268 WRITE_ONCE(priv->xdp_prog, prog); 1272 1269 1273 1270 xdp_attachment_setup(&priv->xdpi, bpf);
+3 -6
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 1351 1351 struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np; 1352 1352 struct temac_local *lp; 1353 1353 struct net_device *ndev; 1354 - struct resource *res; 1355 1354 const void *addr; 1356 1355 __be32 *p; 1357 1356 bool little_endian; ··· 1499 1500 of_node_put(dma_np); 1500 1501 } else if (pdata) { 1501 1502 /* 2nd memory resource specifies DMA registers */ 1502 - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1503 - lp->sdma_regs = devm_ioremap(&pdev->dev, res->start, 1504 - resource_size(res)); 1505 - if (!lp->sdma_regs) { 1503 + lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1); 1504 + if (IS_ERR(lp->sdma_regs)) { 1506 1505 dev_err(&pdev->dev, 1507 1506 "could not map DMA registers\n"); 1508 - return -ENOMEM; 1507 + return PTR_ERR(lp->sdma_regs); 1509 1508 } 1510 1509 if (pdata->dma_little_endian) { 1511 1510 lp->dma_in = temac_dma_in32_le;
+4 -16
drivers/net/geneve.c
··· 258 258 skb_dst_set(skb, &tun_dst->dst); 259 259 260 260 /* Ignore packet loops (and multicast echo) */ 261 - if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) 262 - goto rx_error; 263 - 264 - switch (skb_protocol(skb, true)) { 265 - case htons(ETH_P_IP): 266 - if (pskb_may_pull(skb, sizeof(struct iphdr))) 267 - goto rx_error; 268 - break; 269 - case htons(ETH_P_IPV6): 270 - if (pskb_may_pull(skb, sizeof(struct ipv6hdr))) 271 - goto rx_error; 272 - break; 273 - default: 274 - goto rx_error; 261 + if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) { 262 + geneve->dev->stats.rx_errors++; 263 + goto drop; 275 264 } 265 + 276 266 oiph = skb_network_header(skb); 277 267 skb_reset_network_header(skb); 278 268 ··· 299 309 dev_sw_netstats_rx_add(geneve->dev, len); 300 310 301 311 return; 302 - rx_error: 303 - geneve->dev->stats.rx_errors++; 304 312 drop: 305 313 /* Consume bad packet */ 306 314 kfree_skb(skb);
+6 -1
drivers/net/ipa/gsi_trans.c
··· 156 156 /* The allocator will give us a power-of-2 number of pages. But we 157 157 * can't guarantee that, so request it. That way we won't waste any 158 158 * memory that would be available beyond the required space. 159 + * 160 + * Note that gsi_trans_pool_exit_dma() assumes the total allocated 161 + * size is exactly (count * size). 159 162 */ 160 163 total_size = get_order(total_size) << PAGE_SHIFT; 161 164 ··· 178 175 179 176 void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool) 180 177 { 181 - dma_free_coherent(dev, pool->size, pool->base, pool->addr); 178 + size_t total_size = pool->count * pool->size; 179 + 180 + dma_free_coherent(dev, total_size, pool->base, pool->addr); 182 181 memset(pool, 0, sizeof(*pool)); 183 182 } 184 183
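The gsi_trans fix above makes the teardown recompute the buffer size from the same count and size fields the allocation used, instead of freeing with a different size. A small userspace sketch of keeping alloc and free in agreement (hypothetical names; calloc()/free() stand in for the coherent DMA API, which does need the size at free time):

#include <stdlib.h>
#include <string.h>

struct pool {
        void *base;
        size_t count;  /* number of elements */
        size_t size;   /* size of one element */
};

/* stand-ins for dma_alloc_coherent()/dma_free_coherent() */
static void *dma_alloc(size_t total) { return calloc(1, total); }
static void dma_free(void *base, size_t total) { (void)total; free(base); }

static int pool_init(struct pool *p, size_t count, size_t size)
{
        p->base = dma_alloc(count * size);  /* total is exactly count * size */
        if (!p->base)
                return -1;
        p->count = count;
        p->size = size;
        return 0;
}

static void pool_exit(struct pool *p)
{
        /* derive the total exactly as the allocation did */
        dma_free(p->base, p->count * p->size);
        memset(p, 0, sizeof(*p));
}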
+10 -5
drivers/net/netdevsim/bpf.c
··· 63 63 nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) 64 64 { 65 65 struct nsim_bpf_bound_prog *state; 66 + int ret = 0; 66 67 67 68 state = env->prog->aux->offload->dev_priv; 68 69 if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx) 69 70 msleep(state->nsim_dev->bpf_bind_verifier_delay); 70 71 71 - if (insn_idx == env->prog->len - 1) 72 + if (insn_idx == env->prog->len - 1) { 72 73 pr_vlog(env, "Hello from netdevsim!\n"); 73 74 74 - return 0; 75 + if (!state->nsim_dev->bpf_bind_verifier_accept) 76 + ret = -EOPNOTSUPP; 77 + } 78 + 79 + return ret; 75 80 } 76 81 77 82 static int nsim_bpf_finalize(struct bpf_verifier_env *env) ··· 194 189 struct xdp_attachment_info *xdp) 195 190 { 196 191 int err; 197 - 198 - if (!xdp_attachment_flags_ok(xdp, bpf)) 199 - return -EBUSY; 200 192 201 193 if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) { 202 194 NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS"); ··· 600 598 &nsim_dev->bpf_bind_accept); 601 599 debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir, 602 600 &nsim_dev->bpf_bind_verifier_delay); 601 + nsim_dev->bpf_bind_verifier_accept = true; 602 + debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir, 603 + &nsim_dev->bpf_bind_verifier_accept); 603 604 return 0; 604 605 } 605 606
+1
drivers/net/netdevsim/netdevsim.h
··· 197 197 struct dentry *take_snapshot; 198 198 struct bpf_offload_dev *bpf_dev; 199 199 bool bpf_bind_accept; 200 + bool bpf_bind_verifier_accept; 200 201 u32 bpf_bind_verifier_delay; 201 202 struct dentry *ddir_bpf_bound_progs; 202 203 u32 prog_id_gen;
+8 -2
drivers/net/vrf.c
··· 1371 1371 int orig_iif = skb->skb_iif; 1372 1372 bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); 1373 1373 bool is_ndisc = ipv6_ndisc_frame(skb); 1374 + bool is_ll_src; 1374 1375 1375 1376 /* loopback, multicast & non-ND link-local traffic; do not push through 1376 - * packet taps again. Reset pkt_type for upper layers to process skb 1377 + * packet taps again. Reset pkt_type for upper layers to process skb. 1378 + * For packets with a lladdr src, however, skip this so that the dst can be 1379 + * determined at input using the original ifindex in the case that daddr 1380 + * needs strict handling 1377 1381 */ 1378 - if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { 1382 + is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL; 1383 + if (skb->pkt_type == PACKET_LOOPBACK || 1384 + (need_strict && !is_ndisc && !is_ll_src)) { 1379 1385 skb->dev = vrf_dev; 1380 1386 skb->skb_iif = vrf_dev->ifindex; 1381 1387 IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
+68 -6
drivers/pinctrl/aspeed/pinctrl-aspeed.c
··· 286 286 static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr) 287 287 { 288 288 /* 289 - * The signal type is GPIO if the signal name has "GPI" as a prefix. 290 - * strncmp (rather than strcmp) is used to implement the prefix 291 - * requirement. 289 + * We need to differentiate between GPIO and non-GPIO signals to 290 + * implement the gpio_request_enable() interface. For better or worse 291 + * the ASPEED pinctrl driver uses the expression names to determine 292 + * whether an expression will mux a pin for GPIO. 292 293 * 293 - * expr->signal might look like "GPIOB1" in the GPIO case. 294 - * expr->signal might look like "GPIT0" in the GPI case. 294 + * Generally we have the following - A GPIO such as B1 has: 295 + * 296 + * - expr->signal set to "GPIOB1" 297 + * - expr->function set to "GPIOB1" 298 + * 299 + * Using this fact we can determine whether the provided expression is 300 + * a GPIO expression by testing the signal name for the string prefix 301 + * "GPIO". 302 + * 303 + * However, some GPIOs are input-only, and the ASPEED datasheets name 304 + * them differently. An input-only GPIO such as T0 has: 305 + * 306 + * - expr->signal set to "GPIT0" 307 + * - expr->function set to "GPIT0" 308 + * 309 + * It's tempting to generalise the prefix test from "GPIO" to "GPI" to 310 + * account for both GPIOs and GPIs, but in doing so we run aground on 311 + * another feature: 312 + * 313 + * Some pins in the ASPEED BMC SoCs have a "pass-through" GPIO 314 + * function where the input state of one pin is replicated as the 315 + * output state of another (as if they were shorted together - a mux 316 + * configuration that is typically enabled by hardware strapping). 317 + * This feature allows the BMC to pass e.g. power button state through 318 + * to the host while the BMC is yet to boot, but take control of the 319 + * button state once the BMC has booted by muxing each pin as a 320 + * separate, pin-specific GPIO. 321 + * 322 + * Conceptually this pass-through mode is a form of GPIO and is named 323 + * as such in the datasheets, e.g. "GPID0". This naming similarity 324 + * trips us up with the simple GPI-prefixed-signal-name scheme 325 + * discussed above, as the pass-through configuration is not what we 326 + * want when muxing a pin as GPIO for the GPIO subsystem. 327 + * 328 + * On e.g. the AST2400, a pass-through function "GPID0" is grouped on 329 + * balls A18 and D16, where we have: 330 + * 331 + * For ball A18: 332 + * - expr->signal set to "GPID0IN" 333 + * - expr->function set to "GPID0" 334 + * 335 + * For ball D16: 336 + * - expr->signal set to "GPID0OUT" 337 + * - expr->function set to "GPID0" 338 + * 339 + * By contrast, the pin-specific GPIO expressions for the same pins are 340 + * as follows: 341 + * 342 + * For ball A18: 343 + * - expr->signal looks like "GPIOD0" 344 + * - expr->function looks like "GPIOD0" 345 + * 346 + * For ball D16: 347 + * - expr->signal looks like "GPIOD1" 348 + * - expr->function looks like "GPIOD1" 349 + * 350 + * Testing both the signal _and_ function names gives us the means to 351 + * differentiate the pass-through GPIO pinmux configuration from the 352 + * pin-specific configuration that the GPIO subsystem is after: An 353 + * expression is a pin-specific (non-pass-through) GPIO configuration 354 + * if the signal prefix is "GPI" and the signal name matches the 355 + * function name. 
295 356 */ 296 - return strncmp(expr->signal, "GPI", 3) == 0; 357 + return !strncmp(expr->signal, "GPI", 3) && 358 + !strcmp(expr->signal, expr->function); 297 359 } 298 360 299 361 static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
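The rule the comment builds up to is compact enough to test standalone. A runnable sketch of the predicate (the real function takes a struct aspeed_sig_expr rather than two strings):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_pin_specific_gpio(const char *signal, const char *function)
{
        /* prefix "GPI" and signal == function, per the comment above */
        return strncmp(signal, "GPI", 3) == 0 && strcmp(signal, function) == 0;
}

int main(void)
{
        printf("%d\n", is_pin_specific_gpio("GPIOB1", "GPIOB1")); /* 1: GPIO */
        printf("%d\n", is_pin_specific_gpio("GPIT0", "GPIT0"));   /* 1: input-only GPIO */
        printf("%d\n", is_pin_specific_gpio("GPID0IN", "GPID0")); /* 0: pass-through */
        return 0;
}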
+4 -3
drivers/pinctrl/aspeed/pinmux-aspeed.h
··· 452 452 * evaluation of the descriptors. 453 453 * 454 454 * @signal: The signal name for the priority level on the pin. If the signal 455 - * type is GPIO, then the signal name must begin with the string 456 - * "GPIO", e.g. GPIOA0, GPIOT4 etc. 455 + * type is GPIO, then the signal name must begin with the 456 + * prefix "GPI", e.g. GPIOA0, GPIT0 etc. 457 457 * @function: The name of the function the signal participates in for the 458 - * associated expression 458 + * associated expression. For pin-specific GPIO, the function 459 + * name must match the signal name. 459 460 * @ndescs: The number of signal descriptors in the expression 460 461 * @descs: Pointer to an array of signal descriptors that comprise the 461 462 * function expression
+7 -1
drivers/pinctrl/intel/pinctrl-baytrail.c
··· 1049 1049 break; 1050 1050 case PIN_CONFIG_INPUT_DEBOUNCE: 1051 1051 debounce = readl(db_reg); 1052 - debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1053 1052 1054 1053 if (arg) 1055 1054 conf |= BYT_DEBOUNCE_EN; ··· 1057 1058 1058 1059 switch (arg) { 1059 1060 case 375: 1061 + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1060 1062 debounce |= BYT_DEBOUNCE_PULSE_375US; 1061 1063 break; 1062 1064 case 750: 1065 + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1063 1066 debounce |= BYT_DEBOUNCE_PULSE_750US; 1064 1067 break; 1065 1068 case 1500: 1069 + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1066 1070 debounce |= BYT_DEBOUNCE_PULSE_1500US; 1067 1071 break; 1068 1072 case 3000: 1073 + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1069 1074 debounce |= BYT_DEBOUNCE_PULSE_3MS; 1070 1075 break; 1071 1076 case 6000: 1077 + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1072 1078 debounce |= BYT_DEBOUNCE_PULSE_6MS; 1073 1079 break; 1074 1080 case 12000: 1081 + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1075 1082 debounce |= BYT_DEBOUNCE_PULSE_12MS; 1076 1083 break; 1077 1084 case 24000: 1085 + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1078 1086 debounce |= BYT_DEBOUNCE_PULSE_24MS; 1079 1087 break; 1080 1088 default:
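The baytrail fix above moves the mask clear into each recognized case so that an unsupported debounce value leaves the register field untouched rather than zeroing it. A sketch of the read-modify-write idiom with hypothetical register bits:

#include <stdint.h>

#define PULSE_MASK   0x7u
#define PULSE_375US  0x1u
#define PULSE_750US  0x2u

static int set_debounce(uint32_t *reg, unsigned int usecs)
{
        uint32_t v = *reg;

        switch (usecs) {
        case 375:
                v = (v & ~PULSE_MASK) | PULSE_375US;  /* clear only when writing */
                break;
        case 750:
                v = (v & ~PULSE_MASK) | PULSE_750US;
                break;
        default:
                return -1;  /* unknown value: leave the field as it was */
        }
        *reg = v;
        return 0;
}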
+2 -2
drivers/pinctrl/intel/pinctrl-intel.c
··· 442 442 value |= PADCFG0_PMODE_GPIO; 443 443 444 444 /* Disable input and output buffers */ 445 - value &= ~PADCFG0_GPIORXDIS; 446 - value &= ~PADCFG0_GPIOTXDIS; 445 + value |= PADCFG0_GPIORXDIS; 446 + value |= PADCFG0_GPIOTXDIS; 447 447 448 448 /* Disable SCI/SMI/NMI generation */ 449 449 value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
+231 -221
drivers/pinctrl/intel/pinctrl-jasperlake.c
··· 16 16 17 17 #define JSL_PAD_OWN 0x020 18 18 #define JSL_PADCFGLOCK 0x080 19 - #define JSL_HOSTSW_OWN 0x0b0 19 + #define JSL_HOSTSW_OWN 0x0c0 20 20 #define JSL_GPI_IS 0x100 21 21 #define JSL_GPI_IE 0x120 22 22 ··· 65 65 PINCTRL_PIN(17, "EMMC_CLK"), 66 66 PINCTRL_PIN(18, "EMMC_RESETB"), 67 67 PINCTRL_PIN(19, "A4WP_PRESENT"), 68 + /* SPI */ 69 + PINCTRL_PIN(20, "SPI0_IO_2"), 70 + PINCTRL_PIN(21, "SPI0_IO_3"), 71 + PINCTRL_PIN(22, "SPI0_MOSI_IO_0"), 72 + PINCTRL_PIN(23, "SPI0_MISO_IO_1"), 73 + PINCTRL_PIN(24, "SPI0_TPM_CSB"), 74 + PINCTRL_PIN(25, "SPI0_FLASH_0_CSB"), 75 + PINCTRL_PIN(26, "SPI0_FLASH_1_CSB"), 76 + PINCTRL_PIN(27, "SPI0_CLK"), 77 + PINCTRL_PIN(28, "SPI0_CLK_LOOPBK"), 68 78 /* GPP_B */ 69 - PINCTRL_PIN(20, "CORE_VID_0"), 70 - PINCTRL_PIN(21, "CORE_VID_1"), 71 - PINCTRL_PIN(22, "VRALERTB"), 72 - PINCTRL_PIN(23, "CPU_GP_2"), 73 - PINCTRL_PIN(24, "CPU_GP_3"), 74 - PINCTRL_PIN(25, "SRCCLKREQB_0"), 75 - PINCTRL_PIN(26, "SRCCLKREQB_1"), 76 - PINCTRL_PIN(27, "SRCCLKREQB_2"), 77 - PINCTRL_PIN(28, "SRCCLKREQB_3"), 78 - PINCTRL_PIN(29, "SRCCLKREQB_4"), 79 - PINCTRL_PIN(30, "SRCCLKREQB_5"), 80 - PINCTRL_PIN(31, "PMCALERTB"), 81 - PINCTRL_PIN(32, "SLP_S0B"), 82 - PINCTRL_PIN(33, "PLTRSTB"), 83 - PINCTRL_PIN(34, "SPKR"), 84 - PINCTRL_PIN(35, "GSPI0_CS0B"), 85 - PINCTRL_PIN(36, "GSPI0_CLK"), 86 - PINCTRL_PIN(37, "GSPI0_MISO"), 87 - PINCTRL_PIN(38, "GSPI0_MOSI"), 88 - PINCTRL_PIN(39, "GSPI1_CS0B"), 89 - PINCTRL_PIN(40, "GSPI1_CLK"), 90 - PINCTRL_PIN(41, "GSPI1_MISO"), 91 - PINCTRL_PIN(42, "GSPI1_MOSI"), 92 - PINCTRL_PIN(43, "DDSP_HPD_A"), 93 - PINCTRL_PIN(44, "GSPI0_CLK_LOOPBK"), 94 - PINCTRL_PIN(45, "GSPI1_CLK_LOOPBK"), 79 + PINCTRL_PIN(29, "CORE_VID_0"), 80 + PINCTRL_PIN(30, "CORE_VID_1"), 81 + PINCTRL_PIN(31, "VRALERTB"), 82 + PINCTRL_PIN(32, "CPU_GP_2"), 83 + PINCTRL_PIN(33, "CPU_GP_3"), 84 + PINCTRL_PIN(34, "SRCCLKREQB_0"), 85 + PINCTRL_PIN(35, "SRCCLKREQB_1"), 86 + PINCTRL_PIN(36, "SRCCLKREQB_2"), 87 + PINCTRL_PIN(37, "SRCCLKREQB_3"), 88 + PINCTRL_PIN(38, "SRCCLKREQB_4"), 89 + PINCTRL_PIN(39, "SRCCLKREQB_5"), 90 + PINCTRL_PIN(40, "PMCALERTB"), 91 + PINCTRL_PIN(41, "SLP_S0B"), 92 + PINCTRL_PIN(42, "PLTRSTB"), 93 + PINCTRL_PIN(43, "SPKR"), 94 + PINCTRL_PIN(44, "GSPI0_CS0B"), 95 + PINCTRL_PIN(45, "GSPI0_CLK"), 96 + PINCTRL_PIN(46, "GSPI0_MISO"), 97 + PINCTRL_PIN(47, "GSPI0_MOSI"), 98 + PINCTRL_PIN(48, "GSPI1_CS0B"), 99 + PINCTRL_PIN(49, "GSPI1_CLK"), 100 + PINCTRL_PIN(50, "GSPI1_MISO"), 101 + PINCTRL_PIN(51, "GSPI1_MOSI"), 102 + PINCTRL_PIN(52, "DDSP_HPD_A"), 103 + PINCTRL_PIN(53, "GSPI0_CLK_LOOPBK"), 104 + PINCTRL_PIN(54, "GSPI1_CLK_LOOPBK"), 95 105 /* GPP_A */ 96 - PINCTRL_PIN(46, "ESPI_IO_0"), 97 - PINCTRL_PIN(47, "ESPI_IO_1"), 98 - PINCTRL_PIN(48, "ESPI_IO_2"), 99 - PINCTRL_PIN(49, "ESPI_IO_3"), 100 - PINCTRL_PIN(50, "ESPI_CSB"), 101 - PINCTRL_PIN(51, "ESPI_CLK"), 102 - PINCTRL_PIN(52, "ESPI_RESETB"), 103 - PINCTRL_PIN(53, "SMBCLK"), 104 - PINCTRL_PIN(54, "SMBDATA"), 105 - PINCTRL_PIN(55, "SMBALERTB"), 106 - PINCTRL_PIN(56, "CPU_GP_0"), 107 - PINCTRL_PIN(57, "CPU_GP_1"), 108 - PINCTRL_PIN(58, "USB2_OCB_1"), 109 - PINCTRL_PIN(59, "USB2_OCB_2"), 110 - PINCTRL_PIN(60, "USB2_OCB_3"), 111 - PINCTRL_PIN(61, "DDSP_HPD_A_TIME_SYNC_0"), 112 - PINCTRL_PIN(62, "DDSP_HPD_B"), 113 - PINCTRL_PIN(63, "DDSP_HPD_C"), 114 - PINCTRL_PIN(64, "USB2_OCB_0"), 115 - PINCTRL_PIN(65, "PCHHOTB"), 116 - PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"), 106 + PINCTRL_PIN(55, "ESPI_IO_0"), 107 + PINCTRL_PIN(56, "ESPI_IO_1"), 108 + PINCTRL_PIN(57, "ESPI_IO_2"), 109 + PINCTRL_PIN(58, "ESPI_IO_3"),
110 + PINCTRL_PIN(59, "ESPI_CSB"), 111 + PINCTRL_PIN(60, "ESPI_CLK"), 112 + PINCTRL_PIN(61, "ESPI_RESETB"), 113 + PINCTRL_PIN(62, "SMBCLK"), 114 + PINCTRL_PIN(63, "SMBDATA"), 115 + PINCTRL_PIN(64, "SMBALERTB"), 116 + PINCTRL_PIN(65, "CPU_GP_0"), 117 + PINCTRL_PIN(66, "CPU_GP_1"), 118 + PINCTRL_PIN(67, "USB2_OCB_1"), 119 + PINCTRL_PIN(68, "USB2_OCB_2"), 120 + PINCTRL_PIN(69, "USB2_OCB_3"), 121 + PINCTRL_PIN(70, "DDSP_HPD_A_TIME_SYNC_0"), 122 + PINCTRL_PIN(71, "DDSP_HPD_B"), 123 + PINCTRL_PIN(72, "DDSP_HPD_C"), 124 + PINCTRL_PIN(73, "USB2_OCB_0"), 125 + PINCTRL_PIN(74, "PCHHOTB"), 126 + PINCTRL_PIN(75, "ESPI_CLK_LOOPBK"), 117 127 /* GPP_S */ 118 - PINCTRL_PIN(67, "SNDW1_CLK"), 119 - PINCTRL_PIN(68, "SNDW1_DATA"), 120 - PINCTRL_PIN(69, "SNDW2_CLK"), 121 - PINCTRL_PIN(70, "SNDW2_DATA"), 122 - PINCTRL_PIN(71, "SNDW1_CLK"), 123 - PINCTRL_PIN(72, "SNDW1_DATA"), 124 - PINCTRL_PIN(73, "SNDW4_CLK_DMIC_CLK_0"), 125 - PINCTRL_PIN(74, "SNDW4_DATA_DMIC_DATA_0"), 128 + PINCTRL_PIN(76, "SNDW1_CLK"), 129 + PINCTRL_PIN(77, "SNDW1_DATA"), 130 + PINCTRL_PIN(78, "SNDW2_CLK"), 131 + PINCTRL_PIN(79, "SNDW2_DATA"), 132 + PINCTRL_PIN(80, "SNDW1_CLK"), 133 + PINCTRL_PIN(81, "SNDW1_DATA"), 134 + PINCTRL_PIN(82, "SNDW4_CLK_DMIC_CLK_0"), 135 + PINCTRL_PIN(83, "SNDW4_DATA_DMIC_DATA_0"), 126 136 /* GPP_R */ 127 - PINCTRL_PIN(75, "HDA_BCLK"), 128 - PINCTRL_PIN(76, "HDA_SYNC"), 129 - PINCTRL_PIN(77, "HDA_SDO"), 130 - PINCTRL_PIN(78, "HDA_SDI_0"), 131 - PINCTRL_PIN(79, "HDA_RSTB"), 132 - PINCTRL_PIN(80, "HDA_SDI_1"), 133 - PINCTRL_PIN(81, "I2S1_SFRM"), 134 - PINCTRL_PIN(82, "I2S1_TXD"), 137 + PINCTRL_PIN(84, "HDA_BCLK"), 138 + PINCTRL_PIN(85, "HDA_SYNC"), 139 + PINCTRL_PIN(86, "HDA_SDO"), 140 + PINCTRL_PIN(87, "HDA_SDI_0"), 141 + PINCTRL_PIN(88, "HDA_RSTB"), 142 + PINCTRL_PIN(89, "HDA_SDI_1"), 143 + PINCTRL_PIN(90, "I2S1_SFRM"), 144 + PINCTRL_PIN(91, "I2S1_TXD"), 135 145 /* GPP_H */ 136 - PINCTRL_PIN(83, "GPPC_H_0"), 137 - PINCTRL_PIN(84, "SD_PWR_EN_B"), 138 - PINCTRL_PIN(85, "MODEM_CLKREQ"), 139 - PINCTRL_PIN(86, "SX_EXIT_HOLDOFFB"), 140 - PINCTRL_PIN(87, "I2C2_SDA"), 141 - PINCTRL_PIN(88, "I2C2_SCL"), 142 - PINCTRL_PIN(89, "I2C3_SDA"), 143 - PINCTRL_PIN(90, "I2C3_SCL"), 144 - PINCTRL_PIN(91, "I2C4_SDA"), 145 - PINCTRL_PIN(92, "I2C4_SCL"), 146 - PINCTRL_PIN(93, "CPU_VCCIO_PWR_GATEB"), 147 - PINCTRL_PIN(94, "I2S2_SCLK"), 148 - PINCTRL_PIN(95, "I2S2_SFRM"), 149 - PINCTRL_PIN(96, "I2S2_TXD"), 150 - PINCTRL_PIN(97, "I2S2_RXD"), 151 - PINCTRL_PIN(98, "I2S1_SCLK"), 152 - PINCTRL_PIN(99, "GPPC_H_16"), 153 - PINCTRL_PIN(100, "GPPC_H_17"), 154 - PINCTRL_PIN(101, "GPPC_H_18"), 155 - PINCTRL_PIN(102, "GPPC_H_19"), 156 - PINCTRL_PIN(103, "GPPC_H_20"), 157 - PINCTRL_PIN(104, "GPPC_H_21"), 158 - PINCTRL_PIN(105, "GPPC_H_22"), 159 - PINCTRL_PIN(106, "GPPC_H_23"), 146 + PINCTRL_PIN(92, "GPPC_H_0"), 147 + PINCTRL_PIN(93, "SD_PWR_EN_B"), 148 + PINCTRL_PIN(94, "MODEM_CLKREQ"), 149 + PINCTRL_PIN(95, "SX_EXIT_HOLDOFFB"), 150 + PINCTRL_PIN(96, "I2C2_SDA"), 151 + PINCTRL_PIN(97, "I2C2_SCL"), 152 + PINCTRL_PIN(98, "I2C3_SDA"), 153 + PINCTRL_PIN(99, "I2C3_SCL"), 154 + PINCTRL_PIN(100, "I2C4_SDA"), 155 + PINCTRL_PIN(101, "I2C4_SCL"), 156 + PINCTRL_PIN(102, "CPU_VCCIO_PWR_GATEB"), 157 + PINCTRL_PIN(103, "I2S2_SCLK"), 158 + PINCTRL_PIN(104, "I2S2_SFRM"), 159 + PINCTRL_PIN(105, "I2S2_TXD"), 160 + PINCTRL_PIN(106, "I2S2_RXD"), 161 + PINCTRL_PIN(107, "I2S1_SCLK"), 162 + PINCTRL_PIN(108, "GPPC_H_16"), 163 + PINCTRL_PIN(109, "GPPC_H_17"), 164 + PINCTRL_PIN(110, "GPPC_H_18"), 165 + PINCTRL_PIN(111, "GPPC_H_19"), 166 + PINCTRL_PIN(112, "GPPC_H_20"),
167 + PINCTRL_PIN(113, "GPPC_H_21"), 168 + PINCTRL_PIN(114, "GPPC_H_22"), 169 + PINCTRL_PIN(115, "GPPC_H_23"), 160 170 /* GPP_D */ 161 - PINCTRL_PIN(107, "SPI1_CSB"), 162 - PINCTRL_PIN(108, "SPI1_CLK"), 163 - PINCTRL_PIN(109, "SPI1_MISO_IO_1"), 164 - PINCTRL_PIN(110, "SPI1_MOSI_IO_0"), 165 - PINCTRL_PIN(111, "ISH_I2C0_SDA"), 166 - PINCTRL_PIN(112, "ISH_I2C0_SCL"), 167 - PINCTRL_PIN(113, "ISH_I2C1_SDA"), 168 - PINCTRL_PIN(114, "ISH_I2C1_SCL"), 169 - PINCTRL_PIN(115, "ISH_SPI_CSB"), 170 - PINCTRL_PIN(116, "ISH_SPI_CLK"), 171 - PINCTRL_PIN(117, "ISH_SPI_MISO"), 172 - PINCTRL_PIN(118, "ISH_SPI_MOSI"), 173 - PINCTRL_PIN(119, "ISH_UART0_RXD"), 174 - PINCTRL_PIN(120, "ISH_UART0_TXD"), 175 - PINCTRL_PIN(121, "ISH_UART0_RTSB"), 176 - PINCTRL_PIN(122, "ISH_UART0_CTSB"), 177 - PINCTRL_PIN(123, "SPI1_IO_2"), 178 - PINCTRL_PIN(124, "SPI1_IO_3"), 179 - PINCTRL_PIN(125, "I2S_MCLK"), 180 - PINCTRL_PIN(126, "CNV_MFUART2_RXD"), 181 - PINCTRL_PIN(127, "CNV_MFUART2_TXD"), 182 - PINCTRL_PIN(128, "CNV_PA_BLANKING"), 183 - PINCTRL_PIN(129, "I2C5_SDA"), 184 - PINCTRL_PIN(130, "I2C5_SCL"), 185 - PINCTRL_PIN(131, "GSPI2_CLK_LOOPBK"), 186 - PINCTRL_PIN(132, "SPI1_CLK_LOOPBK"), 171 + PINCTRL_PIN(116, "SPI1_CSB"), 172 + PINCTRL_PIN(117, "SPI1_CLK"), 173 + PINCTRL_PIN(118, "SPI1_MISO_IO_1"), 174 + PINCTRL_PIN(119, "SPI1_MOSI_IO_0"), 175 + PINCTRL_PIN(120, "ISH_I2C0_SDA"), 176 + PINCTRL_PIN(121, "ISH_I2C0_SCL"), 177 + PINCTRL_PIN(122, "ISH_I2C1_SDA"), 178 + PINCTRL_PIN(123, "ISH_I2C1_SCL"), 179 + PINCTRL_PIN(124, "ISH_SPI_CSB"), 180 + PINCTRL_PIN(125, "ISH_SPI_CLK"), 181 + PINCTRL_PIN(126, "ISH_SPI_MISO"), 182 + PINCTRL_PIN(127, "ISH_SPI_MOSI"), 183 + PINCTRL_PIN(128, "ISH_UART0_RXD"), 184 + PINCTRL_PIN(129, "ISH_UART0_TXD"), 185 + PINCTRL_PIN(130, "ISH_UART0_RTSB"), 186 + PINCTRL_PIN(131, "ISH_UART0_CTSB"), 187 + PINCTRL_PIN(132, "SPI1_IO_2"), 188 + PINCTRL_PIN(133, "SPI1_IO_3"), 189 + PINCTRL_PIN(134, "I2S_MCLK"), 190 + PINCTRL_PIN(135, "CNV_MFUART2_RXD"), 191 + PINCTRL_PIN(136, "CNV_MFUART2_TXD"), 192 + PINCTRL_PIN(137, "CNV_PA_BLANKING"), 193 + PINCTRL_PIN(138, "I2C5_SDA"), 194 + PINCTRL_PIN(139, "I2C5_SCL"), 195 + PINCTRL_PIN(140, "GSPI2_CLK_LOOPBK"), 196 + PINCTRL_PIN(141, "SPI1_CLK_LOOPBK"), 187 197 /* vGPIO */ 188 - PINCTRL_PIN(133, "CNV_BTEN"), 189 - PINCTRL_PIN(134, "CNV_WCEN"), 190 - PINCTRL_PIN(135, "CNV_BT_HOST_WAKEB"), 191 - PINCTRL_PIN(136, "CNV_BT_IF_SELECT"), 192 - PINCTRL_PIN(137, "vCNV_BT_UART_TXD"), 193 - PINCTRL_PIN(138, "vCNV_BT_UART_RXD"), 194 - PINCTRL_PIN(139, "vCNV_BT_UART_CTS_B"), 195 - PINCTRL_PIN(140, "vCNV_BT_UART_RTS_B"), 196 - PINCTRL_PIN(141, "vCNV_MFUART1_TXD"), 197 - PINCTRL_PIN(142, "vCNV_MFUART1_RXD"), 198 - PINCTRL_PIN(143, "vCNV_MFUART1_CTS_B"), 199 - PINCTRL_PIN(144, "vCNV_MFUART1_RTS_B"), 200 - PINCTRL_PIN(145, "vUART0_TXD"), 201 - PINCTRL_PIN(146, "vUART0_RXD"), 202 - PINCTRL_PIN(147, "vUART0_CTS_B"), 203 - PINCTRL_PIN(148, "vUART0_RTS_B"), 204 - PINCTRL_PIN(149, "vISH_UART0_TXD"), 205 - PINCTRL_PIN(150, "vISH_UART0_RXD"), 206 - PINCTRL_PIN(151, "vISH_UART0_CTS_B"), 207 - PINCTRL_PIN(152, "vISH_UART0_RTS_B"), 208 - PINCTRL_PIN(153, "vCNV_BT_I2S_BCLK"), 209 - PINCTRL_PIN(154, "vCNV_BT_I2S_WS_SYNC"), 210 - PINCTRL_PIN(155, "vCNV_BT_I2S_SDO"), 211 - PINCTRL_PIN(156, "vCNV_BT_I2S_SDI"), 212 - PINCTRL_PIN(157, "vI2S2_SCLK"), 213 - PINCTRL_PIN(158, "vI2S2_SFRM"), 214 - PINCTRL_PIN(159, "vI2S2_TXD"), 215 - PINCTRL_PIN(160, "vI2S2_RXD"), 216 - PINCTRL_PIN(161, "vSD3_CD_B"), 198 + PINCTRL_PIN(142, "CNV_BTEN"), 199 + PINCTRL_PIN(143, "CNV_WCEN"), 200 + PINCTRL_PIN(144, "CNV_BT_HOST_WAKEB"),
201 + PINCTRL_PIN(145, "CNV_BT_IF_SELECT"), 202 + PINCTRL_PIN(146, "vCNV_BT_UART_TXD"), 203 + PINCTRL_PIN(147, "vCNV_BT_UART_RXD"), 204 + PINCTRL_PIN(148, "vCNV_BT_UART_CTS_B"), 205 + PINCTRL_PIN(149, "vCNV_BT_UART_RTS_B"), 206 + PINCTRL_PIN(150, "vCNV_MFUART1_TXD"), 207 + PINCTRL_PIN(151, "vCNV_MFUART1_RXD"), 208 + PINCTRL_PIN(152, "vCNV_MFUART1_CTS_B"), 209 + PINCTRL_PIN(153, "vCNV_MFUART1_RTS_B"), 210 + PINCTRL_PIN(154, "vUART0_TXD"), 211 + PINCTRL_PIN(155, "vUART0_RXD"), 212 + PINCTRL_PIN(156, "vUART0_CTS_B"), 213 + PINCTRL_PIN(157, "vUART0_RTS_B"), 214 + PINCTRL_PIN(158, "vISH_UART0_TXD"), 215 + PINCTRL_PIN(159, "vISH_UART0_RXD"), 216 + PINCTRL_PIN(160, "vISH_UART0_CTS_B"), 217 + PINCTRL_PIN(161, "vISH_UART0_RTS_B"), 218 + PINCTRL_PIN(162, "vCNV_BT_I2S_BCLK"), 219 + PINCTRL_PIN(163, "vCNV_BT_I2S_WS_SYNC"), 220 + PINCTRL_PIN(164, "vCNV_BT_I2S_SDO"), 221 + PINCTRL_PIN(165, "vCNV_BT_I2S_SDI"), 222 + PINCTRL_PIN(166, "vI2S2_SCLK"), 223 + PINCTRL_PIN(167, "vI2S2_SFRM"), 224 + PINCTRL_PIN(168, "vI2S2_TXD"), 225 + PINCTRL_PIN(169, "vI2S2_RXD"), 226 + PINCTRL_PIN(170, "vSD3_CD_B"), 217 227 /* GPP_C */ 218 - PINCTRL_PIN(162, "GPPC_C_0"), 219 - PINCTRL_PIN(163, "GPPC_C_1"), 220 - PINCTRL_PIN(164, "GPPC_C_2"), 221 - PINCTRL_PIN(165, "GPPC_C_3"), 222 - PINCTRL_PIN(166, "GPPC_C_4"), 223 - PINCTRL_PIN(167, "GPPC_C_5"), 224 - PINCTRL_PIN(168, "SUSWARNB_SUSPWRDNACK"), 225 - PINCTRL_PIN(169, "SUSACKB"), 226 - PINCTRL_PIN(170, "UART0_RXD"), 227 - PINCTRL_PIN(171, "UART0_TXD"), 228 - PINCTRL_PIN(172, "UART0_RTSB"), 229 - PINCTRL_PIN(173, "UART0_CTSB"), 230 - PINCTRL_PIN(174, "UART1_RXD"), 231 - PINCTRL_PIN(175, "UART1_TXD"), 232 - PINCTRL_PIN(176, "UART1_RTSB"), 233 - PINCTRL_PIN(177, "UART1_CTSB"), 234 - PINCTRL_PIN(178, "I2C0_SDA"), 235 - PINCTRL_PIN(179, "I2C0_SCL"), 236 - PINCTRL_PIN(180, "I2C1_SDA"), 237 - PINCTRL_PIN(181, "I2C1_SCL"), 238 - PINCTRL_PIN(182, "UART2_RXD"), 239 - PINCTRL_PIN(183, "UART2_TXD"), 240 - PINCTRL_PIN(184, "UART2_RTSB"), 241 - PINCTRL_PIN(185, "UART2_CTSB"), 228 + PINCTRL_PIN(171, "GPPC_C_0"), 229 + PINCTRL_PIN(172, "GPPC_C_1"), 230 + PINCTRL_PIN(173, "GPPC_C_2"), 231 + PINCTRL_PIN(174, "GPPC_C_3"), 232 + PINCTRL_PIN(175, "GPPC_C_4"), 233 + PINCTRL_PIN(176, "GPPC_C_5"), 234 + PINCTRL_PIN(177, "SUSWARNB_SUSPWRDNACK"), 235 + PINCTRL_PIN(178, "SUSACKB"), 236 + PINCTRL_PIN(179, "UART0_RXD"), 237 + PINCTRL_PIN(180, "UART0_TXD"), 238 + PINCTRL_PIN(181, "UART0_RTSB"), 239 + PINCTRL_PIN(182, "UART0_CTSB"), 240 + PINCTRL_PIN(183, "UART1_RXD"), 241 + PINCTRL_PIN(184, "UART1_TXD"), 242 + PINCTRL_PIN(185, "UART1_RTSB"), 243 + PINCTRL_PIN(186, "UART1_CTSB"), 244 + PINCTRL_PIN(187, "I2C0_SDA"), 245 + PINCTRL_PIN(188, "I2C0_SCL"), 246 + PINCTRL_PIN(189, "I2C1_SDA"), 247 + PINCTRL_PIN(190, "I2C1_SCL"), 248 + PINCTRL_PIN(191, "UART2_RXD"), 249 + PINCTRL_PIN(192, "UART2_TXD"), 250 + PINCTRL_PIN(193, "UART2_RTSB"), 251 + PINCTRL_PIN(194, "UART2_CTSB"), 242 252 /* HVCMOS */ 243 - PINCTRL_PIN(186, "L_BKLTEN"), 244 - PINCTRL_PIN(187, "L_BKLTCTL"), 245 - PINCTRL_PIN(188, "L_VDDEN"), 246 - PINCTRL_PIN(189, "SYS_PWROK"), 247 - PINCTRL_PIN(190, "SYS_RESETB"), 248 - PINCTRL_PIN(191, "MLK_RSTB"), 253 + PINCTRL_PIN(195, "L_BKLTEN"), 254 + PINCTRL_PIN(196, "L_BKLTCTL"), 255 + PINCTRL_PIN(197, "L_VDDEN"), 256 + PINCTRL_PIN(198, "SYS_PWROK"), 257 + PINCTRL_PIN(199, "SYS_RESETB"), 258 + PINCTRL_PIN(200, "MLK_RSTB"), 249 259 /* GPP_E */ 250 - PINCTRL_PIN(192, "ISH_GP_0"), 251 - PINCTRL_PIN(193, "ISH_GP_1"), 252 - PINCTRL_PIN(194, "IMGCLKOUT_1"), 253 - PINCTRL_PIN(195, "ISH_GP_2"),
254 - PINCTRL_PIN(196, "IMGCLKOUT_2"), 255 - PINCTRL_PIN(197, "SATA_LEDB"), 256 - PINCTRL_PIN(198, "IMGCLKOUT_3"), 257 - PINCTRL_PIN(199, "ISH_GP_3"), 258 - PINCTRL_PIN(200, "ISH_GP_4"), 259 - PINCTRL_PIN(201, "ISH_GP_5"), 260 - PINCTRL_PIN(202, "ISH_GP_6"), 261 - PINCTRL_PIN(203, "ISH_GP_7"), 262 - PINCTRL_PIN(204, "IMGCLKOUT_4"), 263 - PINCTRL_PIN(205, "DDPA_CTRLCLK"), 264 - PINCTRL_PIN(206, "DDPA_CTRLDATA"), 265 - PINCTRL_PIN(207, "DDPB_CTRLCLK"), 266 - PINCTRL_PIN(208, "DDPB_CTRLDATA"), 267 - PINCTRL_PIN(209, "DDPC_CTRLCLK"), 268 - PINCTRL_PIN(210, "DDPC_CTRLDATA"), 269 - PINCTRL_PIN(211, "IMGCLKOUT_5"), 270 - PINCTRL_PIN(212, "CNV_BRI_DT"), 271 - PINCTRL_PIN(213, "CNV_BRI_RSP"), 272 - PINCTRL_PIN(214, "CNV_RGI_DT"), 273 - PINCTRL_PIN(215, "CNV_RGI_RSP"), 260 + PINCTRL_PIN(201, "ISH_GP_0"), 261 + PINCTRL_PIN(202, "ISH_GP_1"), 262 + PINCTRL_PIN(203, "IMGCLKOUT_1"), 263 + PINCTRL_PIN(204, "ISH_GP_2"), 264 + PINCTRL_PIN(205, "IMGCLKOUT_2"), 265 + PINCTRL_PIN(206, "SATA_LEDB"), 266 + PINCTRL_PIN(207, "IMGCLKOUT_3"), 267 + PINCTRL_PIN(208, "ISH_GP_3"), 268 + PINCTRL_PIN(209, "ISH_GP_4"), 269 + PINCTRL_PIN(210, "ISH_GP_5"), 270 + PINCTRL_PIN(211, "ISH_GP_6"), 271 + PINCTRL_PIN(212, "ISH_GP_7"), 272 + PINCTRL_PIN(213, "IMGCLKOUT_4"), 273 + PINCTRL_PIN(214, "DDPA_CTRLCLK"), 274 + PINCTRL_PIN(215, "DDPA_CTRLDATA"), 275 + PINCTRL_PIN(216, "DDPB_CTRLCLK"), 276 + PINCTRL_PIN(217, "DDPB_CTRLDATA"), 277 + PINCTRL_PIN(218, "DDPC_CTRLCLK"), 278 + PINCTRL_PIN(219, "DDPC_CTRLDATA"), 279 + PINCTRL_PIN(220, "IMGCLKOUT_5"), 280 + PINCTRL_PIN(221, "CNV_BRI_DT"), 281 + PINCTRL_PIN(222, "CNV_BRI_RSP"), 282 + PINCTRL_PIN(223, "CNV_RGI_DT"), 283 + PINCTRL_PIN(224, "CNV_RGI_RSP"), 274 284 /* GPP_G */ 275 - PINCTRL_PIN(216, "SD3_CMD"), 276 - PINCTRL_PIN(217, "SD3_D0"), 277 - PINCTRL_PIN(218, "SD3_D1"), 278 - PINCTRL_PIN(219, "SD3_D2"), 279 - PINCTRL_PIN(220, "SD3_D3"), 280 - PINCTRL_PIN(221, "SD3_CDB"), 281 - PINCTRL_PIN(222, "SD3_CLK"), 282 - PINCTRL_PIN(223, "SD3_WP"), 285 + PINCTRL_PIN(225, "SD3_CMD"), 286 + PINCTRL_PIN(226, "SD3_D0"), 287 + PINCTRL_PIN(227, "SD3_D1"), 288 + PINCTRL_PIN(228, "SD3_D2"), 289 + PINCTRL_PIN(229, "SD3_D3"), 290 + PINCTRL_PIN(230, "SD3_CDB"), 291 + PINCTRL_PIN(231, "SD3_CLK"), 292 + PINCTRL_PIN(232, "SD3_WP"), 283 293 }; 284 294 285 295 static const struct intel_padgroup jsl_community0_gpps[] = { 286 296 JSL_GPP(0, 0, 19, 320), /* GPP_F */ 287 - JSL_GPP(1, 20, 45, 32), /* GPP_B */ 288 - JSL_GPP(2, 46, 66, 64), /* GPP_A */ 289 - JSL_GPP(3, 67, 74, 96), /* GPP_S */ 290 - JSL_GPP(4, 75, 82, 128), /* GPP_R */ 297 + JSL_GPP(1, 20, 28, INTEL_GPIO_BASE_NOMAP), /* SPI */ 298 + JSL_GPP(2, 29, 54, 32), /* GPP_B */ 299 + JSL_GPP(3, 55, 75, 64), /* GPP_A */ 300 + JSL_GPP(4, 76, 83, 96), /* GPP_S */ 301 + JSL_GPP(5, 84, 91, 128), /* GPP_R */ 291 302 }; 292 303 293 304 static const struct intel_padgroup jsl_community1_gpps[] = { 294 - JSL_GPP(0, 83, 106, 160), /* GPP_H */ 295 - JSL_GPP(1, 107, 132, 192), /* GPP_D */ 296 - JSL_GPP(2, 133, 161, 224), /* vGPIO */ 297 - JSL_GPP(3, 162, 185, 256), /* GPP_C */ 305 + JSL_GPP(0, 92, 115, 160), /* GPP_H */ 306 + JSL_GPP(1, 116, 141, 192), /* GPP_D */ 307 + JSL_GPP(2, 142, 170, 224), /* vGPIO */ 308 + JSL_GPP(3, 171, 194, 256), /* GPP_C */ 298 309 }; 299 310 300 311 static const struct intel_padgroup jsl_community4_gpps[] = { 301 - JSL_GPP(0, 186, 191, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */ 302 - JSL_GPP(1, 192, 215, 288), /* GPP_E */ 312 + JSL_GPP(0, 195, 200, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */ 313 + JSL_GPP(1, 201, 224, 288), /* GPP_E */ 303 314 }; 304 315
305 316 static const struct intel_padgroup jsl_community5_gpps[] = { 306 - JSL_GPP(0, 216, 223, INTEL_GPIO_BASE_ZERO), /* GPP_G */ 317 + JSL_GPP(0, 225, 232, INTEL_GPIO_BASE_ZERO), /* GPP_G */ 307 318 }; 308 319 309 320 static const struct intel_community jsl_communities[] = { 310 - JSL_COMMUNITY(0, 0, 82, jsl_community0_gpps), 311 - JSL_COMMUNITY(1, 83, 185, jsl_community1_gpps), 312 - JSL_COMMUNITY(2, 186, 215, jsl_community4_gpps), 313 - JSL_COMMUNITY(3, 216, 223, jsl_community5_gpps), 321 + JSL_COMMUNITY(0, 0, 91, jsl_community0_gpps), 322 + JSL_COMMUNITY(1, 92, 194, jsl_community1_gpps), 323 + JSL_COMMUNITY(2, 195, 224, jsl_community4_gpps), 324 + JSL_COMMUNITY(3, 225, 232, jsl_community5_gpps), 314 325 }; 315 326 316 327 static const struct intel_pinctrl_soc_data jsl_soc_data = { ··· 347 336 .pm = &jsl_pinctrl_pm_ops, 348 337 }, 349 338 }; 350 - 351 339 module_platform_driver(jsl_pinctrl_driver); 352 340 353 341 MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+8
drivers/pinctrl/intel/pinctrl-merrifield.c
··· 745 745 mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; 746 746 bits |= BUFCFG_PU_EN; 747 747 748 + /* Set default strength value in case none is given */ 749 + if (arg == 1) 750 + arg = 20000; 751 + 748 752 switch (arg) { 749 753 case 50000: 750 754 bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; ··· 768 764 case PIN_CONFIG_BIAS_PULL_DOWN: 769 765 mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; 770 766 bits |= BUFCFG_PD_EN; 767 + 768 + /* Set default strength value in case none is given */ 769 + if (arg == 1) 770 + arg = 20000; 771 771 772 772 switch (arg) { 773 773 case 50000:
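Generic pinconf passes an argument of 1 when a bias is requested without an explicit strength, and the merrifield fix above substitutes the driver default before matching. A sketch of the convention (hypothetical supported values, based on the two visible in the hunk):

static int pull_strength_ohms(unsigned int arg)
{
        if (arg == 1)        /* bias requested with no explicit strength */
                arg = 20000; /* substitute the driver default: 20 kOhm */

        switch (arg) {
        case 20000:
        case 50000:
                return (int)arg;
        default:
                return -1;   /* unsupported strength */
        }
}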
-7
drivers/pinctrl/pinctrl-amd.c
··· 429 429 pin_reg &= ~BIT(LEVEL_TRIG_OFF); 430 430 pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); 431 431 pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; 432 - pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; 433 432 irq_set_handler_locked(d, handle_edge_irq); 434 433 break; 435 434 ··· 436 437 pin_reg &= ~BIT(LEVEL_TRIG_OFF); 437 438 pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); 438 439 pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF; 439 - pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; 440 440 irq_set_handler_locked(d, handle_edge_irq); 441 441 break; 442 442 ··· 443 445 pin_reg &= ~BIT(LEVEL_TRIG_OFF); 444 446 pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); 445 447 pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF; 446 - pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; 447 448 irq_set_handler_locked(d, handle_edge_irq); 448 449 break; 449 450 ··· 450 453 pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF; 451 454 pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); 452 455 pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; 453 - pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); 454 - pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF; 455 456 irq_set_handler_locked(d, handle_level_irq); 456 457 break; 457 458 ··· 457 462 pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF; 458 463 pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); 459 464 pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF; 460 - pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); 461 - pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF; 462 465 irq_set_handler_locked(d, handle_level_irq); 463 466 break; 464 467
-39
drivers/scsi/megaraid/megaraid_sas_base.c
··· 37 37 #include <linux/poll.h> 38 38 #include <linux/vmalloc.h> 39 39 #include <linux/irq_poll.h> 40 - #include <linux/blk-mq-pci.h> 41 40 42 41 #include <scsi/scsi.h> 43 42 #include <scsi/scsi_cmnd.h> ··· 112 113 unsigned int enable_sdev_max_qd; 113 114 module_param(enable_sdev_max_qd, int, 0444); 114 115 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0"); 115 - 116 - int host_tagset_enable = 1; 117 - module_param(host_tagset_enable, int, 0444); 118 - MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)"); 119 116 120 117 MODULE_LICENSE("GPL"); 121 118 MODULE_VERSION(MEGASAS_VERSION); ··· 3119 3124 return 0; 3120 3125 } 3121 3126 3122 - static int megasas_map_queues(struct Scsi_Host *shost) 3123 - { 3124 - struct megasas_instance *instance; 3125 - 3126 - instance = (struct megasas_instance *)shost->hostdata; 3127 - 3128 - if (shost->nr_hw_queues == 1) 3129 - return 0; 3130 - 3131 - return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 3132 - instance->pdev, instance->low_latency_index_start); 3133 - } 3134 - 3135 3127 static void megasas_aen_polling(struct work_struct *work); 3136 3128 3137 3129 /** ··· 3427 3445 .eh_timed_out = megasas_reset_timer, 3428 3446 .shost_attrs = megaraid_host_attrs, 3429 3447 .bios_param = megasas_bios_param, 3430 - .map_queues = megasas_map_queues, 3431 3448 .change_queue_depth = scsi_change_queue_depth, 3432 3449 .max_segment_size = 0xffffffff, 3433 3450 }; ··· 6808 6827 host->max_lun = MEGASAS_MAX_LUN; 6809 6828 host->max_cmd_len = 16; 6810 6829 6811 - /* Use shared host tagset only for fusion adaptors 6812 - * if there are managed interrupts (smp affinity enabled case). 6813 - * Single msix_vectors in kdump, so shared host tag is also disabled. 6814 - */ 6815 - 6816 - host->host_tagset = 0; 6817 - host->nr_hw_queues = 1; 6818 - 6819 - if ((instance->adapter_type != MFI_SERIES) && 6820 - (instance->msix_vectors > instance->low_latency_index_start) && 6821 - host_tagset_enable && 6822 - instance->smp_affinity_enable) { 6823 - host->host_tagset = 1; 6824 - host->nr_hw_queues = instance->msix_vectors - 6825 - instance->low_latency_index_start; 6826 - } 6827 - 6828 - dev_info(&instance->pdev->dev, 6829 - "Max firmware commands: %d shared with nr_hw_queues = %d\n", 6830 - instance->max_fw_cmds, host->nr_hw_queues); 6831 6830 /* 6832 6831 * Notify the mid-layer about the new controller 6833 6832 */
+13 -16
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 359 359 { 360 360 int sdev_busy; 361 361 362 - /* TBD - if sml remove device_busy in future, driver 363 - * should track counter in internal structure. 364 - */ 365 - sdev_busy = atomic_read(&scmd->device->device_busy); 362 + /* nr_hw_queue = 1 for MegaRAID */ 363 + struct blk_mq_hw_ctx *hctx = 364 + scmd->device->request_queue->queue_hw_ctx[0]; 365 + 366 + sdev_busy = atomic_read(&hctx->nr_active); 366 367 367 368 if (instance->perf_mode == MR_BALANCED_PERF_MODE && 368 - sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) { 369 + sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) 369 370 cmd->request_desc->SCSIIO.MSIxIndex = 370 371 mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / 371 372 MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); 372 - } else if (instance->msix_load_balance) { 373 + else if (instance->msix_load_balance) 373 374 cmd->request_desc->SCSIIO.MSIxIndex = 374 375 (mega_mod64(atomic64_add_return(1, &instance->total_io_count), 375 376 instance->msix_vectors)); 376 - } else if (instance->host->nr_hw_queues > 1) { 377 - u32 tag = blk_mq_unique_tag(scmd->request); 378 - 379 - cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) + 380 - instance->low_latency_index_start; 381 - } else { 377 + else 382 378 cmd->request_desc->SCSIIO.MSIxIndex = 383 379 instance->reply_map[raw_smp_processor_id()]; 384 - } 385 380 } 386 381 387 382 /** ··· 956 961 if (megasas_alloc_cmdlist_fusion(instance)) 957 962 goto fail_exit; 958 963 964 + dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n", 965 + instance->max_fw_cmds); 966 + 959 967 /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */ 960 968 io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 961 969 io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; ··· 1102 1104 MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing) 1103 1105 instance->perf_mode = MR_BALANCED_PERF_MODE; 1104 1106 1105 - dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n", 1106 - MEGASAS_PERF_MODE_2STR(instance->perf_mode), 1107 - instance->low_latency_index_start); 1107 + dev_info(&instance->pdev->dev, "Performance mode :%s\n", 1108 + MEGASAS_PERF_MODE_2STR(instance->perf_mode)); 1108 1109 1109 1110 instance->fw_sync_cache_support = (scratch_pad_1 & 1110 1111 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
+1 -1
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 6459 6459 6460 6460 r = _base_handshake_req_reply_wait(ioc, 6461 6461 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, 6462 - sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10); 6462 + sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30); 6463 6463 6464 6464 if (r != 0) { 6465 6465 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
+1 -1
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 664 664 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL; 665 665 struct _pcie_device *pcie_device = NULL; 666 666 u16 smid; 667 - u8 timeout; 667 + unsigned long timeout; 668 668 u8 issue_reset; 669 669 u32 sz, sz_arg; 670 670 void *psge;
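The mpt3sas_ctl change widens a timeout from u8 to unsigned long; with a u8, any userspace-supplied timeout above 255 was silently truncated modulo 256. A runnable demonstration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned long requested = 300;        /* e.g. seconds from an ioctl */
        uint8_t narrow = (uint8_t)requested;  /* old field type: becomes 44 */
        unsigned long wide = requested;       /* widened type: stays 300 */

        printf("u8: %u, unsigned long: %lu\n", narrow, wide);
        return 0;
}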
+8 -1
drivers/scsi/storvsc_drv.c
··· 1246 1246 request = (struct storvsc_cmd_request *) 1247 1247 ((unsigned long)desc->trans_id); 1248 1248 1249 + if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) - vmscsi_size_delta) { 1250 + dev_err(&device->device, "Invalid packet len\n"); 1251 + continue; 1252 + } 1253 + 1249 1254 if (request == &stor_device->init_request || 1250 1255 request == &stor_device->reset_request) { 1251 1256 memcpy(&request->vstor_packet, packet, ··· 1999 1994 alloc_ordered_workqueue("storvsc_error_wq_%d", 2000 1995 WQ_MEM_RECLAIM, 2001 1996 host->host_no); 2002 - if (!host_dev->handle_error_wq) 1997 + if (!host_dev->handle_error_wq) { 1998 + ret = -ENOMEM; 2003 1999 goto err_out2; 2000 + } 2004 2001 INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan); 2005 2002 /* Register the HBA and start the scsi bus scan */ 2006 2003 ret = scsi_add_host(host, &device->device);
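The storvsc change validates hv_pkt_datalen() before interpreting the ring-buffer payload as a vstor_packet, since the length arrives from the untrusted host side. A minimal sketch of the bounds check, with hypothetical types:

#include <stddef.h>
#include <string.h>

struct packet { int id; int status; };

static int handle_message(struct packet *dst, const void *msg, size_t msg_len)
{
        if (msg_len < sizeof(*dst))
                return -1;               /* runt message: refuse to parse it */
        memcpy(dst, msg, sizeof(*dst));  /* safe: length verified above */
        return 0;
}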
+7 -3
drivers/thunderbolt/icm.c
··· 1976 1976 1977 1977 static void remove_unplugged_switch(struct tb_switch *sw) 1978 1978 { 1979 - pm_runtime_get_sync(sw->dev.parent); 1979 + struct device *parent = get_device(sw->dev.parent); 1980 + 1981 + pm_runtime_get_sync(parent); 1980 1982 1981 1983 /* 1982 1984 * Signal this and switches below for rpm_complete because ··· 1989 1987 bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); 1990 1988 tb_switch_remove(sw); 1991 1989 1992 - pm_runtime_mark_last_busy(sw->dev.parent); 1993 - pm_runtime_put_autosuspend(sw->dev.parent); 1990 + pm_runtime_mark_last_busy(parent); 1991 + pm_runtime_put_autosuspend(parent); 1992 + 1993 + put_device(parent); 1994 1994 } 1995 1995 1996 1996 static void icm_free_unplugged_children(struct tb_switch *sw)
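The icm fix pins sw->dev.parent with get_device() before tb_switch_remove() can drop the last reference to it, and releases it only after the final runtime-PM calls. A userspace analogue of the hold-a-reference-across-use pattern (hypothetical refcount helpers standing in for get_device()/put_device()):

#include <stdatomic.h>

struct object {
        atomic_int refs;
        struct object *parent;
};

static struct object *obj_get(struct object *o)
{
        atomic_fetch_add(&o->refs, 1);
        return o;
}

static void obj_put(struct object *o)
{
        if (atomic_fetch_sub(&o->refs, 1) == 1) {
                /* last reference dropped: the object would be freed here */
        }
}

static void remove_child(struct object *child)
{
        struct object *parent = obj_get(child->parent);  /* pin the parent */

        /* ... tear down the child here; that may release the parent's
         * last other reference, but ours keeps it valid ... */

        obj_put(parent);
}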
+6 -1
drivers/tty/tty_io.c
··· 2897 2897 struct task_struct *g, *p; 2898 2898 struct pid *session; 2899 2899 int i; 2900 + unsigned long flags; 2900 2901 2901 2902 if (!tty) 2902 2903 return; 2903 - session = tty->session; 2904 + 2905 + spin_lock_irqsave(&tty->ctrl_lock, flags); 2906 + session = get_pid(tty->session); 2907 + spin_unlock_irqrestore(&tty->ctrl_lock, flags); 2904 2908 2905 2909 tty_ldisc_flush(tty); 2906 2910 ··· 2936 2932 task_unlock(p); 2937 2933 } while_each_thread(g, p); 2938 2934 read_unlock(&tasklist_lock); 2935 + put_pid(session); 2939 2936 #endif 2940 2937 } 2941 2938
+31 -13
drivers/tty/tty_jobctrl.c
··· 103 103 put_pid(tty->session); 104 104 put_pid(tty->pgrp); 105 105 tty->pgrp = get_pid(task_pgrp(current)); 106 - spin_unlock_irqrestore(&tty->ctrl_lock, flags); 107 106 tty->session = get_pid(task_session(current)); 107 + spin_unlock_irqrestore(&tty->ctrl_lock, flags); 108 108 if (current->signal->tty) { 109 109 tty_debug(tty, "current tty %s not NULL!!\n", 110 110 current->signal->tty->name); ··· 293 293 spin_lock_irq(&current->sighand->siglock); 294 294 put_pid(current->signal->tty_old_pgrp); 295 295 current->signal->tty_old_pgrp = NULL; 296 - 297 296 tty = tty_kref_get(current->signal->tty); 297 + spin_unlock_irq(&current->sighand->siglock); 298 + 298 299 if (tty) { 299 300 unsigned long flags; 301 + 302 + tty_lock(tty); 300 303 spin_lock_irqsave(&tty->ctrl_lock, flags); 301 304 put_pid(tty->session); 302 305 put_pid(tty->pgrp); 303 306 tty->session = NULL; 304 307 tty->pgrp = NULL; 305 308 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 309 + tty_unlock(tty); 306 310 tty_kref_put(tty); 307 311 } 308 312 309 - spin_unlock_irq(&current->sighand->siglock); 310 313 /* Now clear signal->tty under the lock */ 311 314 read_lock(&tasklist_lock); 312 315 session_clear_tty(task_session(current)); ··· 480 477 return -ENOTTY; 481 478 if (retval) 482 479 return retval; 483 - if (!current->signal->tty || 484 - (current->signal->tty != real_tty) || 485 - (real_tty->session != task_session(current))) 486 - return -ENOTTY; 480 + 487 481 if (get_user(pgrp_nr, p)) 488 482 return -EFAULT; 489 483 if (pgrp_nr < 0) 490 484 return -EINVAL; 485 + 486 + spin_lock_irq(&real_tty->ctrl_lock); 487 + if (!current->signal->tty || 488 + (current->signal->tty != real_tty) || 489 + (real_tty->session != task_session(current))) { 490 + retval = -ENOTTY; 491 + goto out_unlock_ctrl; 492 + } 491 493 rcu_read_lock(); 492 494 pgrp = find_vpid(pgrp_nr); 493 495 retval = -ESRCH; ··· 502 494 if (session_of_pgrp(pgrp) != task_session(current)) 503 495 goto out_unlock; 504 496 retval = 0; 505 - spin_lock_irq(&tty->ctrl_lock); 506 497 put_pid(real_tty->pgrp); 507 498 real_tty->pgrp = get_pid(pgrp); 508 - spin_unlock_irq(&tty->ctrl_lock); 509 499 out_unlock: 510 500 rcu_read_unlock(); 501 + out_unlock_ctrl: 502 + spin_unlock_irq(&real_tty->ctrl_lock); 511 503 return retval; 512 504 } 513 505 ··· 519 511 * 520 512 * Obtain the session id of the tty. If there is no session 521 513 * return an error. 522 - * 523 - * Locking: none. Reference to current->signal->tty is safe. 524 514 */ 525 515 static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 526 516 { 517 + unsigned long flags; 518 + pid_t sid; 519 + 527 520 /* 528 521 * (tty == real_tty) is a cheap way of 529 522 * testing if the tty is NOT a master pty. 530 523 */ 531 524 if (tty == real_tty && current->signal->tty != real_tty) 532 525 return -ENOTTY; 526 + 527 + spin_lock_irqsave(&real_tty->ctrl_lock, flags); 533 528 if (!real_tty->session) 534 - return -ENOTTY; 535 - return put_user(pid_vnr(real_tty->session), p); 529 + goto err; 530 + sid = pid_vnr(real_tty->session); 531 + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); 532 + 533 + return put_user(sid, p); 534 + 535 + err: 536 + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); 537 + return -ENOTTY; 536 538 } 537 539 538 540 /*
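Throughout the tty changes above, tty->session is only stable while ctrl_lock is held, so readers take a counted reference inside the lock and work on the snapshot afterwards. A userspace sketch with a pthread mutex standing in for the spinlock and a hypothetical pid type:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct pid { atomic_int count; int nr; };

static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pid *session;  /* written elsewhere, under ctrl_lock */

static struct pid *pid_get(struct pid *p)
{
        if (p)
                atomic_fetch_add(&p->count, 1);
        return p;
}

static void pid_put(struct pid *p)
{
        if (p && atomic_fetch_sub(&p->count, 1) == 1) {
                /* last reference dropped: p would be freed here */
        }
}

static int read_session_nr(void)
{
        pthread_mutex_lock(&ctrl_lock);
        struct pid *snap = pid_get(session);  /* pinned while locked */
        pthread_mutex_unlock(&ctrl_lock);

        int nr = snap ? snap->nr : -1;  /* safe: we hold our own reference */
        pid_put(snap);
        return nr;
}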
+16 -13
drivers/usb/cdns3/core.c
··· 427 427 */ 428 428 static int cdns3_probe(struct platform_device *pdev) 429 429 { 430 - struct usb_role_switch_desc sw_desc = { }; 431 430 struct device *dev = &pdev->dev; 432 431 struct resource *res; 433 432 struct cdns3 *cdns; ··· 528 529 if (ret) 529 530 goto err2; 530 531 531 - sw_desc.set = cdns3_role_set; 532 - sw_desc.get = cdns3_role_get; 533 - sw_desc.allow_userspace_control = true; 534 - sw_desc.driver_data = cdns; 535 - if (device_property_read_bool(dev, "usb-role-switch")) 532 + if (device_property_read_bool(dev, "usb-role-switch")) { 533 + struct usb_role_switch_desc sw_desc = { }; 534 + 535 + sw_desc.set = cdns3_role_set; 536 + sw_desc.get = cdns3_role_get; 537 + sw_desc.allow_userspace_control = true; 538 + sw_desc.driver_data = cdns; 536 539 sw_desc.fwnode = dev->fwnode; 537 540 538 - cdns->role_sw = usb_role_switch_register(dev, &sw_desc); 539 - if (IS_ERR(cdns->role_sw)) { 540 - ret = PTR_ERR(cdns->role_sw); 541 - dev_warn(dev, "Unable to register Role Switch\n"); 542 - goto err3; 541 + cdns->role_sw = usb_role_switch_register(dev, &sw_desc); 542 + if (IS_ERR(cdns->role_sw)) { 543 + ret = PTR_ERR(cdns->role_sw); 544 + dev_warn(dev, "Unable to register Role Switch\n"); 545 + goto err3; 546 + } 543 547 } 544 548 545 549 if (cdns->wakeup_irq) { ··· 553 551 554 552 if (ret) { 555 553 dev_err(cdns->dev, "couldn't register wakeup irq handler\n"); 556 - goto err3; 554 + goto err4; 557 555 } 558 556 } 559 557 ··· 584 582 return 0; 585 583 err4: 586 584 cdns3_drd_exit(cdns); 587 - usb_role_switch_unregister(cdns->role_sw); 585 + if (cdns->role_sw) 586 + usb_role_switch_unregister(cdns->role_sw); 588 587 err3: 589 588 set_phy_power_off(cdns); 590 589 err2:
+1
drivers/usb/cdns3/gadget.c
··· 1260 1260 priv_req->end_trb = priv_ep->enqueue; 1261 1261 cdns3_ep_inc_enq(priv_ep); 1262 1262 trb = priv_ep->trb_pool + priv_ep->enqueue; 1263 + trb->length = 0; 1263 1264 } while (sg_iter < num_trb); 1264 1265 1265 1266 trb = priv_req->trb;
+4 -2
drivers/usb/gadget/function/f_fs.c
··· 1324 1324 case FUNCTIONFS_ENDPOINT_DESC: 1325 1325 { 1326 1326 int desc_idx; 1327 - struct usb_endpoint_descriptor *desc; 1327 + struct usb_endpoint_descriptor desc1, *desc; 1328 1328 1329 1329 switch (epfile->ffs->gadget->speed) { 1330 1330 case USB_SPEED_SUPER: ··· 1336 1336 default: 1337 1337 desc_idx = 0; 1338 1338 } 1339 + 1339 1340 desc = epfile->ep->descs[desc_idx]; 1341 + memcpy(&desc1, desc, desc->bLength); 1340 1342 1341 1343 spin_unlock_irq(&epfile->ffs->eps_lock); 1342 - ret = copy_to_user((void __user *)value, desc, desc->bLength); 1344 + ret = copy_to_user((void __user *)value, &desc1, desc1.bLength); 1343 1345 if (ret) 1344 1346 ret = -EFAULT; 1345 1347 return ret;
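The f_fs.c fix above is the standard answer to a copy_to_user()-under-spinlock bug: copy_to_user() may fault and sleep, so the endpoint descriptor is snapshotted into an on-stack copy while eps_lock is held and only the snapshot is handed to userspace after the unlock. The general shape of the pattern, with illustrative names:

	struct foo snap;

	spin_lock_irq(&foo_lock);
	memcpy(&snap, protected_foo, sizeof(snap));	/* cheap, atomic-safe */
	spin_unlock_irq(&foo_lock);

	if (copy_to_user(ubuf, &snap, sizeof(snap)))	/* may fault/sleep */
		return -EFAULT;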
+2 -2
drivers/usb/host/ohci-omap.c
··· 91 91 | ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)), 92 92 INNOVATOR_FPGA_CAM_USB_CONTROL); 93 93 else if (priv->power) 94 - gpiod_set_value(priv->power, 0); 94 + gpiod_set_value_cansleep(priv->power, 0); 95 95 } else { 96 96 if (machine_is_omap_innovator() && cpu_is_omap1510()) 97 97 __raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL) 98 98 & ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)), 99 99 INNOVATOR_FPGA_CAM_USB_CONTROL); 100 100 else if (priv->power) 101 - gpiod_set_value(priv->power, 1); 101 + gpiod_set_value_cansleep(priv->power, 1); 102 102 } 103 103 104 104 return 0;
+3 -2
drivers/usb/serial/ch341.c
··· 81 81 #define CH341_QUIRK_SIMULATE_BREAK BIT(1) 82 82 83 83 static const struct usb_device_id id_table[] = { 84 - { USB_DEVICE(0x4348, 0x5523) }, 84 + { USB_DEVICE(0x1a86, 0x5512) }, 85 + { USB_DEVICE(0x1a86, 0x5523) }, 85 86 { USB_DEVICE(0x1a86, 0x7522) }, 86 87 { USB_DEVICE(0x1a86, 0x7523) }, 87 - { USB_DEVICE(0x1a86, 0x5523) }, 88 + { USB_DEVICE(0x4348, 0x5523) }, 88 89 { }, 89 90 }; 90 91 MODULE_DEVICE_TABLE(usb, id_table);
+4 -6
drivers/usb/serial/kl5kusb105.c
··· 276 276 priv->cfg.unknown2 = cfg->unknown2; 277 277 spin_unlock_irqrestore(&priv->lock, flags); 278 278 279 + kfree(cfg); 280 + 279 281 /* READ_ON and urb submission */ 280 282 rc = usb_serial_generic_open(tty, port); 281 - if (rc) { 282 - retval = rc; 283 - goto err_free_cfg; 284 - } 283 + if (rc) 284 + return rc; 285 285 286 286 rc = usb_control_msg(port->serial->dev, 287 287 usb_sndctrlpipe(port->serial->dev, 0), ··· 324 324 KLSI_TIMEOUT); 325 325 err_generic_close: 326 326 usb_serial_generic_close(port); 327 - err_free_cfg: 328 - kfree(cfg); 329 327 330 328 return retval; 331 329 }
+6 -4
drivers/usb/serial/option.c
··· 419 419 #define CINTERION_PRODUCT_PH8 0x0053 420 420 #define CINTERION_PRODUCT_AHXX 0x0055 421 421 #define CINTERION_PRODUCT_PLXX 0x0060 422 + #define CINTERION_PRODUCT_EXS82 0x006c 422 423 #define CINTERION_PRODUCT_PH8_2RMNET 0x0082 423 424 #define CINTERION_PRODUCT_PH8_AUDIO 0x0083 424 425 #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 ··· 1106 1105 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), 1107 1106 .driver_info = NUMEP2 }, 1108 1107 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, 1109 - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff), 1110 - .driver_info = NUMEP2 }, 1111 - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) }, 1108 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), 1109 + .driver_info = RSVD(4) }, 1112 1110 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), 1113 1111 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, 1114 1112 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, ··· 1902 1902 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, 1903 1903 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), 1904 1904 .driver_info = RSVD(0) | RSVD(4) }, 1905 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) }, 1905 1906 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 1906 1907 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, 1907 1908 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, ··· 2047 2046 .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, 2048 2047 { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ 2049 2048 .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, 2050 - { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ 2049 + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ 2051 2050 .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, 2052 2051 { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ 2053 2052 .driver_info = RSVD(4) | RSVD(5) }, 2054 2053 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ 2055 2054 .driver_info = RSVD(6) }, 2055 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ 2056 2056 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ 2057 2057 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ 2058 2058 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+1 -1
drivers/usb/storage/scsiglue.c
··· 92 92 static int slave_configure(struct scsi_device *sdev) 93 93 { 94 94 struct us_data *us = host_to_us(sdev->host); 95 - struct device *dev = sdev->host->dma_dev; 95 + struct device *dev = us->pusb_dev->bus->sysdev; 96 96 97 97 /* 98 98 * Many devices have trouble transferring more than 32KB at a time,
+6 -13
drivers/usb/storage/uas.c
··· 837 837 */ 838 838 blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); 839 839 840 + if (devinfo->flags & US_FL_MAX_SECTORS_64) 841 + blk_queue_max_hw_sectors(sdev->request_queue, 64); 842 + else if (devinfo->flags & US_FL_MAX_SECTORS_240) 843 + blk_queue_max_hw_sectors(sdev->request_queue, 240); 844 + 840 845 return 0; 841 846 } 842 847 843 848 static int uas_slave_configure(struct scsi_device *sdev) 844 849 { 845 850 struct uas_dev_info *devinfo = sdev->hostdata; 846 - struct device *dev = sdev->host->dma_dev; 847 - 848 - if (devinfo->flags & US_FL_MAX_SECTORS_64) 849 - blk_queue_max_hw_sectors(sdev->request_queue, 64); 850 - else if (devinfo->flags & US_FL_MAX_SECTORS_240) 851 - blk_queue_max_hw_sectors(sdev->request_queue, 240); 852 - else if (devinfo->udev->speed >= USB_SPEED_SUPER) 853 - blk_queue_max_hw_sectors(sdev->request_queue, 2048); 854 - 855 - blk_queue_max_hw_sectors(sdev->request_queue, 856 - min_t(size_t, queue_max_hw_sectors(sdev->request_queue), 857 - dma_max_mapping_size(dev) >> SECTOR_SHIFT)); 858 851 859 852 if (devinfo->flags & US_FL_NO_REPORT_OPCODES) 860 853 sdev->no_report_opcodes = 1; ··· 1033 1040 shost->can_queue = devinfo->qdepth - 2; 1034 1041 1035 1042 usb_set_intfdata(intf, shost); 1036 - result = scsi_add_host_with_dma(shost, &intf->dev, udev->bus->sysdev); 1043 + result = scsi_add_host(shost, &intf->dev); 1037 1044 if (result) 1038 1045 goto free_streams; 1039 1046
+2 -3
drivers/usb/storage/usb.c
··· 1049 1049 goto BadDevice; 1050 1050 usb_autopm_get_interface_no_resume(us->pusb_intf); 1051 1051 snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s", 1052 - dev_name(dev)); 1053 - result = scsi_add_host_with_dma(us_to_host(us), dev, 1054 - us->pusb_dev->bus->sysdev); 1052 + dev_name(&us->pusb_intf->dev)); 1053 + result = scsi_add_host(us_to_host(us), dev); 1055 1054 if (result) { 1056 1055 dev_warn(dev, 1057 1056 "Unable to add the scsi host\n");
+3
fs/afs/super.c
··· 230 230 231 231 _enter(",%s", name); 232 232 233 + if (fc->source) 234 + return invalf(fc, "kAFS: Multiple sources not supported"); 235 + 233 236 if (!name) { 234 237 printk(KERN_ERR "kAFS: no volume name specified\n"); 235 238 return -EINVAL;
+2 -1
fs/cifs/connect.c
··· 4546 4546 if (ses) { 4547 4547 spin_lock(&cifs_tcp_ses_lock); 4548 4548 ses->ses_count++; 4549 - ses->tcon_ipc->remap = cifs_remap(cifs_sb); 4549 + if (ses->tcon_ipc) 4550 + ses->tcon_ipc->remap = cifs_remap(cifs_sb); 4550 4551 spin_unlock(&cifs_tcp_ses_lock); 4551 4552 } 4552 4553 *root_ses = ses;
+2 -2
fs/cifs/smb2ops.c
··· 3114 3114 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; 3115 3115 3116 3116 rc = SMB2_ioctl_init(tcon, server, 3117 - &rqst[1], fid.persistent_fid, 3118 - fid.volatile_fid, FSCTL_GET_REPARSE_POINT, 3117 + &rqst[1], COMPOUND_FID, 3118 + COMPOUND_FID, FSCTL_GET_REPARSE_POINT, 3119 3119 true /* is_fctl */, NULL, 0, 3120 3120 CIFSMaxBufSize - 3121 3121 MAX_SMB2_CREATE_RESPONSE_SIZE -
+37 -32
fs/cifs/smb2pdu.c
··· 2272 2272 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) 2273 2273 { 2274 2274 struct crt_sd_ctxt *buf; 2275 - struct cifs_ace *pace; 2276 - unsigned int sdlen, acelen; 2275 + __u8 *ptr, *aclptr; 2276 + unsigned int acelen, acl_size, ace_count; 2277 2277 unsigned int owner_offset = 0; 2278 2278 unsigned int group_offset = 0; 2279 + struct smb3_acl acl; 2279 2280 2280 - *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 2), 8); 2281 + *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8); 2281 2282 2282 2283 if (set_owner) { 2283 - /* offset fields are from beginning of security descriptor not of create context */ 2284 - owner_offset = sizeof(struct smb3_acl) + (sizeof(struct cifs_ace) * 2); 2285 - 2286 2284 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */ 2287 2285 *len += sizeof(struct owner_group_sids); 2288 2286 } ··· 2289 2291 if (buf == NULL) 2290 2292 return buf; 2291 2293 2294 + ptr = (__u8 *)&buf[1]; 2292 2295 if (set_owner) { 2296 + /* offset fields are from beginning of security descriptor not of create context */ 2297 + owner_offset = ptr - (__u8 *)&buf->sd; 2293 2298 buf->sd.OffsetOwner = cpu_to_le32(owner_offset); 2294 - group_offset = owner_offset + sizeof(struct owner_sid); 2299 + group_offset = owner_offset + offsetof(struct owner_group_sids, group); 2295 2300 buf->sd.OffsetGroup = cpu_to_le32(group_offset); 2301 + 2302 + setup_owner_group_sids(ptr); 2303 + ptr += sizeof(struct owner_group_sids); 2296 2304 } else { 2297 2305 buf->sd.OffsetOwner = 0; 2298 2306 buf->sd.OffsetGroup = 0; 2299 2307 } 2300 2308 2301 - sdlen = sizeof(struct smb3_sd) + sizeof(struct smb3_acl) + 2302 - 2 * sizeof(struct cifs_ace); 2303 - if (set_owner) { 2304 - sdlen += sizeof(struct owner_group_sids); 2305 - setup_owner_group_sids(owner_offset + sizeof(struct create_context) + 8 /* name */ 2306 - + (char *)buf); 2307 - } 2308 - 2309 - buf->ccontext.DataOffset = cpu_to_le16(offsetof 2310 - (struct crt_sd_ctxt, sd)); 2311 - buf->ccontext.DataLength = cpu_to_le32(sdlen); 2309 + buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd)); 2310 2310 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name)); 2311 2311 buf->ccontext.NameLength = cpu_to_le16(4); 2312 2312 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */ ··· 2313 2319 buf->Name[2] = 'c'; 2314 2320 buf->Name[3] = 'D'; 2315 2321 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */ 2322 + 2316 2323 /* 2317 2324 * ACL is "self relative" ie ACL is stored in contiguous block of memory 2318 2325 * and "DP" ie the DACL is present ··· 2321 2326 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP); 2322 2327 2323 2328 /* offset owner, group and Sbz1 and SACL are all zero */ 2324 - buf->sd.OffsetDacl = cpu_to_le32(sizeof(struct smb3_sd)); 2325 - buf->acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ 2329 + buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2330 + /* Skip the ACL for now. we will copy it into buf later. */ 2331 + aclptr = ptr; 2332 + ptr += sizeof(struct cifs_acl); 2326 2333 2327 2334 /* create one ACE to hold the mode embedded in reserved special SID */ 2328 - pace = (struct cifs_ace *)(sizeof(struct crt_sd_ctxt) + (char *)buf); 2329 - acelen = setup_special_mode_ACE(pace, (__u64)mode); 2335 + acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode); 2336 + ptr += acelen; 2337 + acl_size = acelen + sizeof(struct smb3_acl); 2338 + ace_count = 1; 2330 2339 2331 2340 if (set_owner) { 2332 2341 /* we do not need to reallocate buffer to add the two more ACEs. plenty of space */ 2333 - pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) + (char *)buf)); 2334 - acelen += setup_special_user_owner_ACE(pace); 2335 - /* it does not appear necessary to add an ACE for the NFS group SID */ 2336 - buf->acl.AceCount = cpu_to_le16(3); 2337 - } else 2338 - buf->acl.AceCount = cpu_to_le16(2); 2342 + acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr); 2343 + ptr += acelen; 2344 + acl_size += acelen; 2345 + ace_count += 1; 2346 + } 2339 2347 2340 2348 /* and one more ACE to allow access for authenticated users */ 2341 - pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) + 2342 - (char *)buf)); 2343 - acelen += setup_authusers_ACE(pace); 2349 + acelen = setup_authusers_ACE((struct cifs_ace *)ptr); 2350 + ptr += acelen; 2351 + acl_size += acelen; 2352 + ace_count += 1; 2344 2353 2345 - buf->acl.AclSize = cpu_to_le16(sizeof(struct cifs_acl) + acelen); 2354 + acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ 2355 + acl.AclSize = cpu_to_le16(acl_size); 2356 + acl.AceCount = cpu_to_le16(ace_count); 2357 + memcpy(aclptr, &acl, sizeof(struct cifs_acl)); 2358 + 2359 + buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2360 + *len = ptr - (__u8 *)buf; 2346 2361 2347 2362 return buf; 2348 2363 }
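For orientation, the create_sd_buf() rewrite above builds the security-descriptor context by walking a cursor (ptr) through the buffer and patching the ACL header in last, once its size and ACE count are known. A sketch of the resulting self-relative layout, inferred from the code in this hunk:

	/*
	 * [ create_context | Name[8] | smb3_sd | owner/group SIDs
	 *   (set_owner only) | cifs_acl header | ACE, ACE, ... ]
	 *
	 * OffsetOwner, OffsetGroup and OffsetDacl are all relative to the
	 * start of smb3_sd; ccontext.DataLength covers smb3_sd through the
	 * last ACE, and *len covers the whole context including its header.
	 */

The old code hard-coded these offsets and kept the ACL header inside crt_sd_ctxt (dropped from smb2pdu.h below), which did not hold up once the optional owner/group SIDs were inserted in between.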
-2
fs/cifs/smb2pdu.h
··· 963 963 struct create_context ccontext; 964 964 __u8 Name[8]; 965 965 struct smb3_sd sd; 966 - struct smb3_acl acl; 967 - /* Followed by at least 4 ACEs */ 968 966 } __packed; 969 967 970 968
+2 -1
fs/coredump.c
··· 229 229 */ 230 230 if (ispipe) { 231 231 if (isspace(*pat_ptr)) { 232 - was_space = true; 232 + if (cn->used != 0) 233 + was_space = true; 233 234 pat_ptr++; 234 235 continue; 235 236 } else if (was_space) {
+2 -1
fs/io_uring.c
··· 4499 4499 return -EFAULT; 4500 4500 if (clen < 0) 4501 4501 return -EINVAL; 4502 - sr->len = iomsg->iov[0].iov_len; 4502 + sr->len = clen; 4503 + iomsg->iov[0].iov_len = clen; 4503 4504 iomsg->iov = NULL; 4504 4505 } else { 4505 4506 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
+9
fs/nfs/Kconfig
··· 205 205 Choose Y here to disable the use of NFS over UDP. NFS over UDP 206 206 on modern networks (1Gb+) can lead to data corruption caused by 207 207 fragmentation during high loads. 208 + 209 + config NFS_V4_2_READ_PLUS 210 + bool "NFS: Enable support for the NFSv4.2 READ_PLUS operation" 211 + depends on NFS_V4_2 212 + default n 213 + help 214 + This is intended for developers only. The READ_PLUS operation has 215 + been shown to have issues under specific conditions and should not 216 + be used in production.
+21 -6
fs/nfs/flexfilelayout/flexfilelayout.c
··· 838 838 struct nfs_pgio_mirror *pgm; 839 839 struct nfs4_ff_layout_mirror *mirror; 840 840 struct nfs4_pnfs_ds *ds; 841 - u32 ds_idx, i; 841 + u32 ds_idx; 842 842 843 843 retry: 844 844 ff_layout_pg_check_layout(pgio, req); ··· 864 864 goto retry; 865 865 } 866 866 867 - for (i = 0; i < pgio->pg_mirror_count; i++) { 868 - mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); 869 - pgm = &pgio->pg_mirrors[i]; 870 - pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; 871 - } 867 + mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx); 868 + pgm = &pgio->pg_mirrors[0]; 869 + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; 872 870 873 871 pgio->pg_mirror_idx = ds_idx; 874 872 ··· 983 985 return 1; 984 986 } 985 987 988 + static u32 989 + ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx) 990 + { 991 + u32 old = desc->pg_mirror_idx; 992 + 993 + desc->pg_mirror_idx = idx; 994 + return old; 995 + } 996 + 997 + static struct nfs_pgio_mirror * 998 + ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx) 999 + { 1000 + return &desc->pg_mirrors[idx]; 1001 + } 1002 + 986 1003 static const struct nfs_pageio_ops ff_layout_pg_read_ops = { 987 1004 .pg_init = ff_layout_pg_init_read, 988 1005 .pg_test = pnfs_generic_pg_test, ··· 1011 998 .pg_doio = pnfs_generic_pg_writepages, 1012 999 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write, 1013 1000 .pg_cleanup = pnfs_generic_pg_cleanup, 1001 + .pg_get_mirror = ff_layout_pg_get_mirror_write, 1002 + .pg_set_mirror = ff_layout_pg_set_mirror_write, 1014 1003 }; 1015 1004 1016 1005 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
+13 -8
fs/nfs/nfs42proc.c
··· 1241 1241 .rpc_resp = &res, 1242 1242 }; 1243 1243 u32 xdrlen; 1244 - int ret, np; 1244 + int ret, np, i; 1245 1245 1246 1246 1247 + ret = -ENOMEM; 1247 1248 res.scratch = alloc_page(GFP_KERNEL); 1248 1249 if (!res.scratch) 1249 - return -ENOMEM; 1250 + goto out; 1250 1251 1251 1252 xdrlen = nfs42_listxattr_xdrsize(buflen); 1252 1253 if (xdrlen > server->lxasize) ··· 1255 1254 np = xdrlen / PAGE_SIZE + 1; 1256 1255 1257 1256 pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL); 1258 - if (pages == NULL) { 1259 - __free_page(res.scratch); 1260 - return -ENOMEM; 1257 + if (!pages) 1258 + goto out_free_scratch; 1259 + for (i = 0; i < np; i++) { 1260 + pages[i] = alloc_page(GFP_KERNEL); 1261 + if (!pages[i]) 1262 + goto out_free_pages; 1261 1263 } 1262 1264 1263 1265 arg.xattr_pages = pages; ··· 1275 1271 *eofp = res.eof; 1276 1272 } 1277 1273 1274 + out_free_pages: 1278 1275 while (--np >= 0) { 1279 1276 if (pages[np]) 1280 1277 __free_page(pages[np]); 1281 1278 } 1282 - 1283 - __free_page(res.scratch); 1284 1279 kfree(pages); 1285 - 1280 + out_free_scratch: 1281 + __free_page(res.scratch); 1282 + out: 1286 1283 return ret; 1287 1284 1288 1285 }
-1
fs/nfs/nfs42xdr.c
··· 1528 1528 1529 1529 rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, 1530 1530 hdr.replen); 1531 - req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; 1532 1531 1533 1532 encode_nops(&hdr); 1534 1533 }
+1 -1
fs/nfs/nfs4file.c
··· 377 377 goto out_stateowner; 378 378 379 379 set_bit(NFS_SRV_SSC_COPY_STATE, &ctx->state->flags); 380 - set_bit(NFS_OPEN_STATE, &ctx->state->flags); 381 380 memcpy(&ctx->state->open_stateid.other, &stateid->other, 382 381 NFS4_STATEID_OTHER_SIZE); 383 382 update_open_stateid(ctx->state, stateid, NULL, filep->f_mode); 383 + set_bit(NFS_OPEN_STATE, &ctx->state->flags); 384 384 385 385 nfs_file_set_open_context(filep, ctx); 386 386 put_nfs_open_context(ctx);
+1 -1
fs/nfs/nfs4proc.c
··· 5309 5309 nfs4_read_done_cb(task, hdr); 5310 5310 } 5311 5311 5312 - #ifdef CONFIG_NFS_V4_2 5312 + #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5313 5313 static void nfs42_read_plus_support(struct nfs_server *server, struct rpc_message *msg) 5314 5314 { 5315 5315 if (server->caps & NFS_CAP_READ_PLUS)
+27 -9
fs/nfs/pagelist.c
··· 31 31 static struct kmem_cache *nfs_page_cachep; 32 32 static const struct rpc_call_ops nfs_pgio_common_ops; 33 33 34 + static struct nfs_pgio_mirror * 35 + nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx) 36 + { 37 + if (desc->pg_ops->pg_get_mirror) 38 + return desc->pg_ops->pg_get_mirror(desc, idx); 39 + return &desc->pg_mirrors[0]; 40 + } 41 + 34 42 struct nfs_pgio_mirror * 35 43 nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc) 36 44 { 37 - return &desc->pg_mirrors[desc->pg_mirror_idx]; 45 + return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx); 38 46 } 39 47 EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror); 48 + 49 + static u32 50 + nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx) 51 + { 52 + if (desc->pg_ops->pg_set_mirror) 53 + return desc->pg_ops->pg_set_mirror(desc, idx); 54 + return desc->pg_mirror_idx; 55 + } 40 56 41 57 void nfs_pgheader_init(struct nfs_pageio_descriptor *desc, 42 58 struct nfs_pgio_header *hdr, ··· 1275 1259 return; 1276 1260 1277 1261 for (midx = 0; midx < desc->pg_mirror_count; midx++) { 1278 - mirror = &desc->pg_mirrors[midx]; 1262 + mirror = nfs_pgio_get_mirror(desc, midx); 1279 1263 desc->pg_completion_ops->error_cleanup(&mirror->pg_list, 1280 1264 desc->pg_error); 1281 1265 } ··· 1309 1293 goto out_failed; 1310 1294 } 1311 1295 1312 - desc->pg_mirror_idx = midx; 1296 + nfs_pgio_set_current_mirror(desc, midx); 1313 1297 if (!nfs_pageio_add_request_mirror(desc, dupreq)) 1314 1298 goto out_cleanup_subreq; 1315 1299 } 1316 1300 1317 - desc->pg_mirror_idx = 0; 1301 + nfs_pgio_set_current_mirror(desc, 0); 1318 1302 if (!nfs_pageio_add_request_mirror(desc, req)) 1319 1303 goto out_failed; 1320 1304 ··· 1336 1320 static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, 1337 1321 u32 mirror_idx) 1338 1322 { 1339 - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx]; 1340 - u32 restore_idx = desc->pg_mirror_idx; 1323 + struct nfs_pgio_mirror *mirror; 1324 + u32 restore_idx; 1341 1325 1342 - desc->pg_mirror_idx = mirror_idx; 1326 + restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx); 1327 + mirror = nfs_pgio_current_mirror(desc); 1328 + 1343 1329 for (;;) { 1344 1330 nfs_pageio_doio(desc); 1345 1331 if (desc->pg_error < 0 || !mirror->pg_recoalesce) ··· 1349 1331 if (!nfs_do_recoalesce(desc)) 1350 1332 break; 1351 1333 } 1352 - desc->pg_mirror_idx = restore_idx; 1334 + nfs_pgio_set_current_mirror(desc, restore_idx); 1353 1335 } 1354 1336 1355 1337 /* ··· 1423 1405 u32 midx; 1424 1406 1425 1407 for (midx = 0; midx < desc->pg_mirror_count; midx++) { 1426 - mirror = &desc->pg_mirrors[midx]; 1408 + mirror = nfs_pgio_get_mirror(desc, midx); 1427 1409 if (!list_empty(&mirror->pg_list)) { 1428 1410 prev = nfs_list_entry(mirror->pg_list.prev); 1429 1411 if (index != prev->wb_index + 1) {
+6 -2
fs/proc/task_mmu.c
··· 1599 1599 1600 1600 src = *ppos; 1601 1601 svpfn = src / PM_ENTRY_BYTES; 1602 - start_vaddr = svpfn << PAGE_SHIFT; 1603 1602 end_vaddr = mm->task_size; 1604 1603 1605 1604 /* watch out for wraparound */ 1606 - if (svpfn > mm->task_size >> PAGE_SHIFT) 1605 + start_vaddr = end_vaddr; 1606 + if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) 1607 + start_vaddr = untagged_addr(svpfn << PAGE_SHIFT); 1608 + 1609 + /* Ensure the address is inside the task */ 1610 + if (start_vaddr > mm->task_size) 1607 1611 start_vaddr = end_vaddr; 1608 1612 1609 1613 /*
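The pagemap change above does two things worth spelling out: the shift is guarded (svpfn << PAGE_SHIFT silently wraps once svpfn exceeds ULONG_MAX >> PAGE_SHIFT), and the reconstructed address is passed through untagged_addr() so pointer tag bits no longer make it look out of range. A worked example, assuming 64-bit, PAGE_SHIFT == 12 and an arm64-style top-byte tag (numbers illustrative):

	/* userspace probes va = 0xb400ffff12345000 (tag byte 0xb4)        */
	/* it seeks pagemap to (va >> 12) * 8, so svpfn = 0xb400ffff12345  */
	/* before: svpfn > task_size >> 12, so the read saw immediate EOF  */
	/* after:  untagged_addr(svpfn << 12) = 0x0000ffff12345000, an     */
	/*         ordinary address checked against mm->task_size; svpfn   */
	/*         values that would overflow the shift are pinned to      */
	/*         end_vaddr instead                                       */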
+27 -30
fs/seq_file.c
··· 168 168 ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter) 169 169 { 170 170 struct seq_file *m = iocb->ki_filp->private_data; 171 - size_t size = iov_iter_count(iter); 172 171 size_t copied = 0; 173 172 size_t n; 174 173 void *p; 175 174 int err = 0; 175 + 176 + if (!iov_iter_count(iter)) 177 + return 0; 176 178 177 179 mutex_lock(&m->lock); 178 180 ··· 208 206 if (!m->buf) 209 207 goto Enomem; 210 208 } 211 - /* if not empty - flush it first */ 209 + // something left in the buffer - copy it out first 212 210 if (m->count) { 213 - n = min(m->count, size); 214 - if (copy_to_iter(m->buf + m->from, n, iter) != n) 215 - goto Efault; 211 + n = copy_to_iter(m->buf + m->from, m->count, iter); 216 212 m->count -= n; 217 213 m->from += n; 218 - size -= n; 219 214 copied += n; 220 - if (!size) 215 + if (m->count) // hadn't managed to copy everything 221 216 goto Done; 222 217 } 223 - /* we need at least one record in buffer */ 218 + // get a non-empty record in the buffer 224 219 m->from = 0; 225 220 p = m->op->start(m, &m->index); 226 221 while (1) { 227 222 err = PTR_ERR(p); 228 - if (!p || IS_ERR(p)) 223 + if (!p || IS_ERR(p)) // EOF or an error 229 224 break; 230 225 err = m->op->show(m, p); 231 - if (err < 0) 226 + if (err < 0) // hard error 232 227 break; 233 - if (unlikely(err)) 228 + if (unlikely(err)) // ->show() says "skip it" 234 229 m->count = 0; 235 - if (unlikely(!m->count)) { 230 + if (unlikely(!m->count)) { // empty record 236 231 p = m->op->next(m, p, &m->index); 237 232 continue; 238 233 } 239 - if (m->count < m->size) 234 + if (!seq_has_overflowed(m)) // got it 240 235 goto Fill; 236 + // need a bigger buffer 241 237 m->op->stop(m, p); 242 238 kvfree(m->buf); 243 239 m->count = 0; ··· 244 244 goto Enomem; 245 245 p = m->op->start(m, &m->index); 246 246 } 247 + // EOF or an error 247 248 m->op->stop(m, p); 248 249 m->count = 0; 249 250 goto Done; 250 251 Fill: 251 - /* they want more? let's try to get some more */ 252 + // one non-empty record is in the buffer; if they want more, 253 + // try to fit more in, but in any case we need to advance 254 + // the iterator once for every record shown. 252 255 while (1) { 253 256 size_t offs = m->count; 254 257 loff_t pos = m->index; ··· 262 259 m->op->next); 263 260 m->index++; 264 261 } 265 - if (!p || IS_ERR(p)) { 266 - err = PTR_ERR(p); 262 + if (!p || IS_ERR(p)) // no next record for us 267 263 break; 268 - } 269 - if (m->count >= size) 264 + if (m->count >= iov_iter_count(iter)) 270 265 break; 271 266 err = m->op->show(m, p); 272 - if (seq_has_overflowed(m) || err) { 267 + if (err > 0) { // ->show() says "skip it" 273 268 m->count = offs; 274 - if (likely(err <= 0)) 275 - break; 269 + } else if (err || seq_has_overflowed(m)) { 270 + m->count = offs; 271 + break; 276 272 } 277 273 } 278 274 m->op->stop(m, p); 279 - n = min(m->count, size); 280 - if (copy_to_iter(m->buf, n, iter) != n) 281 - goto Efault; 275 + n = copy_to_iter(m->buf, m->count, iter); 282 276 copied += n; 283 277 m->count -= n; 284 278 m->from = n; 285 279 Done: 286 - if (!copied) 287 - copied = err; 288 - else { 280 + if (unlikely(!copied)) { 281 + copied = m->count ? -EFAULT : err; 282 + } else { 289 283 iocb->ki_pos += copied; 290 284 m->read_pos += copied; 291 285 } ··· 290 290 return copied; 291 291 Enomem: 292 292 err = -ENOMEM; 293 - goto Done; 294 - Efault: 295 - err = -EFAULT; 296 293 goto Done; 297 294 } 298 295 EXPORT_SYMBOL(seq_read_iter);
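One user-visible consequence of the seq_read_iter() rework above: a read that managed to copy anything now reports the short count, and -EFAULT is raised only when nothing at all could be copied (see the Done: label). Well-behaved consumers are unaffected, since the contract has always been an ordinary read loop. A minimal, runnable userspace sketch (file name illustrative — any seq_file-backed file works):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/proc/self/maps", O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)	/* short reads are fine */
			fwrite(buf, 1, n, stdout);
		close(fd);
		return n < 0;	/* 0 on clean EOF */
	}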
+8 -6
fs/zonefs/super.c
··· 691 691 bio->bi_opf |= REQ_FUA; 692 692 693 693 ret = bio_iov_iter_get_pages(bio, from); 694 - if (unlikely(ret)) { 695 - bio_io_error(bio); 696 - return ret; 697 - } 694 + if (unlikely(ret)) 695 + goto out_release; 696 + 698 697 size = bio->bi_iter.bi_size; 699 - task_io_account_write(ret); 698 + task_io_account_write(size); 700 699 701 700 if (iocb->ki_flags & IOCB_HIPRI) 702 701 bio_set_polled(bio, iocb); 703 702 704 703 ret = submit_bio_wait(bio); 705 704 705 + zonefs_file_write_dio_end_io(iocb, size, ret, 0); 706 + 707 + out_release: 708 + bio_release_pages(bio, false); 706 709 bio_put(bio); 707 710 708 - zonefs_file_write_dio_end_io(iocb, size, ret, 0); 709 711 if (ret >= 0) { 710 712 iocb->ki_pos += size; 711 713 return size;
+9 -6
include/linux/blkdev.h
··· 1073 1073 * file system requests. 1074 1074 */ 1075 1075 static inline unsigned int blk_max_size_offset(struct request_queue *q, 1076 - sector_t offset) 1076 + sector_t offset, 1077 + unsigned int chunk_sectors) 1077 1078 { 1078 - unsigned int chunk_sectors = q->limits.chunk_sectors; 1079 - 1080 - if (!chunk_sectors) 1081 - return q->limits.max_sectors; 1079 + if (!chunk_sectors) { 1080 + if (q->limits.chunk_sectors) 1081 + chunk_sectors = q->limits.chunk_sectors; 1082 + else 1083 + return q->limits.max_sectors; 1084 + } 1082 1085 1083 1086 if (likely(is_power_of_2(chunk_sectors))) 1084 1087 chunk_sectors -= offset & (chunk_sectors - 1); ··· 1104 1101 req_op(rq) == REQ_OP_SECURE_ERASE) 1105 1102 return blk_queue_get_max_sectors(q, req_op(rq)); 1106 1103 1107 - return min(blk_max_size_offset(q, offset), 1104 + return min(blk_max_size_offset(q, offset, 0), 1108 1105 blk_queue_get_max_sectors(q, req_op(rq))); 1109 1106 } 1110 1107
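The blk_max_size_offset() change above adds a chunk_sectors parameter so callers can impose a chunk size other than the queue's (passing 0 keeps the old behaviour). The power-of-two fast path returns the sectors remaining before the next chunk boundary; a worked example with illustrative numbers:

	/* chunk_sectors = 256 (power of two), offset = 1000          */
	/* offset & (chunk_sectors - 1) = 1000 & 255 = 232            */
	/* remaining in chunk = 256 - 232 = 24                        */
	/* result = min(24, q->limits.max_sectors): the request       */
	/* must not cross the chunk boundary at sector 1024           */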
+5
include/linux/build_bug.h
··· 77 77 #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) 78 78 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) 79 79 80 + #ifdef __GENKSYMS__ 81 + /* genksyms gets confused by _Static_assert */ 82 + #define _Static_assert(expr, ...) 83 + #endif 84 + 80 85 #endif /* _LINUX_BUILD_BUG_H */
+22
include/linux/elfcore.h
··· 104 104 #endif 105 105 } 106 106 107 + #if defined(CONFIG_UM) || defined(CONFIG_IA64) 107 108 /* 108 109 * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out 109 110 * extra segments containing the gate DSO contents. Dumping its ··· 119 118 extern int 120 119 elf_core_write_extra_data(struct coredump_params *cprm); 121 120 extern size_t elf_core_extra_data_size(void); 121 + #else 122 + static inline Elf_Half elf_core_extra_phdrs(void) 123 + { 124 + return 0; 125 + } 126 + 127 + static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) 128 + { 129 + return 1; 130 + } 131 + 132 + static inline int elf_core_write_extra_data(struct coredump_params *cprm) 133 + { 134 + return 1; 135 + } 136 + 137 + static inline size_t elf_core_extra_data_size(void) 138 + { 139 + return 0; 140 + } 141 + #endif 122 142 123 143 #endif /* _LINUX_ELFCORE_H */
+10 -2
include/linux/irqdomain.h
··· 384 384 extern void irq_domain_disassociate(struct irq_domain *domain, 385 385 unsigned int irq); 386 386 387 - extern unsigned int irq_create_mapping(struct irq_domain *host, 388 - irq_hw_number_t hwirq); 387 + extern unsigned int irq_create_mapping_affinity(struct irq_domain *host, 388 + irq_hw_number_t hwirq, 389 + const struct irq_affinity_desc *affinity); 389 390 extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); 390 391 extern void irq_dispose_mapping(unsigned int virq); 392 + 393 + static inline unsigned int irq_create_mapping(struct irq_domain *host, 394 + irq_hw_number_t hwirq) 395 + { 396 + return irq_create_mapping_affinity(host, hwirq, NULL); 397 + } 398 + 391 399 392 400 /** 393 401 * irq_linear_revmap() - Find a linux irq from a hw irq number.
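irq_create_mapping_affinity() above lets the caller supply an affinity hint when the mapping (and hence the irq descriptor) is first allocated, while irq_create_mapping() keeps its exact old behaviour as a NULL-affinity inline wrapper, so no existing caller changes. A caller-side sketch, with hypothetical domain/hwirq values:

	struct irq_affinity_desc affd = { };
	unsigned int virq;

	cpumask_set_cpu(2, &affd.mask);		/* prefer CPU 2's node */
	virq = irq_create_mapping_affinity(domain, hwirq, &affd);
	if (!virq)
		return -ENOSPC;			/* mapping failed */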
+4 -1
include/linux/netfilter/x_tables.h
··· 227 227 unsigned int valid_hooks; 228 228 229 229 /* Man behind the curtain... */ 230 - struct xt_table_info *private; 230 + struct xt_table_info __rcu *private; 231 231 232 232 /* Set this to THIS_MODULE if you are a module, otherwise NULL */ 233 233 struct module *me; ··· 447 447 } 448 448 449 449 struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); 450 + 451 + struct xt_table_info 452 + *xt_table_get_private_protected(const struct xt_table *table); 450 453 451 454 #ifdef CONFIG_COMPAT 452 455 #include <net/compat.h>
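Annotating private as __rcu above makes sparse flag any plain dereference: readers must go through rcu_dereference(), and code paths that already hold the table lock get the new xt_table_get_private_protected() accessor instead. The helper's likely body (a sketch of the accompanying x_tables.c change, which is not part of this hunk):

	struct xt_table_info
	*xt_table_get_private_protected(const struct xt_table *table)
	{
		/* caller must hold the per-family table mutex */
		return rcu_dereference_protected(table->private,
						 mutex_is_locked(&xt[table->af].mutex));
	}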
+4
include/linux/nfs_page.h
··· 55 55 unsigned short wb_nio; /* Number of I/O attempts */ 56 56 }; 57 57 58 + struct nfs_pgio_mirror; 58 59 struct nfs_pageio_descriptor; 59 60 struct nfs_pageio_ops { 60 61 void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); ··· 65 64 unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *, 66 65 struct nfs_page *); 67 66 void (*pg_cleanup)(struct nfs_pageio_descriptor *); 67 + struct nfs_pgio_mirror * 68 + (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32); 69 + u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32); 68 70 }; 69 71 70 72 struct nfs_rw_ops {
+1 -1
include/linux/security.h
··· 869 869 870 870 static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) 871 871 { 872 - return -EOPNOTSUPP; 872 + return cap_inode_getsecurity(inode, name, buffer, alloc); 873 873 } 874 874 875 875 static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
+1
include/linux/stmmac.h
··· 170 170 int unicast_filter_entries; 171 171 int tx_fifo_size; 172 172 int rx_fifo_size; 173 + u32 addr64; 173 174 u32 rx_queues_to_use; 174 175 u32 tx_queues_to_use; 175 176 u8 rx_sched_algorithm;
+4
include/linux/tty.h
··· 306 306 struct termiox *termiox; /* May be NULL for unsupported */ 307 307 char name[64]; 308 308 struct pid *pgrp; /* Protected by ctrl lock */ 309 + /* 310 + * Writes protected by both ctrl lock and legacy mutex, readers must use 311 + * at least one of them. 312 + */ 309 313 struct pid *session; 310 314 unsigned long flags; 311 315 int count;
-1
include/linux/zsmalloc.h
··· 20 20 * zsmalloc mapping modes 21 21 * 22 22 * NOTE: These only make a difference when a mapped object spans pages. 23 - * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected. 24 23 */ 25 24 enum zs_mapmode { 26 25 ZS_MM_RW, /* normal read-write mapping */
-2
include/net/bonding.h
··· 86 86 #define bond_for_each_slave_rcu(bond, pos, iter) \ 87 87 netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) 88 88 89 - #ifdef CONFIG_XFRM_OFFLOAD 90 89 #define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ 91 90 NETIF_F_GSO_ESP) 92 - #endif /* CONFIG_XFRM_OFFLOAD */ 93 91 94 92 #ifdef CONFIG_NET_POLL_CONTROLLER 95 93 extern atomic_t netpoll_block_tx;
+4
include/net/netfilter/nf_tables.h
··· 1524 1524 void nft_chain_route_fini(void); 1525 1525 1526 1526 void nf_tables_trans_destroy_flush_work(void); 1527 + 1528 + int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result); 1529 + __be64 nf_jiffies64_to_msecs(u64 input); 1530 + 1527 1531 #endif /* _NET_NF_TABLES_H */
-2
include/net/xdp.h
··· 256 256 }; 257 257 258 258 struct netdev_bpf; 259 - bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, 260 - struct netdev_bpf *bpf); 261 259 void xdp_attachment_setup(struct xdp_attachment_info *info, 262 260 struct netdev_bpf *bpf); 263 261
+3
include/soc/mscc/ocelot.h
··· 621 621 /* Keep track of the vlan port masks */ 622 622 u32 vlan_mask[VLAN_N_VID]; 623 623 624 + /* Switches like VSC9959 have flooding per traffic class */ 625 + int num_flooding_pgids; 626 + 624 627 /* In tables like ANA:PORT and the ANA:PGID:PGID mask, 625 628 * the CPU is located after the physical ports (at the 626 629 * num_phys_ports index).
+2 -2
include/uapi/linux/bpf.h
··· 3977 3977 FN(seq_printf_btf), \ 3978 3978 FN(skb_cgroup_classid), \ 3979 3979 FN(redirect_neigh), \ 3980 - FN(bpf_per_cpu_ptr), \ 3981 - FN(bpf_this_cpu_ptr), \ 3980 + FN(per_cpu_ptr), \ 3981 + FN(this_cpu_ptr), \ 3982 3982 FN(redirect_peer), \ 3983 3983 FN(task_storage_get), \ 3984 3984 FN(task_storage_delete), \
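The uapi rename above matters because the helper IDs are macro-generated: the enum paster prepends BPF_FUNC_ to each FN() argument, so FN(bpf_per_cpu_ptr) had produced the misnamed BPF_FUNC_bpf_per_cpu_ptr. The generating machinery, from the same header:

	#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
	enum bpf_func_id {
		__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
		__BPF_FUNC_MAX_ID,
	};
	/* FN(per_cpu_ptr) -> BPF_FUNC_per_cpu_ptr, matching the helper's
	 * documented name bpf_per_cpu_ptr() */

The kernel/bpf/helpers.c and kernel/trace/bpf_trace.c hunks below are the matching users of the corrected constants.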
+10
init/Kconfig
··· 47 47 int 48 48 default $(shell,$(srctree)/scripts/clang-version.sh $(CC)) 49 49 50 + config LLD_VERSION 51 + int 52 + default $(shell,$(srctree)/scripts/lld-version.sh $(LD)) 53 + 50 54 config CC_CAN_LINK 51 55 bool 52 56 default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT ··· 1351 1347 silently broken kernel if the required annotations are not 1352 1348 present. This option is not well tested yet, so use at your 1353 1349 own risk. 1350 + 1351 + config LD_ORPHAN_WARN 1352 + def_bool y 1353 + depends on ARCH_WANT_LD_ORPHAN_WARN 1354 + depends on !LD_IS_LLD || LLD_VERSION >= 110000 1355 + depends on $(ld-option,--orphan-handling=warn) 1354 1356 1355 1357 config SYSCTL 1356 1358 bool
+1 -1
init/initramfs.c
··· 535 535 #include <linux/initrd.h> 536 536 #include <linux/kexec.h> 537 537 538 - void __weak free_initrd_mem(unsigned long start, unsigned long end) 538 + void __weak __init free_initrd_mem(unsigned long start, unsigned long end) 539 539 { 540 540 #ifdef CONFIG_ARCH_KEEP_MEMBLOCK 541 541 unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
-1
kernel/Makefile
··· 97 97 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o 98 98 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o 99 99 obj-$(CONFIG_LATENCYTOP) += latencytop.o 100 - obj-$(CONFIG_ELFCORE) += elfcore.o 101 100 obj-$(CONFIG_FUNCTION_TRACER) += trace/ 102 101 obj-$(CONFIG_TRACING) += trace/ 103 102 obj-$(CONFIG_TRACE_CLOCK) += trace/
+2 -2
kernel/bpf/helpers.c
··· 730 730 return &bpf_snprintf_btf_proto; 731 731 case BPF_FUNC_jiffies64: 732 732 return &bpf_jiffies64_proto; 733 - case BPF_FUNC_bpf_per_cpu_ptr: 733 + case BPF_FUNC_per_cpu_ptr: 734 734 return &bpf_per_cpu_ptr_proto; 735 - case BPF_FUNC_bpf_this_cpu_ptr: 735 + case BPF_FUNC_this_cpu_ptr: 736 736 return &bpf_this_cpu_ptr_proto; 737 737 default: 738 738 break;
+5 -5
kernel/bpf/verifier.c
··· 1299 1299 1300 1300 static bool __reg64_bound_s32(s64 a) 1301 1301 { 1302 - if (a > S32_MIN && a < S32_MAX) 1303 - return true; 1304 - return false; 1302 + return a > S32_MIN && a < S32_MAX; 1305 1303 } 1306 1304 1307 1305 static bool __reg64_bound_u32(u64 a) ··· 1313 1315 { 1314 1316 __mark_reg32_unbounded(reg); 1315 1317 1316 - if (__reg64_bound_s32(reg->smin_value)) 1318 + if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) { 1317 1319 reg->s32_min_value = (s32)reg->smin_value; 1318 - if (__reg64_bound_s32(reg->smax_value)) 1319 1320 reg->s32_max_value = (s32)reg->smax_value; 1321 + } 1320 1322 if (__reg64_bound_u32(reg->umin_value)) 1321 1323 reg->u32_min_value = (u32)reg->umin_value; 1322 1324 if (__reg64_bound_u32(reg->umax_value)) ··· 4940 4942 4941 4943 ret_reg->smax_value = meta->msize_max_value; 4942 4944 ret_reg->s32_max_value = meta->msize_max_value; 4945 + ret_reg->smin_value = -MAX_ERRNO; 4946 + ret_reg->s32_min_value = -MAX_ERRNO; 4943 4947 __reg_deduce_bounds(ret_reg); 4944 4948 __reg_bound_offset(ret_reg); 4945 4949 __update_reg_bounds(ret_reg);
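The verifier hunks above tighten two things: 64-to-32-bit signed-bound propagation is only done when both bounds fit in s32, and helper return values bounded by a size argument now also get -MAX_ERRNO as a lower bound. Why propagating a single bound is unsound — a concrete counterexample (values illustrative):

	/* 64-bit range: smin = -2, smax = 2^35 (smax does not fit in s32) */
	/* 0x1_8000_0000 lies inside that range, but its low 32 bits are   */
	/* 0x8000_0000 = -2147483648 as s32, far below -2.                 */
	/* The old code still set s32_min_value = -2, claiming an s32      */
	/* range this value violates; now neither bound is taken unless    */
	/* both fit.                                                       */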
+5 -1
kernel/cpu.c
··· 815 815 } 816 816 817 817 #ifdef CONFIG_HOTPLUG_CPU 818 + #ifndef arch_clear_mm_cpumask_cpu 819 + #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) 820 + #endif 821 + 818 822 /** 819 823 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU 820 824 * @cpu: a CPU id ··· 854 850 t = find_lock_task_mm(p); 855 851 if (!t) 856 852 continue; 857 - cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); 853 + arch_clear_mm_cpumask_cpu(cpu, t->mm); 858 854 task_unlock(t); 859 855 } 860 856 rcu_read_unlock();
-26
kernel/elfcore.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include <linux/elf.h> 3 - #include <linux/fs.h> 4 - #include <linux/mm.h> 5 - #include <linux/binfmts.h> 6 - #include <linux/elfcore.h> 7 - 8 - Elf_Half __weak elf_core_extra_phdrs(void) 9 - { 10 - return 0; 11 - } 12 - 13 - int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) 14 - { 15 - return 1; 16 - } 17 - 18 - int __weak elf_core_write_extra_data(struct coredump_params *cprm) 19 - { 20 - return 1; 21 - } 22 - 23 - size_t __weak elf_core_extra_data_size(void) 24 - { 25 - return 0; 26 - }
+8 -5
kernel/irq/irqdomain.c
··· 624 624 EXPORT_SYMBOL_GPL(irq_create_direct_mapping); 625 625 626 626 /** 627 - * irq_create_mapping() - Map a hardware interrupt into linux irq space 627 + * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space 628 628 * @domain: domain owning this hardware interrupt or NULL for default domain 629 629 * @hwirq: hardware irq number in that domain space 630 + * @affinity: irq affinity 630 631 * 631 632 * Only one mapping per hardware interrupt is permitted. Returns a linux 632 633 * irq number. 633 634 * If the sense/trigger is to be specified, set_irq_type() should be called 634 635 * on the number returned from that call. 635 636 */ 636 - unsigned int irq_create_mapping(struct irq_domain *domain, 637 - irq_hw_number_t hwirq) 637 + unsigned int irq_create_mapping_affinity(struct irq_domain *domain, 638 + irq_hw_number_t hwirq, 639 + const struct irq_affinity_desc *affinity) 638 640 { 639 641 struct device_node *of_node; 640 642 int virq; ··· 662 660 } 663 661 664 662 /* Allocate a virtual interrupt number */ 665 - virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL); 663 + virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), 664 + affinity); 666 665 if (virq <= 0) { 667 666 pr_debug("-> virq allocation failed\n"); 668 667 return 0; ··· 679 676 680 677 return virq; 681 678 } 682 - EXPORT_SYMBOL_GPL(irq_create_mapping); 679 + EXPORT_SYMBOL_GPL(irq_create_mapping_affinity); 683 680 684 681 /** 685 682 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
+2 -2
kernel/trace/bpf_trace.c
··· 1362 1362 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; 1363 1363 case BPF_FUNC_snprintf_btf: 1364 1364 return &bpf_snprintf_btf_proto; 1365 - case BPF_FUNC_bpf_per_cpu_ptr: 1365 + case BPF_FUNC_per_cpu_ptr: 1366 1366 return &bpf_per_cpu_ptr_proto; 1367 - case BPF_FUNC_bpf_this_cpu_ptr: 1367 + case BPF_FUNC_this_cpu_ptr: 1368 1368 return &bpf_this_cpu_ptr_proto; 1369 1369 default: 1370 1370 return NULL;
+8 -5
kernel/trace/trace.c
··· 163 163 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ 164 164 165 165 int tracing_set_tracer(struct trace_array *tr, const char *buf); 166 - static void ftrace_trace_userstack(struct trace_buffer *buffer, 166 + static void ftrace_trace_userstack(struct trace_array *tr, 167 + struct trace_buffer *buffer, 167 168 unsigned long flags, int pc); 168 169 169 170 #define MAX_TRACER_SIZE 100 ··· 2871 2870 * two. They are not that meaningful. 2872 2871 */ 2873 2872 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); 2874 - ftrace_trace_userstack(buffer, flags, pc); 2873 + ftrace_trace_userstack(tr, buffer, flags, pc); 2875 2874 } 2876 2875 2877 2876 /* ··· 3057 3056 static DEFINE_PER_CPU(int, user_stack_count); 3058 3057 3059 3058 static void 3060 - ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc) 3059 + ftrace_trace_userstack(struct trace_array *tr, 3060 + struct trace_buffer *buffer, unsigned long flags, int pc) 3061 3061 { 3062 3062 struct trace_event_call *call = &event_user_stack; 3063 3063 struct ring_buffer_event *event; 3064 3064 struct userstack_entry *entry; 3065 3065 3066 - if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) 3066 + if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) 3067 3067 return; 3068 3068 3069 3069 /* ··· 3103 3101 preempt_enable(); 3104 3102 } 3105 3103 #else /* CONFIG_USER_STACKTRACE_SUPPORT */ 3106 - static void ftrace_trace_userstack(struct trace_buffer *buffer, 3104 + static void ftrace_trace_userstack(struct trace_array *tr, 3105 + struct trace_buffer *buffer, 3107 3106 unsigned long flags, int pc) 3108 3107 { 3109 3108 }
+2 -1
lib/Makefile
··· 107 107 # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS 108 108 # get appended last to CFLAGS and thus override those previous compiler options. 109 109 # 110 - FPU_CFLAGS := -mhard-float -msse -msse2 110 + FPU_CFLAGS := -msse -msse2 111 111 ifdef CONFIG_CC_IS_GCC 112 112 # Stack alignment mismatch, proceed with caution. 113 113 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 ··· 120 120 # -mpreferred-stack-boundary=3 is not between 4 and 12 121 121 # 122 122 # can be triggered. Otherwise gcc doesn't complain. 123 + FPU_CFLAGS += -mhard-float 123 124 FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4) 124 125 endif 125 126
+3
lib/zlib_dfltcc/dfltcc_inflate.c
··· 4 4 #include "dfltcc_util.h" 5 5 #include "dfltcc.h" 6 6 #include <asm/setup.h> 7 + #include <linux/export.h> 7 8 #include <linux/zutil.h> 8 9 9 10 /* ··· 30 29 return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) && 31 30 is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0); 32 31 } 32 + EXPORT_SYMBOL(dfltcc_can_inflate); 33 33 34 34 static int dfltcc_was_inflate_used( 35 35 z_streamp strm ··· 149 147 return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ? 150 148 DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE; 151 149 } 150 + EXPORT_SYMBOL(dfltcc_inflate);
-13
mm/Kconfig
··· 707 707 returned by an alloc(). This handle must be mapped in order to 708 708 access the allocated space. 709 709 710 - config ZSMALLOC_PGTABLE_MAPPING 711 - bool "Use page table mapping to access object in zsmalloc" 712 - depends on ZSMALLOC=y 713 - help 714 - By default, zsmalloc uses a copy-based object mapping method to 715 - access allocations that span two pages. However, if a particular 716 - architecture (ex, ARM) performs VM mapping faster than copying, 717 - then you should select this. This causes zsmalloc to use page table 718 - mapping rather than copying for object mapping. 719 - 720 - You can check speed with zsmalloc benchmark: 721 - https://github.com/spartacus06/zsmapbench 722 - 723 710 config ZSMALLOC_STAT 724 711 bool "Export zsmalloc statistics" 725 712 depends on ZSMALLOC
+1
mm/hugetlb.c
··· 1216 1216 } 1217 1217 1218 1218 set_compound_order(page, 0); 1219 + page[1].compound_nr = 0; 1219 1220 __ClearPageHead(page); 1220 1221 } 1221 1222
+3 -5
mm/hugetlb_cgroup.c
··· 82 82 83 83 for (idx = 0; idx < hugetlb_max_hstate; idx++) { 84 84 if (page_counter_read( 85 - hugetlb_cgroup_counter_from_cgroup(h_cg, idx)) || 86 - page_counter_read(hugetlb_cgroup_counter_from_cgroup_rsvd( 87 - h_cg, idx))) { 85 + hugetlb_cgroup_counter_from_cgroup(h_cg, idx))) 88 86 return true; 89 - } 90 87 } 91 88 return false; 92 89 } ··· 199 202 struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css); 200 203 struct hstate *h; 201 204 struct page *page; 202 - int idx = 0; 205 + int idx; 203 206 204 207 do { 208 + idx = 0; 205 209 for_each_hstate(h) { 206 210 spin_lock(&hugetlb_lock); 207 211 list_for_each_entry(page, &h->hugepage_activelist, lru)
+39
mm/kasan/quarantine.c
··· 29 29 #include <linux/srcu.h> 30 30 #include <linux/string.h> 31 31 #include <linux/types.h> 32 + #include <linux/cpuhotplug.h> 32 33 33 34 #include "../slab.h" 34 35 #include "kasan.h" ··· 44 43 struct qlist_node *head; 45 44 struct qlist_node *tail; 46 45 size_t bytes; 46 + bool offline; 47 47 }; 48 48 49 49 #define QLIST_INIT { NULL, NULL, 0 } ··· 190 188 local_irq_save(flags); 191 189 192 190 q = this_cpu_ptr(&cpu_quarantine); 191 + if (q->offline) { 192 + local_irq_restore(flags); 193 + return; 194 + } 193 195 qlist_put(q, &info->quarantine_link, cache->size); 194 196 if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { 195 197 qlist_move_all(q, &temp); ··· 334 328 335 329 synchronize_srcu(&remove_cache_srcu); 336 330 } 331 + 332 + static int kasan_cpu_online(unsigned int cpu) 333 + { 334 + this_cpu_ptr(&cpu_quarantine)->offline = false; 335 + return 0; 336 + } 337 + 338 + static int kasan_cpu_offline(unsigned int cpu) 339 + { 340 + struct qlist_head *q; 341 + 342 + q = this_cpu_ptr(&cpu_quarantine); 343 + /* Ensure the ordering between the writing to q->offline and 344 + * qlist_free_all. Otherwise, cpu_quarantine may be corrupted 345 + * by interrupt. 346 + */ 347 + WRITE_ONCE(q->offline, true); 348 + barrier(); 349 + qlist_free_all(q, NULL); 350 + return 0; 351 + } 352 + 353 + static int __init kasan_cpu_quarantine_init(void) 354 + { 355 + int ret = 0; 356 + 357 + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online", 358 + kasan_cpu_online, kasan_cpu_offline); 359 + if (ret < 0) 360 + pr_err("kasan cpu quarantine register failed [%d]\n", ret); 361 + return ret; 362 + } 363 + late_initcall(kasan_cpu_quarantine_init);
+5 -5
mm/list_lru.c
··· 534 534 struct list_lru_node *nlru = &lru->node[nid]; 535 535 int dst_idx = dst_memcg->kmemcg_id; 536 536 struct list_lru_one *src, *dst; 537 - bool set; 538 537 539 538 /* 540 539 * Since list_lru_{add,del} may be called under an IRQ-safe lock, ··· 545 546 dst = list_lru_from_memcg_idx(nlru, dst_idx); 546 547 547 548 list_splice_init(&src->list, &dst->list); 548 - set = (!dst->nr_items && src->nr_items); 549 - dst->nr_items += src->nr_items; 550 - if (set) 549 + 550 + if (src->nr_items) { 551 + dst->nr_items += src->nr_items; 551 552 memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); 552 - src->nr_items = 0; 553 + src->nr_items = 0; 554 + } 553 555 554 556 spin_unlock_irq(&nlru->lock); 555 557 }
+1 -2
mm/madvise.c
··· 1204 1204 goto put_pid; 1205 1205 } 1206 1206 1207 - if (task->mm != current->mm && 1208 - !process_madvise_behavior_valid(behavior)) { 1207 + if (!process_madvise_behavior_valid(behavior)) { 1209 1208 ret = -EINVAL; 1210 1209 goto release_task; 1211 1210 }
+12 -14
mm/mmap.c
··· 1808 1808 if (error) 1809 1809 goto unmap_and_free_vma; 1810 1810 1811 + /* Can addr have changed?? 1812 + * 1813 + * Answer: Yes, several device drivers can do it in their 1814 + * f_op->mmap method. -DaveM 1815 + * Bug: If addr is changed, prev, rb_link, rb_parent should 1816 + * be updated for vma_link() 1817 + */ 1818 + WARN_ON_ONCE(addr != vma->vm_start); 1819 + 1820 + addr = vma->vm_start; 1821 + 1811 1822 /* If vm_flags changed after call_mmap(), we should try merge vma again 1812 1823 * as we may succeed this time. 1813 1824 */ ··· 1833 1822 fput(vma->vm_file); 1834 1823 vm_area_free(vma); 1835 1824 vma = merge; 1836 - /* Update vm_flags and possible addr to pick up the change. We don't 1837 - * warn here if addr changed as the vma is not linked by vma_link(). 1838 - */ 1839 - addr = vma->vm_start; 1825 + /* Update vm_flags to pick up the change. */ 1840 1826 vm_flags = vma->vm_flags; 1841 1827 goto unmap_writable; 1842 1828 } 1843 1829 } 1844 1830 1845 - /* Can addr have changed?? 1846 - * 1847 - * Answer: Yes, several device drivers can do it in their 1848 - * f_op->mmap method. -DaveM 1849 - * Bug: If addr is changed, prev, rb_link, rb_parent should 1850 - * be updated for vma_link() 1851 - */ 1852 - WARN_ON_ONCE(addr != vma->vm_start); 1853 - 1854 - addr = vma->vm_start; 1855 1831 vm_flags = vma->vm_flags; 1856 1832 } else if (vm_flags & VM_SHARED) { 1857 1833 error = shmem_zero_setup(vma);
+24 -16
mm/slab.h
··· 257 257 return s->size + sizeof(struct obj_cgroup *); 258 258 } 259 259 260 - static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s, 261 - size_t objects, 262 - gfp_t flags) 260 + /* 261 + * Returns false if the allocation should fail. 262 + */ 263 + static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, 264 + struct obj_cgroup **objcgp, 265 + size_t objects, gfp_t flags) 263 266 { 264 267 struct obj_cgroup *objcg; 265 268 269 + if (!memcg_kmem_enabled()) 270 + return true; 271 + 272 + if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)) 273 + return true; 274 + 266 275 objcg = get_obj_cgroup_from_current(); 267 276 if (!objcg) 268 - return NULL; 277 + return true; 269 278 270 279 if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) { 271 280 obj_cgroup_put(objcg); 272 - return NULL; 281 + return false; 273 282 } 274 283 275 - return objcg; 284 + *objcgp = objcg; 285 + return true; 276 286 } 277 287 278 288 static inline void mod_objcg_state(struct obj_cgroup *objcg, ··· 308 298 unsigned long off; 309 299 size_t i; 310 300 311 - if (!objcg) 301 + if (!memcg_kmem_enabled() || !objcg) 312 302 return; 313 303 314 304 flags &= ~__GFP_ACCOUNT; ··· 390 380 { 391 381 } 392 382 393 - static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s, 394 - size_t objects, 395 - gfp_t flags) 383 + static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, 384 + struct obj_cgroup **objcgp, 385 + size_t objects, gfp_t flags) 396 386 { 397 - return NULL; 387 + return true; 398 388 } 399 389 400 390 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, ··· 498 488 if (should_failslab(s, flags)) 499 489 return NULL; 500 490 501 - if (memcg_kmem_enabled() && 502 - ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT))) 503 - *objcgp = memcg_slab_pre_alloc_hook(s, size, flags); 491 + if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags)) 492 + return NULL; 504 493 505 494 return s; 506 495 } ··· 518 509 s->flags, flags); 519 510 } 520 511 521 - if (memcg_kmem_enabled()) 522 - memcg_slab_post_alloc_hook(s, objcg, flags, size, p); 512 + memcg_slab_post_alloc_hook(s, objcg, flags, size, p); 523 513 } 524 514 525 515 #ifndef CONFIG_SLOB
+3 -1
mm/swapfile.c
··· 2867 2867 static struct swap_info_struct *alloc_swap_info(void) 2868 2868 { 2869 2869 struct swap_info_struct *p; 2870 + struct swap_info_struct *defer = NULL; 2870 2871 unsigned int type; 2871 2872 int i; 2872 2873 ··· 2896 2895 smp_wmb(); 2897 2896 WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1); 2898 2897 } else { 2899 - kvfree(p); 2898 + defer = p; 2900 2899 p = swap_info[type]; 2901 2900 /* 2902 2901 * Do not memset this entry: a racing procfs swap_next() ··· 2909 2908 plist_node_init(&p->avail_lists[i], 0); 2910 2909 p->flags = SWP_USED; 2911 2910 spin_unlock(&swap_lock); 2911 + kvfree(defer); 2912 2912 spin_lock_init(&p->lock); 2913 2913 spin_lock_init(&p->cont_lock); 2914 2914
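The swapfile change above exists because kvfree() may sleep (a kvmalloc'ed area can be vmalloc-backed), so it must not run with swap_lock held; the losing allocation is parked in defer and freed only after the unlock. The shape of the pattern:

	spin_lock(&swap_lock);
	/* ... discover that p duplicates an existing entry ... */
	defer = p;
	p = swap_info[type];
	spin_unlock(&swap_lock);
	kvfree(defer);		/* safe here: no spinlock held */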
-54
mm/zsmalloc.c
··· 293 293 }; 294 294 295 295 struct mapping_area { 296 - #ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING 297 - struct vm_struct *vm; /* vm area for mapping object that span pages */ 298 - #else 299 296 char *vm_buf; /* copy buffer for objects that span pages */ 300 - #endif 301 297 char *vm_addr; /* address of kmap_atomic()'ed pages */ 302 298 enum zs_mapmode vm_mm; /* mapping mode */ 303 299 }; ··· 1109 1113 return zspage; 1110 1114 } 1111 1115 1112 - #ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING 1113 - static inline int __zs_cpu_up(struct mapping_area *area) 1114 - { 1115 - /* 1116 - * Make sure we don't leak memory if a cpu UP notification 1117 - * and zs_init() race and both call zs_cpu_up() on the same cpu 1118 - */ 1119 - if (area->vm) 1120 - return 0; 1121 - area->vm = get_vm_area(PAGE_SIZE * 2, 0); 1122 - if (!area->vm) 1123 - return -ENOMEM; 1124 - 1125 - /* 1126 - * Populate ptes in advance to avoid pte allocation with GFP_KERNEL 1127 - * in non-preemtible context of zs_map_object. 1128 - */ 1129 - return apply_to_page_range(&init_mm, (unsigned long)area->vm->addr, 1130 - PAGE_SIZE * 2, NULL, NULL); 1131 - } 1132 - 1133 - static inline void __zs_cpu_down(struct mapping_area *area) 1134 - { 1135 - if (area->vm) 1136 - free_vm_area(area->vm); 1137 - area->vm = NULL; 1138 - } 1139 - 1140 - static inline void *__zs_map_object(struct mapping_area *area, 1141 - struct page *pages[2], int off, int size) 1142 - { 1143 - unsigned long addr = (unsigned long)area->vm->addr; 1144 - 1145 - BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0); 1146 - area->vm_addr = area->vm->addr; 1147 - return area->vm_addr + off; 1148 - } 1149 - 1150 - static inline void __zs_unmap_object(struct mapping_area *area, 1151 - struct page *pages[2], int off, int size) 1152 - { 1153 - unsigned long addr = (unsigned long)area->vm_addr; 1154 - 1155 - unmap_kernel_range(addr, PAGE_SIZE * 2); 1156 - } 1157 - 1158 - #else /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */ 1159 - 1160 1116 static inline int __zs_cpu_up(struct mapping_area *area) 1161 1117 { 1162 1118 /* ··· 1188 1240 /* enable page faults to match kunmap_atomic() return conditions */ 1189 1241 pagefault_enable(); 1190 1242 } 1191 - 1192 - #endif /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */ 1193 1243 1194 1244 static int zs_cpu_prepare(unsigned int cpu) 1195 1245 {
+6
net/bridge/br_device.c
··· 173 173 br_stp_enable_bridge(br); 174 174 br_multicast_open(br); 175 175 176 + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 177 + br_multicast_join_snoopers(br); 178 + 176 179 return 0; 177 180 } 178 181 ··· 195 192 196 193 br_stp_disable_bridge(br); 197 194 br_multicast_stop(br); 195 + 196 + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 197 + br_multicast_leave_snoopers(br); 198 198 199 199 netif_stop_queue(dev); 200 200
+25 -9
net/bridge/br_multicast.c
··· 3291 3291 } 3292 3292 #endif 3293 3293 3294 - static void br_multicast_join_snoopers(struct net_bridge *br) 3294 + void br_multicast_join_snoopers(struct net_bridge *br) 3295 3295 { 3296 3296 br_ip4_multicast_join_snoopers(br); 3297 3297 br_ip6_multicast_join_snoopers(br); ··· 3322 3322 } 3323 3323 #endif 3324 3324 3325 - static void br_multicast_leave_snoopers(struct net_bridge *br) 3325 + void br_multicast_leave_snoopers(struct net_bridge *br) 3326 3326 { 3327 3327 br_ip4_multicast_leave_snoopers(br); 3328 3328 br_ip6_multicast_leave_snoopers(br); ··· 3341 3341 3342 3342 void br_multicast_open(struct net_bridge *br) 3343 3343 { 3344 - if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 3345 - br_multicast_join_snoopers(br); 3346 - 3347 3344 __br_multicast_open(br, &br->ip4_own_query); 3348 3345 #if IS_ENABLED(CONFIG_IPV6) 3349 3346 __br_multicast_open(br, &br->ip6_own_query); ··· 3356 3359 del_timer_sync(&br->ip6_other_query.timer); 3357 3360 del_timer_sync(&br->ip6_own_query.timer); 3358 3361 #endif 3359 - 3360 - if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 3361 - br_multicast_leave_snoopers(br); 3362 3362 } 3363 3363 3364 3364 void br_multicast_dev_del(struct net_bridge *br) ··· 3486 3492 int br_multicast_toggle(struct net_bridge *br, unsigned long val) 3487 3493 { 3488 3494 struct net_bridge_port *port; 3495 + bool change_snoopers = false; 3489 3496 3490 3497 spin_lock_bh(&br->multicast_lock); 3491 3498 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) ··· 3495 3500 br_mc_disabled_update(br->dev, val); 3496 3501 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); 3497 3502 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { 3498 - br_multicast_leave_snoopers(br); 3503 + change_snoopers = true; 3499 3504 goto unlock; 3500 3505 } 3501 3506 ··· 3506 3511 list_for_each_entry(port, &br->port_list, list) 3507 3512 __br_multicast_enable_port(port); 3508 3513 3514 + change_snoopers = true; 3515 + 3509 3516 unlock: 3510 3517 spin_unlock_bh(&br->multicast_lock); 3518 + 3519 + /* br_multicast_join_snoopers has the potential to cause 3520 + * an MLD Report/Leave to be delivered to br_multicast_rcv, 3521 + * which would in turn call br_multicast_add_group, which would 3522 + * attempt to acquire multicast_lock. This function should be 3523 + * called after the lock has been released to avoid deadlocks on 3524 + * multicast_lock. 3525 + * 3526 + * br_multicast_leave_snoopers does not have the problem since 3527 + * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and 3528 + * returns without calling br_multicast_ipv4/6_rcv if it's not 3529 + * enabled. Moved both functions out just for symmetry. 3530 + */ 3531 + if (change_snoopers) { 3532 + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 3533 + br_multicast_join_snoopers(br); 3534 + else 3535 + br_multicast_leave_snoopers(br); 3536 + } 3511 3537 3512 3538 return 0; 3513 3539 }
+10
net/bridge/br_private.h
··· 797 797 void br_multicast_enable_port(struct net_bridge_port *port); 798 798 void br_multicast_disable_port(struct net_bridge_port *port); 799 799 void br_multicast_init(struct net_bridge *br); 800 + void br_multicast_join_snoopers(struct net_bridge *br); 801 + void br_multicast_leave_snoopers(struct net_bridge *br); 800 802 void br_multicast_open(struct net_bridge *br); 801 803 void br_multicast_stop(struct net_bridge *br); 802 804 void br_multicast_dev_del(struct net_bridge *br); ··· 979 977 } 980 978 981 979 static inline void br_multicast_init(struct net_bridge *br) 980 + { 981 + } 982 + 983 + static inline void br_multicast_join_snoopers(struct net_bridge *br) 984 + { 985 + } 986 + 987 + static inline void br_multicast_leave_snoopers(struct net_bridge *br) 982 988 { 983 989 } 984 990
+3 -1
net/bridge/br_vlan.c
··· 266 266 } 267 267 268 268 masterv = br_vlan_get_master(br, v->vid, extack); 269 - if (!masterv) 269 + if (!masterv) { 270 + err = -ENOMEM; 270 271 goto out_filt; 272 + } 271 273 v->brvlan = masterv; 272 274 if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) { 273 275 v->stats =
+3
net/can/isotp.c
··· 1173 1173 if (level != SOL_CAN_ISOTP) 1174 1174 return -EINVAL; 1175 1175 1176 + if (so->bound) 1177 + return -EISCONN; 1178 + 1176 1179 switch (optname) { 1177 1180 case CAN_ISOTP_OPTS: 1178 1181 if (optlen != sizeof(struct can_isotp_options))
+20 -2
net/core/dev.c
··· 8958 8958 return dev->xdp_state[mode].prog; 8959 8959 } 8960 8960 8961 + static u8 dev_xdp_prog_count(struct net_device *dev) 8962 + { 8963 + u8 count = 0; 8964 + int i; 8965 + 8966 + for (i = 0; i < __MAX_XDP_MODE; i++) 8967 + if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 8968 + count++; 8969 + return count; 8970 + } 8971 + 8961 8972 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 8962 8973 { 8963 8974 struct bpf_prog *prog = dev_xdp_prog(dev, mode); ··· 9059 9048 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9060 9049 struct bpf_prog *old_prog, u32 flags) 9061 9050 { 9051 + unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9062 9052 struct bpf_prog *cur_prog; 9063 9053 enum bpf_xdp_mode mode; 9064 9054 bpf_op_t bpf_op; ··· 9075 9063 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 9076 9064 return -EINVAL; 9077 9065 } 9078 - /* just one XDP mode bit should be set, zero defaults to SKB mode */ 9079 - if (hweight32(flags & XDP_FLAGS_MODES) > 1) { 9066 + /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 9067 + if (num_modes > 1) { 9080 9068 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 9069 + return -EINVAL; 9070 + } 9071 + /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 9072 + if (!num_modes && dev_xdp_prog_count(dev) > 1) { 9073 + NL_SET_ERR_MSG(extack, 9074 + "More than one program loaded, unset mode is ambiguous"); 9081 9075 return -EINVAL; 9082 9076 } 9083 9077 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
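dev_xdp_prog_count() feeds the new ambiguity rule: a request that sets no mode flag is only accepted while at most one XDP program (or link) is attached, since otherwise the kernel cannot tell which attachment is meant. A condensed restatement of the check, not the full dev_xdp_attach() logic:

/* flags may carry at most one of the XDP_FLAGS_*_MODE bits; with none
 * set, the request is unambiguous only while <= 1 program is attached. */
static int xdp_mode_flags_ok(u32 flags, u8 attached)
{
	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);

	if (num_modes > 1)
		return -EINVAL;		/* conflicting mode bits */
	if (!num_modes && attached > 1)
		return -EINVAL;		/* unset mode is ambiguous */
	return 0;
}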
+1 -3
net/core/flow_offload.c
··· 381 381 382 382 list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) { 383 383 if (this->release == release && 384 - this->indr.cb_priv == cb_priv) { 384 + this->indr.cb_priv == cb_priv) 385 385 list_move(&this->indr.list, cleanup_list); 386 - return; 387 - } 388 386 } 389 387 } 390 388
+6 -6
net/core/lwt_bpf.c
··· 39 39 { 40 40 int ret; 41 41 42 - /* Preempt disable is needed to protect per-cpu redirect_info between 43 - * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and 44 - * access to maps strictly require a rcu_read_lock() for protection, 45 - * mixing with BH RCU lock doesn't work. 42 + /* Migration disable and BH disable are needed to protect per-cpu 43 + * redirect_info between BPF prog and skb_do_redirect(). 46 44 */ 47 - preempt_disable(); 45 + migrate_disable(); 46 + local_bh_disable(); 48 47 bpf_compute_data_pointers(skb); 49 48 ret = bpf_prog_run_save_cb(lwt->prog, skb); 50 49 ··· 77 78 break; 78 79 } 79 80 80 - preempt_enable(); 81 + local_bh_enable(); 82 + migrate_enable(); 81 83 82 84 return ret; 83 85 }
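For background: on PREEMPT_RT, preempt_disable() would forbid the sleeping locks a BPF program may take, whereas migrate_disable() only pins the task to its current CPU. Pairing it with local_bh_disable() still keeps softirq users of the same per-cpu redirect_info away, so the protected region keeps this shape (a sketch of the pattern, not a new API):

static int run_prog_protected(struct bpf_prog *prog, struct sk_buff *skb)
{
	int ret;

	migrate_disable();	/* stay on this CPU; still preemptible on RT */
	local_bh_disable();	/* exclude softirq users of per-cpu state */
	ret = bpf_prog_run_save_cb(prog, skb);
	local_bh_enable();
	migrate_enable();
	return ret;
}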
+11 -20
net/core/xdp.c
··· 336 336 * scenarios (e.g. queue full), it is possible to return the xdp_frame 337 337 * while still leveraging this protection. The @napi_direct boolean 338 338 * is used for those calls sites. Thus, allowing for faster recycling 339 - * of xdp_frames/pages in those cases. This path is never used by the 340 - * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of 341 - * the switch-statement. 339 + * of xdp_frames/pages in those cases. 342 340 */ 343 - static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct) 341 + static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, 342 + struct xdp_buff *xdp) 344 343 { 345 344 struct xdp_mem_allocator *xa; 346 345 struct page *page; ··· 361 362 page = virt_to_page(data); /* Assumes order0 page*/ 362 363 put_page(page); 363 364 break; 365 + case MEM_TYPE_XSK_BUFF_POOL: 366 + /* NB! Only valid from an xdp_buff! */ 367 + xsk_buff_free(xdp); 368 + break; 364 369 default: 365 370 /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ 366 371 WARN(1, "Incorrect XDP memory type (%d) usage", mem->type); ··· 374 371 375 372 void xdp_return_frame(struct xdp_frame *xdpf) 376 373 { 377 - __xdp_return(xdpf->data, &xdpf->mem, false); 374 + __xdp_return(xdpf->data, &xdpf->mem, false, NULL); 378 375 } 379 376 EXPORT_SYMBOL_GPL(xdp_return_frame); 380 377 381 378 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf) 382 379 { 383 - __xdp_return(xdpf->data, &xdpf->mem, true); 380 + __xdp_return(xdpf->data, &xdpf->mem, true, NULL); 384 381 } 385 382 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi); 386 383 ··· 415 412 struct xdp_mem_allocator *xa; 416 413 417 414 if (mem->type != MEM_TYPE_PAGE_POOL) { 418 - __xdp_return(xdpf->data, &xdpf->mem, false); 415 + __xdp_return(xdpf->data, &xdpf->mem, false, NULL); 419 416 return; 420 417 } 421 418 ··· 440 437 441 438 void xdp_return_buff(struct xdp_buff *xdp) 442 439 { 443 - __xdp_return(xdp->data, &xdp->rxq->mem, true); 440 + __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp); 444 441 } 445 442 446 443 /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */ ··· 457 454 rcu_read_unlock(); 458 455 } 459 456 EXPORT_SYMBOL_GPL(__xdp_release_frame); 460 - 461 - bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, 462 - struct netdev_bpf *bpf) 463 - { 464 - if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) { 465 - NL_SET_ERR_MSG(bpf->extack, 466 - "program loaded with different flags"); 467 - return false; 468 - } 469 - return true; 470 - } 471 - EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok); 472 457 473 458 void xdp_attachment_setup(struct xdp_attachment_info *info, 474 459 struct netdev_bpf *bpf)
+2
net/ethtool/bitset.c
··· 628 628 return ret; 629 629 630 630 change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]); 631 + if (change_bits > nbits) 632 + change_bits = nbits; 631 633 bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]), 632 634 change_bits); 633 635 if (change_bits < nbits)
+1 -1
net/ipv4/fib_frontend.c
··· 825 825 if (has_gw && has_via) { 826 826 NL_SET_ERR_MSG(extack, 827 827 "Nexthop configuration can not contain both GATEWAY and VIA"); 828 - goto errout; 828 + return -EINVAL; 829 829 } 830 830 831 831 return 0;
+7 -7
net/ipv4/netfilter/arp_tables.c
··· 203 203 204 204 local_bh_disable(); 205 205 addend = xt_write_recseq_begin(); 206 - private = READ_ONCE(table->private); /* Address dependency. */ 206 + private = rcu_access_pointer(table->private); 207 207 cpu = smp_processor_id(); 208 208 table_base = private->entries; 209 209 jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; ··· 649 649 { 650 650 unsigned int countersize; 651 651 struct xt_counters *counters; 652 - const struct xt_table_info *private = table->private; 652 + const struct xt_table_info *private = xt_table_get_private_protected(table); 653 653 654 654 /* We need atomic snapshot of counters: rest doesn't change 655 655 * (other than comefrom, which userspace doesn't care ··· 673 673 unsigned int off, num; 674 674 const struct arpt_entry *e; 675 675 struct xt_counters *counters; 676 - struct xt_table_info *private = table->private; 676 + struct xt_table_info *private = xt_table_get_private_protected(table); 677 677 int ret = 0; 678 678 void *loc_cpu_entry; 679 679 ··· 807 807 t = xt_request_find_table_lock(net, NFPROTO_ARP, name); 808 808 if (!IS_ERR(t)) { 809 809 struct arpt_getinfo info; 810 - const struct xt_table_info *private = t->private; 810 + const struct xt_table_info *private = xt_table_get_private_protected(t); 811 811 #ifdef CONFIG_COMPAT 812 812 struct xt_table_info tmp; 813 813 ··· 860 860 861 861 t = xt_find_table_lock(net, NFPROTO_ARP, get.name); 862 862 if (!IS_ERR(t)) { 863 - const struct xt_table_info *private = t->private; 863 + const struct xt_table_info *private = xt_table_get_private_protected(t); 864 864 865 865 if (get.size == private->size) 866 866 ret = copy_entries_to_user(private->size, ··· 1017 1017 } 1018 1018 1019 1019 local_bh_disable(); 1020 - private = t->private; 1020 + private = xt_table_get_private_protected(t); 1021 1021 if (private->number != tmp.num_counters) { 1022 1022 ret = -EINVAL; 1023 1023 goto unlock_up_free; ··· 1330 1330 void __user *userptr) 1331 1331 { 1332 1332 struct xt_counters *counters; 1333 - const struct xt_table_info *private = table->private; 1333 + const struct xt_table_info *private = xt_table_get_private_protected(table); 1334 1334 void __user *pos; 1335 1335 unsigned int size; 1336 1336 int ret = 0;
+7 -7
net/ipv4/netfilter/ip_tables.c
··· 258 258 WARN_ON(!(table->valid_hooks & (1 << hook))); 259 259 local_bh_disable(); 260 260 addend = xt_write_recseq_begin(); 261 - private = READ_ONCE(table->private); /* Address dependency. */ 261 + private = rcu_access_pointer(table->private); 262 262 cpu = smp_processor_id(); 263 263 table_base = private->entries; 264 264 jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; ··· 791 791 { 792 792 unsigned int countersize; 793 793 struct xt_counters *counters; 794 - const struct xt_table_info *private = table->private; 794 + const struct xt_table_info *private = xt_table_get_private_protected(table); 795 795 796 796 /* We need atomic snapshot of counters: rest doesn't change 797 797 (other than comefrom, which userspace doesn't care ··· 815 815 unsigned int off, num; 816 816 const struct ipt_entry *e; 817 817 struct xt_counters *counters; 818 - const struct xt_table_info *private = table->private; 818 + const struct xt_table_info *private = xt_table_get_private_protected(table); 819 819 int ret = 0; 820 820 const void *loc_cpu_entry; 821 821 ··· 964 964 t = xt_request_find_table_lock(net, AF_INET, name); 965 965 if (!IS_ERR(t)) { 966 966 struct ipt_getinfo info; 967 - const struct xt_table_info *private = t->private; 967 + const struct xt_table_info *private = xt_table_get_private_protected(t); 968 968 #ifdef CONFIG_COMPAT 969 969 struct xt_table_info tmp; 970 970 ··· 1018 1018 1019 1019 t = xt_find_table_lock(net, AF_INET, get.name); 1020 1020 if (!IS_ERR(t)) { 1021 - const struct xt_table_info *private = t->private; 1021 + const struct xt_table_info *private = xt_table_get_private_protected(t); 1022 1022 if (get.size == private->size) 1023 1023 ret = copy_entries_to_user(private->size, 1024 1024 t, uptr->entrytable); ··· 1173 1173 } 1174 1174 1175 1175 local_bh_disable(); 1176 - private = t->private; 1176 + private = xt_table_get_private_protected(t); 1177 1177 if (private->number != tmp.num_counters) { 1178 1178 ret = -EINVAL; 1179 1179 goto unlock_up_free; ··· 1543 1543 void __user *userptr) 1544 1544 { 1545 1545 struct xt_counters *counters; 1546 - const struct xt_table_info *private = table->private; 1546 + const struct xt_table_info *private = xt_table_get_private_protected(table); 1547 1547 void __user *pos; 1548 1548 unsigned int size; 1549 1549 int ret = 0;
+2 -1
net/ipv4/tcp_input.c
··· 510 510 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) 511 511 tcp_sndbuf_expand(sk); 512 512 513 - tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss); 514 513 tcp_mstamp_refresh(tp); 515 514 tp->rcvq_space.time = tp->tcp_mstamp; 516 515 tp->rcvq_space.seq = tp->copied_seq; ··· 533 534 534 535 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); 535 536 tp->snd_cwnd_stamp = tcp_jiffies32; 537 + tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd, 538 + (u32)TCP_INIT_CWND * tp->advmss); 536 539 } 537 540 538 541 /* 4. Recalculate window clamp after socket hit its memory bounds. */
+5 -2
net/ipv4/tcp_ipv4.c
··· 984 984 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); 985 985 986 986 tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ? 987 - tcp_rsk(req)->syn_tos & ~INET_ECN_MASK : 987 + (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | 988 + (inet_sk(sk)->tos & INET_ECN_MASK) : 988 989 inet_sk(sk)->tos; 989 990 990 991 if (!INET_ECN_is_capable(tos) && ··· 1547 1546 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 1548 1547 newinet->inet_id = prandom_u32(); 1549 1548 1550 - /* Set ToS of the new socket based upon the value of incoming SYN. */ 1549 + /* Set ToS of the new socket based upon the value of incoming SYN. 1550 + * ECT bits are set later in tcp_init_transfer(). 1551 + */ 1551 1552 if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) 1552 1553 newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; 1553 1554
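The tos expression splits the byte into its two fields: DSCP is reflected from the incoming SYN while the ECN bits come from the listening socket (and are finalized in tcp_init_transfer()). As an isolated helper, with INET_ECN_MASK being the low two bits (0x3):

/* Combine the reflected DSCP with the listener's own ECN bits. */
static inline u8 reflect_tos(u8 syn_tos, u8 listener_tos)
{
	return (syn_tos & ~INET_ECN_MASK) | (listener_tos & INET_ECN_MASK);
}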
+6 -3
net/ipv4/tcp_output.c
··· 1882 1882 * window, and remember whether we were cwnd-limited then. 1883 1883 */ 1884 1884 if (!before(tp->snd_una, tp->max_packets_seq) || 1885 - tp->packets_out > tp->max_packets_out) { 1885 + tp->packets_out > tp->max_packets_out || 1886 + is_cwnd_limited) { 1886 1887 tp->max_packets_out = tp->packets_out; 1887 1888 tp->max_packets_seq = tp->snd_nxt; 1888 1889 tp->is_cwnd_limited = is_cwnd_limited; ··· 2707 2706 else 2708 2707 tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); 2709 2708 2709 + is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); 2710 + if (likely(sent_pkts || is_cwnd_limited)) 2711 + tcp_cwnd_validate(sk, is_cwnd_limited); 2712 + 2710 2713 if (likely(sent_pkts)) { 2711 2714 if (tcp_in_cwnd_reduction(sk)) 2712 2715 tp->prr_out += sent_pkts; ··· 2718 2713 /* Send one loss probe per tail loss episode. */ 2719 2714 if (push_one != 2) 2720 2715 tcp_schedule_loss_probe(sk, false); 2721 - is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); 2722 - tcp_cwnd_validate(sk, is_cwnd_limited); 2723 2716 return false; 2724 2717 } 2725 2718 return !tp->packets_out && !tcp_write_queue_empty(sk);
+1 -1
net/ipv4/udp.c
··· 2175 2175 __skb_pull(skb, skb_transport_offset(skb)); 2176 2176 ret = udp_queue_rcv_one_skb(sk, skb); 2177 2177 if (ret > 0) 2178 - ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret); 2178 + ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret); 2179 2179 } 2180 2180 return 0; 2181 2181 }
+7 -7
net/ipv6/netfilter/ip6_tables.c
··· 280 280 281 281 local_bh_disable(); 282 282 addend = xt_write_recseq_begin(); 283 - private = READ_ONCE(table->private); /* Address dependency. */ 283 + private = rcu_access_pointer(table->private); 284 284 cpu = smp_processor_id(); 285 285 table_base = private->entries; 286 286 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; ··· 807 807 { 808 808 unsigned int countersize; 809 809 struct xt_counters *counters; 810 - const struct xt_table_info *private = table->private; 810 + const struct xt_table_info *private = xt_table_get_private_protected(table); 811 811 812 812 /* We need atomic snapshot of counters: rest doesn't change 813 813 (other than comefrom, which userspace doesn't care ··· 831 831 unsigned int off, num; 832 832 const struct ip6t_entry *e; 833 833 struct xt_counters *counters; 834 - const struct xt_table_info *private = table->private; 834 + const struct xt_table_info *private = xt_table_get_private_protected(table); 835 835 int ret = 0; 836 836 const void *loc_cpu_entry; 837 837 ··· 980 980 t = xt_request_find_table_lock(net, AF_INET6, name); 981 981 if (!IS_ERR(t)) { 982 982 struct ip6t_getinfo info; 983 - const struct xt_table_info *private = t->private; 983 + const struct xt_table_info *private = xt_table_get_private_protected(t); 984 984 #ifdef CONFIG_COMPAT 985 985 struct xt_table_info tmp; 986 986 ··· 1035 1035 1036 1036 t = xt_find_table_lock(net, AF_INET6, get.name); 1037 1037 if (!IS_ERR(t)) { 1038 - struct xt_table_info *private = t->private; 1038 + struct xt_table_info *private = xt_table_get_private_protected(t); 1039 1039 if (get.size == private->size) 1040 1040 ret = copy_entries_to_user(private->size, 1041 1041 t, uptr->entrytable); ··· 1189 1189 } 1190 1190 1191 1191 local_bh_disable(); 1192 - private = t->private; 1192 + private = xt_table_get_private_protected(t); 1193 1193 if (private->number != tmp.num_counters) { 1194 1194 ret = -EINVAL; 1195 1195 goto unlock_up_free; ··· 1552 1552 void __user *userptr) 1553 1553 { 1554 1554 struct xt_counters *counters; 1555 - const struct xt_table_info *private = table->private; 1555 + const struct xt_table_info *private = xt_table_get_private_protected(table); 1556 1556 void __user *pos; 1557 1557 unsigned int size; 1558 1558 int ret = 0;
+5 -2
net/ipv6/tcp_ipv6.c
··· 528 528 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); 529 529 530 530 tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ? 531 - tcp_rsk(req)->syn_tos & ~INET_ECN_MASK : 531 + (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | 532 + (np->tclass & INET_ECN_MASK) : 532 533 np->tclass; 533 534 534 535 if (!INET_ECN_is_capable(tclass) && ··· 1326 1325 if (np->repflow) 1327 1326 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); 1328 1327 1329 - /* Set ToS of the new socket based upon the value of incoming SYN. */ 1328 + /* Set ToS of the new socket based upon the value of incoming SYN. 1329 + * ECT bits are set later in tcp_init_transfer(). 1330 + */ 1330 1331 if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) 1331 1332 newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; 1332 1333
+2
net/mac80211/iface.c
··· 940 940 return ret; 941 941 } 942 942 943 + set_bit(SDATA_STATE_RUNNING, &sdata->state); 944 + 943 945 ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR); 944 946 if (ret) { 945 947 kfree(sdata);
+1 -3
net/mac80211/mesh_pathtbl.c
··· 60 60 atomic_set(&newtbl->entries, 0); 61 61 spin_lock_init(&newtbl->gates_lock); 62 62 spin_lock_init(&newtbl->walk_lock); 63 + rhashtable_init(&newtbl->rhead, &mesh_rht_params); 63 64 64 65 return newtbl; 65 66 } ··· 773 772 ret = -ENOMEM; 774 773 goto free_path; 775 774 } 776 - 777 - rhashtable_init(&tbl_path->rhead, &mesh_rht_params); 778 - rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params); 779 775 780 776 sdata->u.mesh.mesh_paths = tbl_path; 781 777 sdata->u.mesh.mpp_paths = tbl_mpp;
+1 -1
net/mac80211/util.c
··· 3456 3456 3457 3457 *chandef = he_chandef; 3458 3458 3459 - return false; 3459 + return true; 3460 3460 } 3461 3461 3462 3462 bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
+1
net/mptcp/mib.c
··· 67 67 for (i = 0; mptcp_snmp_list[i].name; i++) 68 68 seq_puts(seq, " 0"); 69 69 70 + seq_putc(seq, '\n'); 70 71 return; 71 72 } 72 73
+6 -2
net/netfilter/nf_tables_api.c
··· 1724 1724 } 1725 1725 1726 1726 nla_strscpy(ifname, attr, IFNAMSIZ); 1727 + /* nf_tables_netdev_event() is called under rtnl_mutex, this is 1728 + * indirectly serializing all the other holders of the commit_mutex with 1729 + * the rtnl_mutex. 1730 + */ 1727 1731 dev = __dev_get_by_name(net, ifname); 1728 1732 if (!dev) { 1729 1733 err = -ENOENT; ··· 3724 3720 return 0; 3725 3721 } 3726 3722 3727 - static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) 3723 + int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) 3728 3724 { 3729 3725 u64 ms = be64_to_cpu(nla_get_be64(nla)); 3730 3726 u64 max = (u64)(~((u64)0)); ··· 3738 3734 return 0; 3739 3735 } 3740 3736 3741 - static __be64 nf_jiffies64_to_msecs(u64 input) 3737 + __be64 nf_jiffies64_to_msecs(u64 input) 3742 3738 { 3743 3739 return cpu_to_be64(jiffies64_to_msecs(input)); 3744 3740 }
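nf_msecs_to_jiffies64() and nf_jiffies64_to_msecs() lose their static linkage here so nft_dynset (further down) can reuse them for NFTA_DYNSET_TIMEOUT. The conversion is an overflow-checked scale to nanoseconds; roughly this shape, though the exact bound handling in nf_tables may differ:

/* ms -> jiffies64 with an explicit overflow check before scaling. */
static int ms_to_jiffies64(u64 ms, u64 *result)
{
	u64 max = div_u64(U64_MAX, NSEC_PER_MSEC);

	if (ms >= max)
		return -ERANGE;
	*result = nsecs_to_jiffies64(ms * NSEC_PER_MSEC);
	return 0;
}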
-2
net/netfilter/nft_ct.c
··· 177 177 } 178 178 #endif 179 179 case NFT_CT_ID: 180 - if (!nf_ct_is_confirmed(ct)) 181 - goto err; 182 180 *dest = nf_ct_get_id(ct); 183 181 return; 184 182 default:
+5 -3
net/netfilter/nft_dynset.c
··· 157 157 if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { 158 158 if (!(set->flags & NFT_SET_TIMEOUT)) 159 159 return -EINVAL; 160 - timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( 161 - tb[NFTA_DYNSET_TIMEOUT]))); 160 + 161 + err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout); 162 + if (err) 163 + return err; 162 164 } 163 165 164 166 priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); ··· 269 267 if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) 270 268 goto nla_put_failure; 271 269 if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, 272 - cpu_to_be64(jiffies_to_msecs(priv->timeout)), 270 + nf_jiffies64_to_msecs(priv->timeout), 273 271 NFTA_DYNSET_PAD)) 274 272 goto nla_put_failure; 275 273 if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
+15 -34
net/netfilter/x_tables.c
··· 1349 1349 } 1350 1350 EXPORT_SYMBOL(xt_counters_alloc); 1351 1351 1352 + struct xt_table_info 1353 + *xt_table_get_private_protected(const struct xt_table *table) 1354 + { 1355 + return rcu_dereference_protected(table->private, 1356 + mutex_is_locked(&xt[table->af].mutex)); 1357 + } 1358 + EXPORT_SYMBOL(xt_table_get_private_protected); 1359 + 1352 1360 struct xt_table_info * 1353 1361 xt_replace_table(struct xt_table *table, 1354 1362 unsigned int num_counters, ··· 1364 1356 int *error) 1365 1357 { 1366 1358 struct xt_table_info *private; 1367 - unsigned int cpu; 1368 1359 int ret; 1369 1360 1370 1361 ret = xt_jumpstack_alloc(newinfo); ··· 1373 1366 } 1374 1367 1375 1368 /* Do the substitution. */ 1376 - local_bh_disable(); 1377 - private = table->private; 1369 + private = xt_table_get_private_protected(table); 1378 1370 1379 1371 /* Check inside lock: is the old number correct? */ 1380 1372 if (num_counters != private->number) { 1381 1373 pr_debug("num_counters != table->private->number (%u/%u)\n", 1382 1374 num_counters, private->number); 1383 - local_bh_enable(); 1384 1375 *error = -EAGAIN; 1385 1376 return NULL; 1386 1377 } 1387 1378 1388 1379 newinfo->initial_entries = private->initial_entries; 1389 - /* 1390 - * Ensure contents of newinfo are visible before assigning to 1391 - * private. 1392 - */ 1393 - smp_wmb(); 1394 - table->private = newinfo; 1395 1380 1396 - /* make sure all cpus see new ->private value */ 1397 - smp_wmb(); 1398 - 1399 - /* 1400 - * Even though table entries have now been swapped, other CPU's 1401 - * may still be using the old entries... 1402 - */ 1403 - local_bh_enable(); 1404 - 1405 - /* ... so wait for even xt_recseq on all cpus */ 1406 - for_each_possible_cpu(cpu) { 1407 - seqcount_t *s = &per_cpu(xt_recseq, cpu); 1408 - u32 seq = raw_read_seqcount(s); 1409 - 1410 - if (seq & 1) { 1411 - do { 1412 - cond_resched(); 1413 - cpu_relax(); 1414 - } while (seq == raw_read_seqcount(s)); 1415 - } 1416 - } 1381 + rcu_assign_pointer(table->private, newinfo); 1382 + synchronize_rcu(); 1417 1383 1418 1384 audit_log_nfcfg(table->name, table->af, private->number, 1419 1385 !private->number ? AUDIT_XT_OP_REGISTER : ··· 1422 1442 } 1423 1443 1424 1444 /* Simplifies replace_table code. */ 1425 - table->private = bootstrap; 1445 + rcu_assign_pointer(table->private, bootstrap); 1426 1446 1427 1447 if (!xt_replace_table(table, 0, newinfo, &ret)) 1428 1448 goto unlock; 1429 1449 1430 - private = table->private; 1450 + private = xt_table_get_private_protected(table); 1431 1451 pr_debug("table->private->number = %u\n", private->number); 1432 1452 1433 1453 /* save number of initial entries */ ··· 1450 1470 struct xt_table_info *private; 1451 1471 1452 1472 mutex_lock(&xt[table->af].mutex); 1453 - private = table->private; 1473 + private = xt_table_get_private_protected(table); 1474 + RCU_INIT_POINTER(table->private, NULL); 1454 1475 list_del(&table->list); 1455 1476 mutex_unlock(&xt[table->af].mutex); 1456 1477 audit_log_nfcfg(table->name, table->af, private->number,
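The x_tables conversion replaces a hand-rolled seqcount wait loop with ordinary RCU publication: datapath readers fetch table->private inside their BH-disabled section, and the updater publishes the replacement with rcu_assign_pointer() and then waits out every pre-existing reader with synchronize_rcu() before walking the old counters. The generic shape of that pattern, illustrative rather than xtables code:

struct cfg { unsigned int number; };

static struct cfg __rcu *active;
static DEFINE_MUTEX(update_lock);

static unsigned int reader(void)
{
	unsigned int n;

	rcu_read_lock();
	n = rcu_dereference(active)->number;	/* stable until unlock */
	rcu_read_unlock();
	return n;
}

static struct cfg *swap_cfg(struct cfg *newc)
{
	struct cfg *old;

	mutex_lock(&update_lock);
	old = rcu_dereference_protected(active,
					lockdep_is_held(&update_lock));
	rcu_assign_pointer(active, newc);	/* publish */
	mutex_unlock(&update_lock);
	synchronize_rcu();			/* wait for old readers */
	return old;				/* now safe to free */
}

xt_table_get_private_protected() plays the rcu_dereference_protected() role above, with the per-family xt mutex as the guarding lock.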
+1 -1
net/openvswitch/flow_netlink.c
··· 2531 2531 2532 2532 action_start = add_nested_action_start(sfa, OVS_DEC_TTL_ATTR_ACTION, log); 2533 2533 if (action_start < 0) 2534 - return start; 2534 + return action_start; 2535 2535 2536 2536 err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type, 2537 2537 vlan_tci, mpls_label_count, log);
+2 -2
net/sched/cls_flower.c
··· 2424 2424 return err; 2425 2425 } 2426 2426 if (lse_mask->mpls_label) { 2427 - err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2428 - lse_key->mpls_label); 2427 + err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2428 + lse_key->mpls_label); 2429 2429 if (err) 2430 2430 return err; 2431 2431 }
+1 -1
net/sched/sch_fq_pie.c
··· 401 401 402 402 INIT_LIST_HEAD(&q->new_flows); 403 403 INIT_LIST_HEAD(&q->old_flows); 404 + timer_setup(&q->adapt_timer, fq_pie_timer, 0); 404 405 405 406 if (opt) { 406 407 err = fq_pie_change(sch, opt, extack); ··· 427 426 pie_vars_init(&flow->vars); 428 427 } 429 428 430 - timer_setup(&q->adapt_timer, fq_pie_timer, 0); 431 429 mod_timer(&q->adapt_timer, jiffies + HZ / 2); 432 430 433 431 return 0;
+4 -2
net/tipc/node.c
··· 2206 2206 &xmitq); 2207 2207 else if (prop == TIPC_NLA_PROP_MTU) 2208 2208 tipc_link_set_mtu(e->link, b->mtu); 2209 + 2210 + /* Update MTU for node link entry */ 2211 + e->mtu = tipc_link_mss(e->link); 2209 2212 } 2210 - /* Update MTU for node link entry */ 2211 - e->mtu = tipc_link_mss(e->link); 2213 + 2212 2214 tipc_node_write_unlock(n); 2213 2215 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL); 2214 2216 }
+1 -1
net/wireless/nl80211.c
··· 12644 12644 struct net_device *dev = info->user_ptr[1]; 12645 12645 struct wireless_dev *wdev = dev->ieee80211_ptr; 12646 12646 struct nlattr *tb[NUM_NL80211_REKEY_DATA]; 12647 - struct cfg80211_gtk_rekey_data rekey_data; 12647 + struct cfg80211_gtk_rekey_data rekey_data = {}; 12648 12648 int err; 12649 12649 12650 12650 if (!info->attrs[NL80211_ATTR_REKEY_DATA])
+16 -4
net/xdp/xsk.c
··· 212 212 return 0; 213 213 } 214 214 215 + static bool xsk_tx_writeable(struct xdp_sock *xs) 216 + { 217 + if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2) 218 + return false; 219 + 220 + return true; 221 + } 222 + 215 223 static bool xsk_is_bound(struct xdp_sock *xs) 216 224 { 217 225 if (READ_ONCE(xs->state) == XSK_BOUND) { ··· 306 298 rcu_read_lock(); 307 299 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { 308 300 __xskq_cons_release(xs->tx); 309 - xs->sk.sk_write_space(&xs->sk); 301 + if (xsk_tx_writeable(xs)) 302 + xs->sk.sk_write_space(&xs->sk); 310 303 } 311 304 rcu_read_unlock(); 312 305 } ··· 504 495 505 496 out: 506 497 if (sent_frame) 507 - sk->sk_write_space(sk); 498 + if (xsk_tx_writeable(xs)) 499 + sk->sk_write_space(sk); 508 500 509 501 mutex_unlock(&xs->mutex); 510 502 return err; ··· 587 577 static __poll_t xsk_poll(struct file *file, struct socket *sock, 588 578 struct poll_table_struct *wait) 589 579 { 590 - __poll_t mask = datagram_poll(file, sock, wait); 580 + __poll_t mask = 0; 591 581 struct sock *sk = sock->sk; 592 582 struct xdp_sock *xs = xdp_sk(sk); 593 583 struct xsk_buff_pool *pool; 584 + 585 + sock_poll_wait(file, sock, wait); 594 586 595 587 if (unlikely(!xsk_is_bound(xs))) 596 588 return mask; ··· 609 597 610 598 if (xs->rx && !xskq_prod_is_empty(xs->rx)) 611 599 mask |= EPOLLIN | EPOLLRDNORM; 612 - if (xs->tx && !xskq_cons_is_full(xs->tx)) 600 + if (xs->tx && xsk_tx_writeable(xs)) 613 601 mask |= EPOLLOUT | EPOLLWRNORM; 614 602 615 603 return mask;
+1
net/xdp/xsk_buff_pool.c
··· 174 174 175 175 if (!pool->dma_pages) { 176 176 WARN(1, "Driver did not DMA map zero-copy buffers"); 177 + err = -EINVAL; 177 178 goto err_unreg_xsk; 178 179 } 179 180 pool->umem->zc = true;
+6
net/xdp/xsk_queue.h
··· 307 307 q->nentries; 308 308 } 309 309 310 + static inline u32 xskq_cons_present_entries(struct xsk_queue *q) 311 + { 312 + /* No barriers needed since data is not accessed */ 313 + return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer); 314 + } 315 + 310 316 /* Functions for producers */ 311 317 312 318 static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
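xskq_cons_present_entries() works because the rings use free-running 32-bit indices: the unsigned subtraction stays correct across wraparound. xsk_tx_writeable() (in the xsk.c hunk above) then reports the socket writable only while the TX ring is at most half full, throttling sk_write_space() wakeups. The arithmetic in isolation:

/* Outstanding entries of a ring with free-running u32 indices;
 * correct modulo 2^32 even after the producer wraps. */
static inline unsigned int ring_present(unsigned int producer,
					unsigned int consumer)
{
	return producer - consumer;
}

/* Writable while no more than half the ring is outstanding. */
static inline int ring_writeable(unsigned int producer,
				 unsigned int consumer,
				 unsigned int nentries)
{
	return ring_present(producer, consumer) <= nentries / 2;
}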
+3 -2
net/xfrm/xfrm_compat.c
··· 234 234 case XFRMA_PAD: 235 235 /* Ignore */ 236 236 return 0; 237 + case XFRMA_UNSPEC: 237 238 case XFRMA_ALG_AUTH: 238 239 case XFRMA_ALG_CRYPT: 239 240 case XFRMA_ALG_COMP: ··· 388 387 389 388 memcpy(nla, src, nla_attr_size(copy_len)); 390 389 nla->nla_len = nla_attr_size(payload); 391 - *pos += nla_attr_size(payload); 390 + *pos += nla_attr_size(copy_len); 392 391 nlmsg->nlmsg_len += nla->nla_len; 393 392 394 393 memset(dst + *pos, 0, payload - copy_len); ··· 564 563 return NULL; 565 564 566 565 len += NLMSG_HDRLEN; 567 - h64 = kvmalloc(len, GFP_KERNEL | __GFP_ZERO); 566 + h64 = kvmalloc(len, GFP_KERNEL); 568 567 if (!h64) 569 568 return ERR_PTR(-ENOMEM); 570 569
+3 -1
net/xfrm/xfrm_state.c
··· 2382 2382 if (in_compat_syscall()) { 2383 2383 struct xfrm_translator *xtr = xfrm_get_translator(); 2384 2384 2385 - if (!xtr) 2385 + if (!xtr) { 2386 + kfree(data); 2386 2387 return -EOPNOTSUPP; 2388 + } 2387 2389 2388 2390 err = xtr->xlate_user_policy_sockptr(&data, optlen); 2389 2391 xfrm_put_translator(xtr);
+4 -8
scripts/Makefile.build
··· 252 252 ifdef CONFIG_TRIM_UNUSED_KSYMS 253 253 cmd_gen_ksymdeps = \ 254 254 $(CONFIG_SHELL) $(srctree)/scripts/gen_ksymdeps.sh $@ >> $(dot-target).cmd 255 + 256 + # List module undefined symbols 257 + undefined_syms = $(NM) $< | $(AWK) '$$1 == "U" { printf("%s%s", x++ ? " " : "", $$2) }'; 255 258 endif 256 259 257 260 define rule_cc_o_c ··· 274 271 $(call cmd,modversions_S) 275 272 endef 276 273 277 - # List module undefined symbols (or empty line if not enabled) 278 - ifdef CONFIG_TRIM_UNUSED_KSYMS 279 - cmd_undef_syms = $(NM) $< | sed -n 's/^ *U //p' | xargs echo 280 - else 281 - cmd_undef_syms = echo 282 - endif 283 - 284 274 # Built-in and composite module parts 285 275 $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE 286 276 $(call if_changed_rule,cc_o_c) ··· 281 285 282 286 cmd_mod = { \ 283 287 echo $(if $($*-objs)$($*-y)$($*-m), $(addprefix $(obj)/, $($*-objs) $($*-y) $($*-m)), $(@:.mod=.o)); \ 284 - $(cmd_undef_syms); \ 288 + $(undefined_syms) echo; \ 285 289 } > $@ 286 290 287 291 $(obj)/%.mod: $(obj)/%.o FORCE
+1 -1
scripts/Makefile.extrawarn
··· 60 60 # 61 61 ifneq ($(findstring 2, $(KBUILD_EXTRA_WARN)),) 62 62 63 - KBUILD_CFLAGS += -Wcast-align 64 63 KBUILD_CFLAGS += -Wdisabled-optimization 65 64 KBUILD_CFLAGS += -Wnested-externs 66 65 KBUILD_CFLAGS += -Wshadow ··· 79 80 ifneq ($(findstring 3, $(KBUILD_EXTRA_WARN)),) 80 81 81 82 KBUILD_CFLAGS += -Wbad-function-cast 83 + KBUILD_CFLAGS += -Wcast-align 82 84 KBUILD_CFLAGS += -Wcast-qual 83 85 KBUILD_CFLAGS += -Wconversion 84 86 KBUILD_CFLAGS += -Wpacked
+20
scripts/lld-version.sh
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + # 4 + # Usage: $ ./scripts/lld-version.sh ld.lld 5 + # 6 + # Print the linker version of `ld.lld' in a 5 or 6-digit form 7 + # such as `100001' for ld.lld 10.0.1 etc. 8 + 9 + linker_string="$($* --version)" 10 + 11 + if ! ( echo $linker_string | grep -q LLD ); then 12 + echo 0 13 + exit 1 14 + fi 15 + 16 + VERSION=$(echo $linker_string | cut -d ' ' -f 2) 17 + MAJOR=$(echo $VERSION | cut -d . -f 1) 18 + MINOR=$(echo $VERSION | cut -d . -f 2) 19 + PATCHLEVEL=$(echo $VERSION | cut -d . -f 3) 20 + printf "%d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
+15
tools/arch/x86/include/asm/insn.h
··· 201 201 return insn_offset_displacement(insn) + insn->displacement.nbytes; 202 202 } 203 203 204 + /** 205 + * for_each_insn_prefix() -- Iterate prefixes in the instruction 206 + * @insn: Pointer to struct insn. 207 + * @idx: Index storage. 208 + * @prefix: Prefix byte. 209 + * 210 + * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix 211 + * and the index is stored in @idx (note that this @idx is just for a cursor, 212 + * do not change it.) 213 + * Since prefixes.nbytes can be bigger than 4 if some prefixes 214 + * are repeated, it cannot be used for looping over the prefixes. 215 + */ 216 + #define for_each_insn_prefix(insn, idx, prefix) \ 217 + for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) 218 + 204 219 #define POP_SS_OPCODE 0x1f 205 220 #define MOV_SREG_OPCODE 0x8e 206 221
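A hypothetical caller of the new iterator, scanning an already-decoded instruction for one particular prefix byte (the helper name is made up for illustration):

static bool insn_has_prefix(struct insn *insn, insn_byte_t want)
{
	insn_byte_t prefix;
	int idx;

	for_each_insn_prefix(insn, idx, prefix) {
		if (prefix == want)
			return true;
	}
	return false;
}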
+2 -2
tools/bpf/bpftool/pids.c
··· 89 89 90 90 int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) 91 91 { 92 - char buf[4096]; 93 - struct pid_iter_bpf *skel; 94 92 struct pid_iter_entry *e; 93 + char buf[4096 / sizeof(*e) * sizeof(*e)]; 94 + struct pid_iter_bpf *skel; 95 95 int err, ret, fd = -1, i; 96 96 libbpf_print_fn_t default_print; 97 97
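The resized buffer is an integer round-down: 4096 / sizeof(*e) * sizeof(*e) truncates the 4 KB budget to a whole number of pid_iter_entry records, so a read from the BPF iterator can never end in the middle of an entry. The same trick in isolation:

#include <stddef.h>

/* Round a byte budget down to a multiple of the record size;
 * integer division truncates. */
static size_t round_down_records(size_t budget, size_t rec_size)
{
	return budget / rec_size * rec_size;
}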
+2 -2
tools/include/uapi/linux/bpf.h
··· 3977 3977 FN(seq_printf_btf), \ 3978 3978 FN(skb_cgroup_classid), \ 3979 3979 FN(redirect_neigh), \ 3980 - FN(bpf_per_cpu_ptr), \ 3981 - FN(bpf_this_cpu_ptr), \ 3980 + FN(per_cpu_ptr), \ 3981 + FN(this_cpu_ptr), \ 3982 3982 FN(redirect_peer), \ 3983 3983 FN(task_storage_get), \ 3984 3984 FN(task_storage_delete), \
+1 -1
tools/lib/bpf/ringbuf.c
··· 278 278 err = ringbuf_process_ring(ring); 279 279 if (err < 0) 280 280 return err; 281 - res += cnt; 281 + res += err; 282 282 } 283 283 return cnt < 0 ? -errno : res; 284 284 }
+1 -1
tools/testing/ktest/ktest.pl
··· 2040 2040 2041 2041 if ($reboot_type eq "grub") { 2042 2042 run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'"; 2043 - } elsif ($reboot_type eq "grub2") { 2043 + } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) { 2044 2044 run_ssh "$grub_reboot $grub_number"; 2045 2045 } elsif ($reboot_type eq "syslinux") { 2046 2046 run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
+4 -4
tools/testing/selftests/bpf/prog_tests/align.c
··· 456 456 */ 457 457 {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, 458 458 /* Checked s>=0 */ 459 - {9, "R5=inv(id=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"}, 459 + {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, 460 460 /* packet pointer + nonnegative (4n+2) */ 461 - {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"}, 462 - {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"}, 461 + {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, 462 + {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, 463 463 /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. 464 464 * We checked the bounds, but it might have been able 465 465 * to overflow if the packet pointer started in the ··· 467 467 * So we did not get a 'range' on R6, and the access 468 468 * attempt will fail. 469 469 */ 470 - {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"}, 470 + {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, 471 471 } 472 472 }, 473 473 {
+7 -1
tools/testing/selftests/bpf/prog_tests/ringbuf.c
··· 217 217 if (CHECK(err, "join_bg", "err %d\n", err)) 218 218 goto cleanup; 219 219 220 - if (CHECK(bg_ret != 1, "bg_ret", "epoll_wait result: %ld", bg_ret)) 220 + if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret)) 221 221 goto cleanup; 222 + 223 + /* due to timing variations, there could still be non-notified 224 + * samples, so consume them here to collect all the samples 225 + */ 226 + err = ring_buffer__consume(ringbuf); 227 + CHECK(err < 0, "rb_consume", "failed: %d\b", err); 222 228 223 229 /* 3 rounds, 2 samples each */ 224 230 cnt = atomic_xchg(&sample_cnt, 0);
+1 -1
tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
··· 81 81 82 82 /* poll for samples, should get 2 ringbufs back */ 83 83 err = ring_buffer__poll(ringbuf, -1); 84 - if (CHECK(err != 4, "poll_res", "expected 4 records, got %d\n", err)) 84 + if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err)) 85 85 goto cleanup; 86 86 87 87 /* expect extra polling to return nothing */
+29 -24
tools/testing/selftests/bpf/test_offload.py
··· 184 184 def bpftool_map_list(expected=None, ns=""): 185 185 _, maps = bpftool("map show", JSON=True, ns=ns, fail=True) 186 186 # Remove the base maps 187 - for m in base_maps: 188 - if m in maps: 189 - maps.remove(m) 187 + maps = [m for m in maps if m not in base_maps and m.get('name') not in base_map_names] 190 188 if expected is not None: 191 189 if len(maps) != expected: 192 190 fail(True, "%d BPF maps loaded, expected %d" % ··· 714 716 fail(ret == 0, "Replaced one of programs without -force") 715 717 check_extack(err, "XDP program already attached.", args) 716 718 717 - if modename == "" or modename == "drv": 718 - othermode = "" if modename == "drv" else "drv" 719 - start_test("Test multi-attachment XDP - detach...") 720 - ret, _, err = sim.unset_xdp(othermode, force=True, 721 - fail=False, include_stderr=True) 722 - fail(ret == 0, "Removed program with a bad mode") 723 - check_extack(err, "program loaded with different flags.", args) 719 + start_test("Test multi-attachment XDP - remove without mode...") 720 + ret, _, err = sim.unset_xdp("", force=True, 721 + fail=False, include_stderr=True) 722 + fail(ret == 0, "Removed program without a mode flag") 723 + check_extack(err, "More than one program loaded, unset mode is ambiguous.", args) 724 724 725 725 sim.unset_xdp("offload") 726 726 xdp = sim.ip_link_show(xdp=True)["xdp"] ··· 768 772 skip(ret != 0, "bpftool not installed") 769 773 base_progs = progs 770 774 _, base_maps = bpftool("map") 775 + base_map_names = [ 776 + 'pid_iter.rodata' # created on each bpftool invocation 777 + ] 771 778 772 779 # Check netdevsim 773 780 ret, out = cmd("modprobe netdevsim", fail=False) ··· 912 913 913 914 sim.tc_flush_filters() 914 915 916 + start_test("Test TC offloads failure...") 917 + sim.dfs["dev/bpf_bind_verifier_accept"] = 0 918 + ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True, 919 + fail=False, include_stderr=True) 920 + fail(ret == 0, "TC filter did not reject with TC offloads enabled") 921 + check_verifier_log(err, "[netdevsim] Hello from netdevsim!") 922 + sim.dfs["dev/bpf_bind_verifier_accept"] = 1 923 + 915 924 start_test("Test TC offloads work...") 916 925 ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True, 917 926 fail=False, include_stderr=True) 918 927 fail(ret != 0, "TC filter did not load with TC offloads enabled") 919 - check_verifier_log(err, "[netdevsim] Hello from netdevsim!") 920 928 921 929 start_test("Test TC offload basics...") 922 930 dfs = simdev.dfs_get_bound_progs(expected=1) ··· 947 941 start_test("Test disabling TC offloads is rejected while filters installed...") 948 942 ret, _ = sim.set_ethtool_tc_offloads(False, fail=False) 949 943 fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...") 944 + sim.set_ethtool_tc_offloads(True) 950 945 951 946 start_test("Test qdisc removal frees things...") 952 947 sim.tc_flush_filters() ··· 1006 999 fail=False, include_stderr=True) 1007 1000 fail(ret == 0, "Replaced XDP program with a program in different mode") 1008 1001 check_extack(err, 1009 - "native and generic XDP can't be active at the same time.", 1002 + "Native and generic XDP can't be active at the same time.", 1010 1003 args) 1011 - ret, _, err = sim.set_xdp(obj, "", force=True, 1012 - fail=False, include_stderr=True) 1013 - fail(ret == 0, "Replaced XDP program with a program in different mode") 1014 - check_extack(err, "program loaded with different flags.", args) 1015 - 1016 - start_test("Test XDP prog remove with bad flags...") 1017 - ret, _, err = sim.unset_xdp("", force=True, 1018 - fail=False, include_stderr=True) 1019 - fail(ret == 0, "Removed program with a bad mode") 1020 - check_extack(err, "program loaded with different flags.", args) 1021 1004 1022 1005 start_test("Test MTU restrictions...") 1023 1006 ret, _ = sim.set_mtu(9000, fail=False) ··· 1037 1040 offload = bpf_pinned("/sys/fs/bpf/offload") 1038 1041 ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True) 1039 1042 fail(ret == 0, "attached offloaded XDP program to drv") 1040 - check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args) 1043 + check_extack(err, "Using device-bound program without HW_MODE flag is not supported.", args) 1041 1044 rm("/sys/fs/bpf/offload") 1045 + sim.wait_for_flush() 1046 + 1047 + start_test("Test XDP load failure...") 1048 + sim.dfs["dev/bpf_bind_verifier_accept"] = 0 1049 + ret, _, err = bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload", 1050 + dev=sim['ifname'], fail=False, include_stderr=True) 1051 + fail(ret == 0, "verifier should fail on load") 1052 + check_verifier_log(err, "[netdevsim] Hello from netdevsim!") 1053 + sim.dfs["dev/bpf_bind_verifier_accept"] = 1 1042 1054 sim.wait_for_flush() 1043 1055 1044 1056 start_test("Test XDP offload...") ··· 1057 1051 progs = bpftool_prog_list(expected=1) 1058 1052 prog = progs[0] 1059 1053 fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID") 1060 - check_verifier_log(err, "[netdevsim] Hello from netdevsim!") 1061 1054 1062 1055 start_test("Test XDP offload is device bound...") 1063 1056 dfs = simdev.dfs_get_bound_progs(expected=1)
+1 -1
tools/testing/selftests/bpf/verifier/array_access.c
··· 68 68 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 69 69 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 70 70 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 71 - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), 71 + BPF_JMP32_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), 72 72 BPF_MOV32_IMM(BPF_REG_1, 0), 73 73 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), 74 74 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+41
tools/testing/selftests/bpf/verifier/bounds.c
··· 703 703 .fixup_map_hash_8b = { 3 }, 704 704 .result = ACCEPT, 705 705 }, 706 + { 707 + "bounds checks after 32-bit truncation. test 1", 708 + .insns = { 709 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 710 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 711 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 712 + BPF_LD_MAP_FD(BPF_REG_1, 0), 713 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 714 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 715 + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 716 + /* This used to reduce the max bound to 0x7fffffff */ 717 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 718 + BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1), 719 + BPF_MOV64_IMM(BPF_REG_0, 0), 720 + BPF_EXIT_INSN(), 721 + }, 722 + .fixup_map_hash_8b = { 3 }, 723 + .errstr_unpriv = "R0 leaks addr", 724 + .result_unpriv = REJECT, 725 + .result = ACCEPT, 726 + }, 727 + { 728 + "bounds checks after 32-bit truncation. test 2", 729 + .insns = { 730 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 731 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 732 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 733 + BPF_LD_MAP_FD(BPF_REG_1, 0), 734 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 735 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 736 + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 737 + BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1), 738 + BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1), 739 + BPF_MOV64_IMM(BPF_REG_0, 0), 740 + BPF_EXIT_INSN(), 741 + }, 742 + .fixup_map_hash_8b = { 3 }, 743 + .errstr_unpriv = "R0 leaks addr", 744 + .result_unpriv = REJECT, 745 + .result = ACCEPT, 746 + },
+95
tools/testing/selftests/net/fcnal-test.sh
··· 256 256 fi 257 257 } 258 258 259 + setup_cmd_nsc() 260 + { 261 + local cmd="$*" 262 + local rc 263 + 264 + run_cmd_nsc ${cmd} 265 + rc=$? 266 + if [ $rc -ne 0 ]; then 267 + # show user the command if not done so already 268 + if [ "$VERBOSE" = "0" ]; then 269 + echo "setup command: $cmd" 270 + fi 271 + echo "failed. stopping tests" 272 + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then 273 + echo 274 + echo "hit enter to continue" 275 + read a 276 + fi 277 + exit $rc 278 + fi 279 + } 280 + 259 281 # set sysctl values in NS-A 260 282 set_sysctl() 261 283 { ··· 487 465 # tell ns-B how to get to remote addresses of ns-A 488 466 ip -netns ${NSB} ro add ${NSA_LO_IP}/32 via ${NSA_IP} dev ${NSB_DEV} 489 467 ip -netns ${NSB} ro add ${NSA_LO_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV} 468 + 469 + set +e 470 + 471 + sleep 1 472 + } 473 + 474 + setup_lla_only() 475 + { 476 + # make sure we are starting with a clean slate 477 + kill_procs 478 + cleanup 2>/dev/null 479 + 480 + log_debug "Configuring network namespaces" 481 + set -e 482 + 483 + create_ns ${NSA} "-" "-" 484 + create_ns ${NSB} "-" "-" 485 + create_ns ${NSC} "-" "-" 486 + connect_ns ${NSA} ${NSA_DEV} "-" "-" \ 487 + ${NSB} ${NSB_DEV} "-" "-" 488 + connect_ns ${NSA} ${NSA_DEV2} "-" "-" \ 489 + ${NSC} ${NSC_DEV} "-" "-" 490 + 491 + NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV}) 492 + NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV}) 493 + NSC_LINKIP6=$(get_linklocal ${NSC} ${NSC_DEV}) 494 + 495 + create_vrf ${NSA} ${VRF} ${VRF_TABLE} "-" "-" 496 + ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF} 497 + ip -netns ${NSA} link set dev ${NSA_DEV2} vrf ${VRF} 490 498 491 499 set +e 492 500 ··· 3839 3787 setup_cmd_nsb ip li del vlan100 2>/dev/null 3840 3788 } 3841 3789 3790 + # VRF only. 3791 + # ns-A device is connected to both ns-B and ns-C on a single VRF but only has 3792 + # LLA on the interfaces 3793 + use_case_ping_lla_multi() 3794 + { 3795 + setup_lla_only 3796 + # only want reply from ns-A 3797 + setup_cmd_nsb sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1 3798 + setup_cmd_nsc sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1 3799 + 3800 + log_start 3801 + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} 3802 + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Pre cycle, ping out ns-B" 3803 + 3804 + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} 3805 + log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Pre cycle, ping out ns-C" 3806 + 3807 + # cycle/flap the first ns-A interface 3808 + setup_cmd ip link set ${NSA_DEV} down 3809 + setup_cmd ip link set ${NSA_DEV} up 3810 + sleep 1 3811 + 3812 + log_start 3813 + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} 3814 + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-B" 3815 + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} 3816 + log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-C" 3817 + 3818 + # cycle/flap the second ns-A interface 3819 + setup_cmd ip link set ${NSA_DEV2} down 3820 + setup_cmd ip link set ${NSA_DEV2} up 3821 + sleep 1 3822 + 3823 + log_start 3824 + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} 3825 + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-B" 3826 + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} 3827 + log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-C" 3828 + } 3829 + 3842 3830 use_cases() 3843 3831 { 3844 3832 log_section "Use cases" 3833 + log_subsection "Device enslaved to bridge" 3845 3834 use_case_br 3835 + log_subsection "Ping LLA with multiple interfaces" 3836 + use_case_ping_lla_multi 3846 3837 } 3847 3838 3848 3839 ################################################################################
+3
tools/testing/selftests/net/udpgso_bench_rx.c
··· 113 113 interrupted = true; 114 114 break; 115 115 } 116 + 117 + /* no events and more time to wait, do poll again */ 118 + continue; 116 119 } 117 120 if (pfd.revents != POLLIN) 118 121 error(1, errno, "poll: 0x%x expected 0x%x\n",
+4
tools/testing/selftests/vm/Makefile
··· 60 60 TEST_GEN_FILES += $(BINARIES_64) 61 61 endif 62 62 else 63 + 64 + ifneq (,$(findstring $(ARCH),powerpc)) 63 65 TEST_GEN_FILES += protection_keys 66 + endif 67 + 64 68 endif 65 69 66 70 ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64))
+16 -9
tools/testing/selftests/vm/userfaultfd.c
··· 206 206 return ret; 207 207 } 208 208 209 - 210 209 static void hugetlb_allocate_area(void **alloc_area) 211 210 { 212 211 void *area_alias = NULL; 213 212 char **alloc_area_alias; 213 + 214 214 *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, 215 215 (map_shared ? MAP_SHARED : MAP_PRIVATE) | 216 216 MAP_HUGETLB, 217 217 huge_fd, *alloc_area == area_src ? 0 : 218 218 nr_pages * page_size); 219 219 if (*alloc_area == MAP_FAILED) { 220 - fprintf(stderr, "mmap of hugetlbfs file failed\n"); 221 - *alloc_area = NULL; 220 + perror("mmap of hugetlbfs file failed"); 221 + goto fail; 222 222 } 223 223 224 224 if (map_shared) { ··· 227 227 huge_fd, *alloc_area == area_src ? 0 : 228 228 nr_pages * page_size); 229 229 if (area_alias == MAP_FAILED) { 230 - if (munmap(*alloc_area, nr_pages * page_size) < 0) { 231 - perror("hugetlb munmap"); 232 - exit(1); 233 - } 234 - *alloc_area = NULL; 235 - return; 230 + perror("mmap of hugetlb file alias failed"); 231 + goto fail_munmap; 236 232 } 237 233 } 234 + 238 235 if (*alloc_area == area_src) { 239 236 huge_fd_off0 = *alloc_area; 240 237 alloc_area_alias = &area_src_alias; ··· 240 243 } 241 244 if (area_alias) 242 245 *alloc_area_alias = area_alias; 246 + 247 + return; 248 + 249 + fail_munmap: 250 + if (munmap(*alloc_area, nr_pages * page_size) < 0) { 251 + perror("hugetlb munmap"); 252 + exit(1); 253 + } 254 + fail: 255 + *alloc_area = NULL; 243 256 } 244 257 245 258 static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset)