Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 5.0-rc6 into char-misc-next

We need the char-misc fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+5565 -2190
+3 -3
Documentation/ABI/stable/sysfs-driver-mlxreg-io
··· 24 24 cpld3_version 25 25 26 26 Date: November 2018 27 - KernelVersion: 4.21 27 + KernelVersion: 5.0 28 28 Contact: Vadim Pasternak <vadimp@mellanox.com> 29 29 Description: These files show with which CPLD versions have been burned 30 30 on LED board. ··· 35 35 jtag_enable 36 36 37 37 Date: November 2018 38 - KernelVersion: 4.21 38 + KernelVersion: 5.0 39 39 Contact: Vadim Pasternak <vadimp@mellanox.com> 40 40 Description: These files enable and disable the access to the JTAG domain. 41 41 By default access to the JTAG domain is disabled. ··· 105 105 reset_voltmon_upgrade_fail 106 106 107 107 Date: November 2018 108 - KernelVersion: 4.21 108 + KernelVersion: 5.0 109 109 Contact: Vadim Pasternak <vadimp@mellanox.com> 110 110 Description: These files show the system reset cause, as following: ComEx 111 111 power fail, reset from ComEx, system platform reset, reset
+3 -4
Documentation/admin-guide/kernel-parameters.txt
··· 1696 1696 By default, super page will be supported if Intel IOMMU 1697 1697 has the capability. With this option, super page will 1698 1698 not be supported. 1699 - sm_off [Default Off] 1700 - By default, scalable mode will be supported if the 1699 + sm_on [Default Off] 1700 + By default, scalable mode will be disabled even if the 1701 1701 hardware advertises that it has support for the scalable 1702 1702 mode translation. With this option set, scalable mode 1703 - will not be used even on hardware which claims to support 1704 - it. 1703 + will be used on hardware which claims to support it. 1705 1704 tboot_noforce [Default Off] 1706 1705 Do not force the Intel IOMMU enabled under tboot. 1707 1706 By default, tboot will force Intel IOMMU on, which
+5 -1
Documentation/devicetree/bindings/Makefile
··· 17 17 quiet_cmd_mk_schema = SCHEMA $@ 18 18 cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^) 19 19 20 - DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml') 20 + DT_DOCS = $(shell \ 21 + cd $(srctree)/$(src) && \ 22 + find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \ 23 + ) 24 + 21 25 DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) 22 26 23 27 extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
-4
Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
··· 4 4 - compatible : "olpc,ap-sp" 5 5 - reg : base address and length of SoC's WTM registers 6 6 - interrupts : SP-AP interrupt 7 - - clocks : phandle + clock-specifier for the clock that drives the WTM 8 - - clock-names: should be "sp" 9 7 10 8 Example: 11 9 ap-sp@d4290000 { 12 10 compatible = "olpc,ap-sp"; 13 11 reg = <0xd4290000 0x1000>; 14 12 interrupts = <40>; 15 - clocks = <&soc_clocks MMP2_CLK_SP>; 16 - clock-names = "sp"; 17 13 }
+15 -9
Documentation/sysctl/fs.txt
··· 56 56 57 57 dentry-state: 58 58 59 - From linux/fs/dentry.c: 59 + From linux/include/linux/dcache.h: 60 60 -------------------------------------------------------------- 61 - struct { 61 + struct dentry_stat_t dentry_stat { 62 62 int nr_dentry; 63 63 int nr_unused; 64 64 int age_limit; /* age in seconds */ 65 65 int want_pages; /* pages requested by system */ 66 - int dummy[2]; 67 - } dentry_stat = {0, 0, 45, 0,}; 68 - -------------------------------------------------------------- 66 + int nr_negative; /* # of unused negative dentries */ 67 + int dummy; /* Reserved for future use */ 68 + }; 69 + -------------------------------------------------------------- 69 70 70 - Dentries are dynamically allocated and deallocated, and 71 - nr_dentry seems to be 0 all the time. Hence it's safe to 72 - assume that only nr_unused, age_limit and want_pages are 73 - used. Nr_unused seems to be exactly what its name says. 71 + Dentries are dynamically allocated and deallocated. 72 + 73 + nr_dentry shows the total number of dentries allocated (active 74 + + unused). nr_unused shows the number of dentries that are not 75 + actively used, but are saved in the LRU list for future reuse. 76 + 74 77 Age_limit is the age in seconds after which dcache entries 75 78 can be reclaimed when memory is short and want_pages is 76 79 nonzero when shrink_dcache_pages() has been called and the 77 80 dcache isn't pruned yet. 81 + 82 + nr_negative shows the number of unused dentries that are also 83 + negative dentries which do not mapped to actual files. 78 84 79 85 ============================================================== 80 86
+1 -1
Documentation/x86/resctrl_ui.txt
··· 9 9 Tony Luck <tony.luck@intel.com> 10 10 Vikas Shivappa <vikas.shivappa@intel.com> 11 11 12 - This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo 12 + This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo 13 13 flag bits: 14 14 RDT (Resource Director Technology) Allocation - "rdt_a" 15 15 CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
+51 -1
MAINTAINERS
··· 2848 2848 BPF (Safe dynamic programs and tools) 2849 2849 M: Alexei Starovoitov <ast@kernel.org> 2850 2850 M: Daniel Borkmann <daniel@iogearbox.net> 2851 + R: Martin KaFai Lau <kafai@fb.com> 2852 + R: Song Liu <songliubraving@fb.com> 2853 + R: Yonghong Song <yhs@fb.com> 2851 2854 L: netdev@vger.kernel.org 2852 2855 L: linux-kernel@vger.kernel.org 2853 2856 T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git ··· 2876 2873 F: tools/bpf/ 2877 2874 F: tools/lib/bpf/ 2878 2875 F: tools/testing/selftests/bpf/ 2876 + K: bpf 2877 + N: bpf 2879 2878 2880 2879 BPF JIT for ARM 2881 2880 M: Shubham Bansal <illusionist.neo@gmail.com> ··· 5186 5181 M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> 5187 5182 T: git git://anongit.freedesktop.org/drm/drm-misc 5188 5183 L: dri-devel@lists.freedesktop.org 5189 - L: xen-devel@lists.xen.org 5184 + L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 5190 5185 S: Supported 5191 5186 F: drivers/gpu/drm/xen/ 5192 5187 F: Documentation/gpu/xen-front.rst ··· 11322 11317 11323 11318 OPENCORES I2C BUS DRIVER 11324 11319 M: Peter Korsgaard <peter@korsgaard.com> 11320 + M: Andrew Lunn <andrew@lunn.ch> 11325 11321 L: linux-i2c@vger.kernel.org 11326 11322 S: Maintained 11327 11323 F: Documentation/i2c/busses/i2c-ocores 11328 11324 F: drivers/i2c/busses/i2c-ocores.c 11325 + F: include/linux/platform_data/i2c-ocores.h 11329 11326 11330 11327 OPENRISC ARCHITECTURE 11331 11328 M: Jonas Bonn <jonas@southpole.se> ··· 12885 12878 F: drivers/net/dsa/realtek-smi* 12886 12879 F: drivers/net/dsa/rtl83* 12887 12880 12881 + REDPINE WIRELESS DRIVER 12882 + M: Amitkumar Karwar <amitkarwar@gmail.com> 12883 + M: Siva Rebbagondla <siva8118@gmail.com> 12884 + L: linux-wireless@vger.kernel.org 12885 + S: Maintained 12886 + F: drivers/net/wireless/rsi/ 12887 + 12888 12888 REGISTER MAP ABSTRACTION 12889 12889 M: Mark Brown <broonie@kernel.org> 12890 12890 L: linux-kernel@vger.kernel.org ··· 13719 13705 L: 
netdev@vger.kernel.org 13720 13706 S: Supported 13721 13707 F: drivers/net/ethernet/sfc/ 13708 + 13709 + SFF/SFP/SFP+ MODULE SUPPORT 13710 + M: Russell King <linux@armlinux.org.uk> 13711 + L: netdev@vger.kernel.org 13712 + S: Maintained 13713 + F: drivers/net/phy/phylink.c 13714 + F: drivers/net/phy/sfp* 13715 + F: include/linux/phylink.h 13716 + F: include/linux/sfp.h 13722 13717 13723 13718 SGI GRU DRIVER 13724 13719 M: Dimitri Sivanich <sivanich@sgi.com> ··· 16674 16651 F: drivers/platform/x86/ 16675 16652 F: drivers/platform/olpc/ 16676 16653 16654 + X86 PLATFORM DRIVERS - ARCH 16655 + R: Darren Hart <dvhart@infradead.org> 16656 + R: Andy Shevchenko <andy@infradead.org> 16657 + L: platform-driver-x86@vger.kernel.org 16658 + L: x86@kernel.org 16659 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core 16660 + S: Maintained 16661 + F: arch/x86/platform 16662 + 16677 16663 X86 VDSO 16678 16664 M: Andy Lutomirski <luto@kernel.org> 16679 16665 L: linux-kernel@vger.kernel.org ··· 16714 16682 T: git git://linuxtv.org/media_tree.git 16715 16683 S: Maintained 16716 16684 F: drivers/media/tuners/tuner-xc2028.* 16685 + 16686 + XDP (eXpress Data Path) 16687 + M: Alexei Starovoitov <ast@kernel.org> 16688 + M: Daniel Borkmann <daniel@iogearbox.net> 16689 + M: David S. Miller <davem@davemloft.net> 16690 + M: Jakub Kicinski <jakub.kicinski@netronome.com> 16691 + M: Jesper Dangaard Brouer <hawk@kernel.org> 16692 + M: John Fastabend <john.fastabend@gmail.com> 16693 + L: netdev@vger.kernel.org 16694 + L: xdp-newbies@vger.kernel.org 16695 + S: Supported 16696 + F: net/core/xdp.c 16697 + F: include/net/xdp.h 16698 + F: kernel/bpf/devmap.c 16699 + F: kernel/bpf/cpumap.c 16700 + F: include/trace/events/xdp.h 16701 + K: xdp 16702 + N: xdp 16717 16703 16718 16704 XDP SOCKETS (AF_XDP) 16719 16705 M: Björn Töpel <bjorn.topel@intel.com>
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 0 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc6 6 6 NAME = Shy Crocodile 7 7 8 8 # *DOCUMENTATION*
+1 -1
arch/arm/boot/dts/am335x-shc.dts
··· 215 215 pinctrl-names = "default"; 216 216 pinctrl-0 = <&mmc1_pins>; 217 217 bus-width = <0x4>; 218 - cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; 218 + cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; 219 219 cd-inverted; 220 220 max-frequency = <26000000>; 221 221 vmmc-supply = <&vmmcsd_fixed>;
+1 -1
arch/arm/boot/dts/da850.dtsi
··· 476 476 clocksource: timer@20000 { 477 477 compatible = "ti,da830-timer"; 478 478 reg = <0x20000 0x1000>; 479 - interrupts = <12>, <13>; 479 + interrupts = <21>, <22>; 480 480 interrupt-names = "tint12", "tint34"; 481 481 clocks = <&pll0_auxclk>; 482 482 };
+1 -1
arch/arm/boot/dts/imx6q-pistachio.dts
··· 103 103 power { 104 104 label = "Power Button"; 105 105 gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; 106 - gpio-key,wakeup; 106 + wakeup-source; 107 107 linux,code = <KEY_POWER>; 108 108 }; 109 109 };
+1 -1
arch/arm/boot/dts/imx6sll-evk.dts
··· 309 309 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 310 310 cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; 311 311 keep-power-in-suspend; 312 - enable-sdio-wakeup; 312 + wakeup-source; 313 313 vmmc-supply = <&reg_sd3_vmmc>; 314 314 status = "okay"; 315 315 };
+1 -1
arch/arm/boot/dts/imx6sx.dtsi
··· 467 467 }; 468 468 469 469 gpt: gpt@2098000 { 470 - compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt"; 470 + compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt"; 471 471 reg = <0x02098000 0x4000>; 472 472 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; 473 473 clocks = <&clks IMX6SX_CLK_GPT_BUS>,
+1 -1
arch/arm/boot/dts/meson.dtsi
··· 274 274 compatible = "amlogic,meson6-dwmac", "snps,dwmac"; 275 275 reg = <0xc9410000 0x10000 276 276 0xc1108108 0x4>; 277 - interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>; 277 + interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; 278 278 interrupt-names = "macirq"; 279 279 status = "disabled"; 280 280 };
+1 -2
arch/arm/boot/dts/meson8b-ec100.dts
··· 205 205 cap-sd-highspeed; 206 206 disable-wp; 207 207 208 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 209 - cd-inverted; 208 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 210 209 211 210 vmmc-supply = <&vcc_3v3>; 212 211 };
+1 -3
arch/arm/boot/dts/meson8b-odroidc1.dts
··· 221 221 /* Realtek RTL8211F (0x001cc916) */ 222 222 eth_phy: ethernet-phy@0 { 223 223 reg = <0>; 224 - eee-broken-1000t; 225 224 interrupt-parent = <&gpio_intc>; 226 225 /* GPIOH_3 */ 227 226 interrupts = <17 IRQ_TYPE_LEVEL_LOW>; ··· 272 273 cap-sd-highspeed; 273 274 disable-wp; 274 275 275 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 276 - cd-inverted; 276 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 277 277 278 278 vmmc-supply = <&tflash_vdd>; 279 279 vqmmc-supply = <&tf_io>;
+1 -2
arch/arm/boot/dts/meson8m2-mxiii-plus.dts
··· 206 206 cap-sd-highspeed; 207 207 disable-wp; 208 208 209 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 210 - cd-inverted; 209 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 211 210 212 211 vmmc-supply = <&vcc_3v3>; 213 212 };
+1 -1
arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
··· 105 105 interrupts-extended = < 106 106 &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 107 107 &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 108 - &cpcap 48 1 108 + &cpcap 48 0 109 109 >; 110 110 interrupt-names = 111 111 "id_ground", "id_float", "se0conn", "vbusvld",
-4
arch/arm/boot/dts/omap3-gta04.dtsi
··· 714 714 715 715 vdda-supply = <&vdac>; 716 716 717 - #address-cells = <1>; 718 - #size-cells = <0>; 719 - 720 717 port { 721 - reg = <0>; 722 718 venc_out: endpoint { 723 719 remote-endpoint = <&opa_in>; 724 720 ti,channels = <1>;
+1 -1
arch/arm/boot/dts/omap3-n900.dts
··· 814 814 /* For debugging, it is often good idea to remove this GPIO. 815 815 It means you can remove back cover (to reboot by removing 816 816 battery) and still use the MMC card. */ 817 - cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ 817 + cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ 818 818 }; 819 819 820 820 /* most boards use vaux3, only some old versions use vmmc2 instead */
+28 -14
arch/arm/boot/dts/omap3-n950-n9.dtsi
··· 370 370 compatible = "ti,omap2-onenand"; 371 371 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ 372 372 373 + /* 374 + * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported 375 + * bootloader set values when booted with v4.19 using both N950 376 + * and N9 devices (OneNAND Manufacturer: Samsung): 377 + * 378 + * gpmc cs0 before gpmc_cs_program_settings: 379 + * cs0 GPMC_CS_CONFIG1: 0xfd001202 380 + * cs0 GPMC_CS_CONFIG2: 0x00181800 381 + * cs0 GPMC_CS_CONFIG3: 0x00030300 382 + * cs0 GPMC_CS_CONFIG4: 0x18001804 383 + * cs0 GPMC_CS_CONFIG5: 0x03171d1d 384 + * cs0 GPMC_CS_CONFIG6: 0x97080000 385 + */ 373 386 gpmc,sync-read; 374 387 gpmc,sync-write; 375 388 gpmc,burst-length = <16>; ··· 392 379 gpmc,device-width = <2>; 393 380 gpmc,mux-add-data = <2>; 394 381 gpmc,cs-on-ns = <0>; 395 - gpmc,cs-rd-off-ns = <87>; 396 - gpmc,cs-wr-off-ns = <87>; 382 + gpmc,cs-rd-off-ns = <122>; 383 + gpmc,cs-wr-off-ns = <122>; 397 384 gpmc,adv-on-ns = <0>; 398 - gpmc,adv-rd-off-ns = <10>; 399 - gpmc,adv-wr-off-ns = <10>; 400 - gpmc,oe-on-ns = <15>; 401 - gpmc,oe-off-ns = <87>; 385 + gpmc,adv-rd-off-ns = <15>; 386 + gpmc,adv-wr-off-ns = <15>; 387 + gpmc,oe-on-ns = <20>; 388 + gpmc,oe-off-ns = <122>; 402 389 gpmc,we-on-ns = <0>; 403 - gpmc,we-off-ns = <87>; 404 - gpmc,rd-cycle-ns = <112>; 405 - gpmc,wr-cycle-ns = <112>; 406 - gpmc,access-ns = <81>; 390 + gpmc,we-off-ns = <122>; 391 + gpmc,rd-cycle-ns = <148>; 392 + gpmc,wr-cycle-ns = <148>; 393 + gpmc,access-ns = <117>; 407 394 gpmc,page-burst-access-ns = <15>; 408 395 gpmc,bus-turnaround-ns = <0>; 409 396 gpmc,cycle2cycle-delay-ns = <0>; 410 397 gpmc,wait-monitoring-ns = <0>; 411 - gpmc,clk-activation-ns = <5>; 412 - gpmc,wr-data-mux-bus-ns = <30>; 413 - gpmc,wr-access-ns = <81>; 414 - gpmc,sync-clk-ps = <15000>; 398 + gpmc,clk-activation-ns = <10>; 399 + gpmc,wr-data-mux-bus-ns = <40>; 400 + gpmc,wr-access-ns = <117>; 401 + 402 + gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? 
*/ 415 403 416 404 /* 417 405 * MTD partition table corresponding to Nokia's MeeGo 1.2
-2
arch/arm/boot/dts/omap5-l4.dtsi
··· 1046 1046 <SYSC_IDLE_SMART>, 1047 1047 <SYSC_IDLE_SMART_WKUP>; 1048 1048 ti,syss-mask = <1>; 1049 - ti,no-reset-on-init; 1050 - ti,no-idle-on-init; 1051 1049 /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ 1052 1050 clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; 1053 1051 clock-names = "fck";
+30 -6
arch/arm/boot/dts/r8a7743.dtsi
··· 1681 1681 1682 1682 du: display@feb00000 { 1683 1683 compatible = "renesas,du-r8a7743"; 1684 - reg = <0 0xfeb00000 0 0x40000>, 1685 - <0 0xfeb90000 0 0x1c>; 1686 - reg-names = "du", "lvds.0"; 1684 + reg = <0 0xfeb00000 0 0x40000>; 1687 1685 interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, 1688 1686 <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; 1689 1687 clocks = <&cpg CPG_MOD 724>, 1690 - <&cpg CPG_MOD 723>, 1691 - <&cpg CPG_MOD 726>; 1692 - clock-names = "du.0", "du.1", "lvds.0"; 1688 + <&cpg CPG_MOD 723>; 1689 + clock-names = "du.0", "du.1"; 1693 1690 status = "disabled"; 1694 1691 1695 1692 ports { ··· 1701 1704 port@1 { 1702 1705 reg = <1>; 1703 1706 du_out_lvds0: endpoint { 1707 + remote-endpoint = <&lvds0_in>; 1708 + }; 1709 + }; 1710 + }; 1711 + }; 1712 + 1713 + lvds0: lvds@feb90000 { 1714 + compatible = "renesas,r8a7743-lvds"; 1715 + reg = <0 0xfeb90000 0 0x1c>; 1716 + clocks = <&cpg CPG_MOD 726>; 1717 + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; 1718 + resets = <&cpg 726>; 1719 + status = "disabled"; 1720 + 1721 + ports { 1722 + #address-cells = <1>; 1723 + #size-cells = <0>; 1724 + 1725 + port@0 { 1726 + reg = <0>; 1727 + lvds0_in: endpoint { 1728 + remote-endpoint = <&du_out_lvds0>; 1729 + }; 1730 + }; 1731 + port@1 { 1732 + reg = <1>; 1733 + lvds0_out: endpoint { 1704 1734 }; 1705 1735 }; 1706 1736 };
+1
arch/arm/boot/dts/sun6i-a31.dtsi
··· 216 216 #clock-cells = <0>; 217 217 compatible = "fixed-clock"; 218 218 clock-frequency = <24000000>; 219 + clock-output-names = "osc24M"; 219 220 }; 220 221 221 222 osc32k: clk-32k {
+2 -2
arch/arm/boot/dts/vf610-bk4.dts
··· 110 110 bus-num = <3>; 111 111 status = "okay"; 112 112 spi-slave; 113 + #address-cells = <0>; 113 114 114 - slave@0 { 115 + slave { 115 116 compatible = "lwn,bk4"; 116 117 spi-max-frequency = <30000000>; 117 - reg = <0>; 118 118 }; 119 119 }; 120 120
+2 -2
arch/arm/mach-cns3xxx/pcie.c
··· 83 83 } else /* remote PCI bus */ 84 84 base = cnspci->cfg1_regs + ((busno & 0xf) << 20); 85 85 86 - return base + (where & 0xffc) + (devfn << 12); 86 + return base + where + (devfn << 12); 87 87 } 88 88 89 89 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, ··· 93 93 u32 mask = (0x1ull << (size * 8)) - 1; 94 94 int shift = (where % 4) * 8; 95 95 96 - ret = pci_generic_config_read32(bus, devfn, where, size, val); 96 + ret = pci_generic_config_read(bus, devfn, where, size, val); 97 97 98 98 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn && 99 99 (where & 0xffc) == PCI_CLASS_REVISION)
+1 -2
arch/arm/mach-iop32x/n2100.c
··· 75 75 /* 76 76 * N2100 PCI. 77 77 */ 78 - static int __init 79 - n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 78 + static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 80 79 { 81 80 int irq; 82 81
+2 -4
arch/arm/mach-tango/pm.c
··· 3 3 #include <linux/suspend.h> 4 4 #include <asm/suspend.h> 5 5 #include "smc.h" 6 + #include "pm.h" 6 7 7 8 static int tango_pm_powerdown(unsigned long arg) 8 9 { ··· 25 24 .valid = suspend_valid_only_mem, 26 25 }; 27 26 28 - static int __init tango_pm_init(void) 27 + void __init tango_pm_init(void) 29 28 { 30 29 suspend_set_ops(&tango_pm_ops); 31 - return 0; 32 30 } 33 - 34 - late_initcall(tango_pm_init);
+7
arch/arm/mach-tango/pm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifdef CONFIG_SUSPEND 4 + void __init tango_pm_init(void); 5 + #else 6 + #define tango_pm_init NULL 7 + #endif
+2
arch/arm/mach-tango/setup.c
··· 2 2 #include <asm/mach/arch.h> 3 3 #include <asm/hardware/cache-l2x0.h> 4 4 #include "smc.h" 5 + #include "pm.h" 5 6 6 7 static void tango_l2c_write(unsigned long val, unsigned int reg) 7 8 { ··· 16 15 .dt_compat = tango_dt_compat, 17 16 .l2c_aux_mask = ~0, 18 17 .l2c_write_sec = tango_l2c_write, 18 + .init_late = tango_pm_init, 19 19 MACHINE_END
-3
arch/arm/plat-pxa/ssp.c
··· 190 190 if (ssp == NULL) 191 191 return -ENODEV; 192 192 193 - iounmap(ssp->mmio_base); 194 - 195 193 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 196 194 release_mem_region(res->start, resource_size(res)); 197 195 ··· 199 201 list_del(&ssp->node); 200 202 mutex_unlock(&ssp_lock); 201 203 202 - kfree(ssp); 203 204 return 0; 204 205 } 205 206
-1
arch/arm/xen/mm.c
··· 7 7 #include <linux/of_address.h> 8 8 #include <linux/slab.h> 9 9 #include <linux/types.h> 10 - #include <linux/dma-mapping.h> 11 10 #include <linux/vmalloc.h> 12 11 #include <linux/swiotlb.h> 13 12
+1
arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
··· 188 188 reg = <0x3a3>; 189 189 interrupt-parent = <&r_intc>; 190 190 interrupts = <0 IRQ_TYPE_LEVEL_LOW>; 191 + x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ 191 192 }; 192 193 }; 193 194
+1 -1
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
··· 390 390 }; 391 391 392 392 video-codec@1c0e000 { 393 - compatible = "allwinner,sun50i-h5-video-engine"; 393 + compatible = "allwinner,sun50i-a64-video-engine"; 394 394 reg = <0x01c0e000 0x1000>; 395 395 clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, 396 396 <&ccu CLK_DRAM_VE>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
··· 187 187 max-frequency = <100000000>; 188 188 disable-wp; 189 189 190 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 191 - cd-inverted; 190 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 192 191 193 192 vmmc-supply = <&vddao_3v3>; 194 193 vqmmc-supply = <&vddio_boot>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
··· 305 305 max-frequency = <200000000>; 306 306 disable-wp; 307 307 308 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 309 - cd-inverted; 308 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 310 309 311 310 vmmc-supply = <&vddio_ao3v3>; 312 311 vqmmc-supply = <&vddio_tf>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
··· 238 238 max-frequency = <100000000>; 239 239 disable-wp; 240 240 241 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 242 - cd-inverted; 241 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 243 242 244 243 vmmc-supply = <&vddao_3v3>; 245 244 vqmmc-supply = <&vddio_card>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
··· 258 258 max-frequency = <100000000>; 259 259 disable-wp; 260 260 261 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 262 - cd-inverted; 261 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 263 262 264 263 vmmc-supply = <&tflash_vdd>; 265 264 vqmmc-supply = <&tf_io>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
··· 196 196 max-frequency = <100000000>; 197 197 disable-wp; 198 198 199 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 200 - cd-inverted; 199 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 201 200 202 201 vmmc-supply = <&vddao_3v3>; 203 202 vqmmc-supply = <&vddio_card>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
··· 154 154 max-frequency = <100000000>; 155 155 disable-wp; 156 156 157 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 158 - cd-inverted; 157 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 159 158 160 159 vmmc-supply = <&vcc_3v3>; 161 160 };
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
··· 211 211 max-frequency = <100000000>; 212 212 disable-wp; 213 213 214 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 215 - cd-inverted; 214 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 216 215 217 216 vmmc-supply = <&vddao_3v3>; 218 217 vqmmc-supply = <&vcc_3v3>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
··· 131 131 max-frequency = <100000000>; 132 132 disable-wp; 133 133 134 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 135 - cd-inverted; 134 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 136 135 137 136 vmmc-supply = <&vddao_3v3>; 138 137 vqmmc-supply = <&vddio_card>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
··· 238 238 max-frequency = <100000000>; 239 239 disable-wp; 240 240 241 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 242 - cd-inverted; 241 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 243 242 244 243 vmmc-supply = <&vcc_3v3>; 245 244 vqmmc-supply = <&vcc_card>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
··· 183 183 max-frequency = <100000000>; 184 184 disable-wp; 185 185 186 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 187 - cd-inverted; 186 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 188 187 189 188 vmmc-supply = <&vddao_3v3>; 190 189 vqmmc-supply = <&vddio_card>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
··· 137 137 max-frequency = <100000000>; 138 138 disable-wp; 139 139 140 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 141 - cd-inverted; 140 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 142 141 143 142 vmmc-supply = <&vddao_3v3>; 144 143 vqmmc-supply = <&vddio_boot>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
··· 356 356 max-frequency = <100000000>; 357 357 disable-wp; 358 358 359 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 360 - cd-inverted; 359 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 361 360 362 361 vmmc-supply = <&vddao_3v3>; 363 362 vqmmc-supply = <&vddio_boot>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
··· 147 147 max-frequency = <100000000>; 148 148 disable-wp; 149 149 150 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 151 - cd-inverted; 150 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 152 151 153 152 vmmc-supply = <&vddao_3v3>; 154 153 vqmmc-supply = <&vddio_boot>;
+1 -2
arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
··· 170 170 max-frequency = <100000000>; 171 171 disable-wp; 172 172 173 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 174 - cd-inverted; 173 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; 175 174 176 175 vmmc-supply = <&vddao_3v3>; 177 176 vqmmc-supply = <&vddio_boot>;
+1 -1
arch/arm64/boot/dts/qcom/msm8996.dtsi
··· 404 404 }; 405 405 406 406 intc: interrupt-controller@9bc0000 { 407 - compatible = "arm,gic-v3"; 407 + compatible = "qcom,msm8996-gic-v3", "arm,gic-v3"; 408 408 #interrupt-cells = <3>; 409 409 interrupt-controller; 410 410 #redistributor-regions = <1>;
+3
arch/arm64/boot/dts/renesas/r8a774a1.dtsi
··· 1011 1011 <&cpg CPG_CORE R8A774A1_CLK_S3D1>, 1012 1012 <&scif_clk>; 1013 1013 clock-names = "fck", "brg_int", "scif_clk"; 1014 + dmas = <&dmac1 0x13>, <&dmac1 0x12>, 1015 + <&dmac2 0x13>, <&dmac2 0x12>; 1016 + dma-names = "tx", "rx", "tx", "rx"; 1014 1017 power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; 1015 1018 resets = <&cpg 310>; 1016 1019 status = "disabled";
+3
arch/arm64/boot/dts/renesas/r8a7796.dtsi
··· 1262 1262 <&cpg CPG_CORE R8A7796_CLK_S3D1>, 1263 1263 <&scif_clk>; 1264 1264 clock-names = "fck", "brg_int", "scif_clk"; 1265 + dmas = <&dmac1 0x13>, <&dmac1 0x12>, 1266 + <&dmac2 0x13>, <&dmac2 0x12>; 1267 + dma-names = "tx", "rx", "tx", "rx"; 1265 1268 power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; 1266 1269 resets = <&cpg 310>; 1267 1270 status = "disabled";
+3
arch/arm64/boot/dts/renesas/r8a77965.dtsi
··· 1068 1068 <&cpg CPG_CORE R8A77965_CLK_S3D1>, 1069 1069 <&scif_clk>; 1070 1070 clock-names = "fck", "brg_int", "scif_clk"; 1071 + dmas = <&dmac1 0x13>, <&dmac1 0x12>, 1072 + <&dmac2 0x13>, <&dmac2 0x12>; 1073 + dma-names = "tx", "rx", "tx", "rx"; 1071 1074 power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; 1072 1075 resets = <&cpg 310>; 1073 1076 status = "disabled";
+3 -1
arch/arm64/kernel/hibernate.c
··· 299 299 dcache_clean_range(__idmap_text_start, __idmap_text_end); 300 300 301 301 /* Clean kvm setup code to PoC? */ 302 - if (el2_reset_needed()) 302 + if (el2_reset_needed()) { 303 303 dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); 304 + dcache_clean_range(__hyp_text_start, __hyp_text_end); 305 + } 304 306 305 307 /* make the crash dump kernel image protected again */ 306 308 crash_post_resume();
+2
arch/arm64/kernel/hyp-stub.S
··· 28 28 #include <asm/virt.h> 29 29 30 30 .text 31 + .pushsection .hyp.text, "ax" 32 + 31 33 .align 11 32 34 33 35 ENTRY(__hyp_stub_vectors)
+1
arch/arm64/kernel/kaslr.c
··· 88 88 * we end up running with module randomization disabled. 89 89 */ 90 90 module_alloc_base = (u64)_etext - MODULES_VSIZE; 91 + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); 91 92 92 93 /* 93 94 * Try to map the FDT early. If this fails, we simply bail,
+3 -1
arch/arm64/kernel/machine_kexec_file.c
··· 120 120 { 121 121 void *buf; 122 122 size_t buf_size; 123 + size_t cmdline_len; 123 124 int ret; 124 125 126 + cmdline_len = cmdline ? strlen(cmdline) : 0; 125 127 buf_size = fdt_totalsize(initial_boot_params) 126 - + strlen(cmdline) + DTB_EXTRA_SPACE; 128 + + cmdline_len + DTB_EXTRA_SPACE; 127 129 128 130 for (;;) { 129 131 buf = vmalloc(buf_size);
+3 -3
arch/arm64/kernel/probes/kprobes.c
··· 478 478 addr < (unsigned long)__entry_text_end) || 479 479 (addr >= (unsigned long)__idmap_text_start && 480 480 addr < (unsigned long)__idmap_text_end) || 481 + (addr >= (unsigned long)__hyp_text_start && 482 + addr < (unsigned long)__hyp_text_end) || 481 483 !!search_exception_tables(addr)) 482 484 return true; 483 485 484 486 if (!is_kernel_in_hyp_mode()) { 485 - if ((addr >= (unsigned long)__hyp_text_start && 486 - addr < (unsigned long)__hyp_text_end) || 487 - (addr >= (unsigned long)__hyp_idmap_text_start && 487 + if ((addr >= (unsigned long)__hyp_idmap_text_start && 488 488 addr < (unsigned long)__hyp_idmap_text_end)) 489 489 return true; 490 490 }
+29 -30
arch/arm64/mm/dump.c
··· 286 286 287 287 } 288 288 289 - static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) 289 + static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start, 290 + unsigned long end) 290 291 { 291 - pte_t *ptep = pte_offset_kernel(pmdp, 0UL); 292 - unsigned long addr; 293 - unsigned i; 292 + unsigned long addr = start; 293 + pte_t *ptep = pte_offset_kernel(pmdp, start); 294 294 295 - for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { 296 - addr = start + i * PAGE_SIZE; 295 + do { 297 296 note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); 298 - } 297 + } while (ptep++, addr += PAGE_SIZE, addr != end); 299 298 } 300 299 301 - static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) 300 + static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start, 301 + unsigned long end) 302 302 { 303 - pmd_t *pmdp = pmd_offset(pudp, 0UL); 304 - unsigned long addr; 305 - unsigned i; 303 + unsigned long next, addr = start; 304 + pmd_t *pmdp = pmd_offset(pudp, start); 306 305 307 - for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { 306 + do { 308 307 pmd_t pmd = READ_ONCE(*pmdp); 308 + next = pmd_addr_end(addr, end); 309 309 310 - addr = start + i * PMD_SIZE; 311 310 if (pmd_none(pmd) || pmd_sect(pmd)) { 312 311 note_page(st, addr, 3, pmd_val(pmd)); 313 312 } else { 314 313 BUG_ON(pmd_bad(pmd)); 315 - walk_pte(st, pmdp, addr); 314 + walk_pte(st, pmdp, addr, next); 316 315 } 317 - } 316 + } while (pmdp++, addr = next, addr != end); 318 317 } 319 318 320 - static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) 319 + static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start, 320 + unsigned long end) 321 321 { 322 - pud_t *pudp = pud_offset(pgdp, 0UL); 323 - unsigned long addr; 324 - unsigned i; 322 + unsigned long next, addr = start; 323 + pud_t *pudp = pud_offset(pgdp, start); 325 324 326 - for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { 325 + do { 327 326 pud_t pud = READ_ONCE(*pudp); 327 + 
next = pud_addr_end(addr, end); 328 328 329 - addr = start + i * PUD_SIZE; 330 329 if (pud_none(pud) || pud_sect(pud)) { 331 330 note_page(st, addr, 2, pud_val(pud)); 332 331 } else { 333 332 BUG_ON(pud_bad(pud)); 334 - walk_pmd(st, pudp, addr); 333 + walk_pmd(st, pudp, addr, next); 335 334 } 336 - } 335 + } while (pudp++, addr = next, addr != end); 337 336 } 338 337 339 338 static void walk_pgd(struct pg_state *st, struct mm_struct *mm, 340 339 unsigned long start) 341 340 { 342 - pgd_t *pgdp = pgd_offset(mm, 0UL); 343 - unsigned i; 344 - unsigned long addr; 341 + unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0; 342 + unsigned long next, addr = start; 343 + pgd_t *pgdp = pgd_offset(mm, start); 345 344 346 - for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { 345 + do { 347 346 pgd_t pgd = READ_ONCE(*pgdp); 347 + next = pgd_addr_end(addr, end); 348 348 349 - addr = start + i * PGDIR_SIZE; 350 349 if (pgd_none(pgd)) { 351 350 note_page(st, addr, 1, pgd_val(pgd)); 352 351 } else { 353 352 BUG_ON(pgd_bad(pgd)); 354 - walk_pud(st, pgdp, addr); 353 + walk_pud(st, pgdp, addr, next); 355 354 } 356 - } 355 + } while (pgdp++, addr = next, addr != end); 357 356 } 358 357 359 358 void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
+5 -1
arch/arm64/mm/flush.c
··· 33 33 __clean_dcache_area_pou(kaddr, len); 34 34 __flush_icache_all(); 35 35 } else { 36 - flush_icache_range(addr, addr + len); 36 + /* 37 + * Don't issue kick_all_cpus_sync() after I-cache invalidation 38 + * for user mappings. 39 + */ 40 + __flush_icache_range(addr, addr + len); 37 41 } 38 42 } 39 43
+1
arch/c6x/include/asm/Kbuild
··· 30 30 generic-y += preempt.h 31 31 generic-y += segment.h 32 32 generic-y += serial.h 33 + generic-y += shmparam.h 33 34 generic-y += tlbflush.h 34 35 generic-y += topology.h 35 36 generic-y += trace_clock.h
-1
arch/c6x/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 3 generic-y += kvm_para.h 4 - generic-y += shmparam.h 5 4 generic-y += ucontext.h
+1
arch/h8300/include/asm/Kbuild
··· 40 40 generic-y += scatterlist.h 41 41 generic-y += sections.h 42 42 generic-y += serial.h 43 + generic-y += shmparam.h 43 44 generic-y += sizes.h 44 45 generic-y += spinlock.h 45 46 generic-y += timex.h
-1
arch/h8300/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 3 generic-y += kvm_para.h 4 - generic-y += shmparam.h 5 4 generic-y += ucontext.h
+1
arch/hexagon/include/asm/Kbuild
··· 30 30 generic-y += sections.h 31 31 generic-y += segment.h 32 32 generic-y += serial.h 33 + generic-y += shmparam.h 33 34 generic-y += sizes.h 34 35 generic-y += topology.h 35 36 generic-y += trace_clock.h
-1
arch/hexagon/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 - generic-y += shmparam.h 4 3 generic-y += ucontext.h
+7 -3
arch/m68k/emu/nfblock.c
··· 155 155 static int __init nfhd_init(void) 156 156 { 157 157 u32 blocks, bsize; 158 + int ret; 158 159 int i; 159 160 160 161 nfhd_id = nf_get_id("XHDI"); 161 162 if (!nfhd_id) 162 163 return -ENODEV; 163 164 164 - major_num = register_blkdev(major_num, "nfhd"); 165 - if (major_num <= 0) { 165 + ret = register_blkdev(major_num, "nfhd"); 166 + if (ret < 0) { 166 167 pr_warn("nfhd: unable to get major number\n"); 167 - return major_num; 168 + return ret; 168 169 } 170 + 171 + if (!major_num) 172 + major_num = ret; 169 173 170 174 for (i = NFHD_DEV_OFFSET; i < 24; i++) { 171 175 if (nfhd_get_capacity(i, 0, &blocks, &bsize))
+1
arch/m68k/include/asm/Kbuild
··· 20 20 generic-y += percpu.h 21 21 generic-y += preempt.h 22 22 generic-y += sections.h 23 + generic-y += shmparam.h 23 24 generic-y += spinlock.h 24 25 generic-y += topology.h 25 26 generic-y += trace_clock.h
-1
arch/m68k/include/uapi/asm/Kbuild
··· 2 2 3 3 generated-y += unistd_32.h 4 4 generic-y += kvm_para.h 5 - generic-y += shmparam.h
+1
arch/microblaze/include/asm/Kbuild
··· 26 26 generic-y += percpu.h 27 27 generic-y += preempt.h 28 28 generic-y += serial.h 29 + generic-y += shmparam.h 29 30 generic-y += syscalls.h 30 31 generic-y += topology.h 31 32 generic-y += trace_clock.h
-1
arch/microblaze/include/uapi/asm/Kbuild
··· 2 2 3 3 generated-y += unistd_32.h 4 4 generic-y += kvm_para.h 5 - generic-y += shmparam.h 6 5 generic-y += ucontext.h
+15
arch/mips/Kconfig
··· 1403 1403 please say 'N' here. If you want a high-performance kernel to run on 1404 1404 new Loongson 3 machines only, please say 'Y' here. 1405 1405 1406 + config CPU_LOONGSON3_WORKAROUNDS 1407 + bool "Old Loongson 3 LLSC Workarounds" 1408 + default y if SMP 1409 + depends on CPU_LOONGSON3 1410 + help 1411 + Loongson 3 processors have the llsc issues which require workarounds. 1412 + Without workarounds the system may hang unexpectedly. 1413 + 1414 + Newer Loongson 3 will fix these issues and no workarounds are needed. 1415 + The workarounds have no significant side effect on them but may 1416 + decrease the performance of the system so this option should be 1417 + disabled unless the kernel is intended to be run on old systems. 1418 + 1419 + If unsure, please say Y. 1420 + 1406 1421 config CPU_LOONGSON2E 1407 1422 bool "Loongson 2E" 1408 1423 depends on SYS_HAS_CPU_LOONGSON2E
+4 -4
arch/mips/boot/dts/ingenic/ci20.dts
··· 76 76 status = "okay"; 77 77 78 78 pinctrl-names = "default"; 79 - pinctrl-0 = <&pins_uart2>; 79 + pinctrl-0 = <&pins_uart3>; 80 80 }; 81 81 82 82 &uart4 { ··· 196 196 bias-disable; 197 197 }; 198 198 199 - pins_uart2: uart2 { 200 - function = "uart2"; 201 - groups = "uart2-data", "uart2-hwflow"; 199 + pins_uart3: uart3 { 200 + function = "uart3"; 201 + groups = "uart3-data", "uart3-hwflow"; 202 202 bias-disable; 203 203 }; 204 204
+1 -1
arch/mips/boot/dts/ingenic/jz4740.dtsi
··· 161 161 #dma-cells = <2>; 162 162 163 163 interrupt-parent = <&intc>; 164 - interrupts = <29>; 164 + interrupts = <20>; 165 165 166 166 clocks = <&cgu JZ4740_CLK_DMA>; 167 167
+4 -4
arch/mips/boot/dts/xilfpga/nexys4ddr.dts
··· 90 90 interrupts = <0>; 91 91 }; 92 92 93 - axi_i2c: i2c@10A00000 { 93 + axi_i2c: i2c@10a00000 { 94 94 compatible = "xlnx,xps-iic-2.00.a"; 95 95 interrupt-parent = <&axi_intc>; 96 96 interrupts = <4>; 97 - reg = < 0x10A00000 0x10000 >; 97 + reg = < 0x10a00000 0x10000 >; 98 98 clocks = <&ext>; 99 99 xlnx,clk-freq = <0x5f5e100>; 100 100 xlnx,family = "Artix7"; ··· 106 106 #address-cells = <1>; 107 107 #size-cells = <0>; 108 108 109 - ad7420@4B { 109 + ad7420@4b { 110 110 compatible = "adi,adt7420"; 111 - reg = <0x4B>; 111 + reg = <0x4b>; 112 112 }; 113 113 } ; 114 114 };
+6
arch/mips/include/asm/atomic.h
··· 58 58 if (kernel_uses_llsc) { \ 59 59 int temp; \ 60 60 \ 61 + loongson_llsc_mb(); \ 61 62 __asm__ __volatile__( \ 62 63 " .set push \n" \ 63 64 " .set "MIPS_ISA_LEVEL" \n" \ ··· 86 85 if (kernel_uses_llsc) { \ 87 86 int temp; \ 88 87 \ 88 + loongson_llsc_mb(); \ 89 89 __asm__ __volatile__( \ 90 90 " .set push \n" \ 91 91 " .set "MIPS_ISA_LEVEL" \n" \ ··· 120 118 if (kernel_uses_llsc) { \ 121 119 int temp; \ 122 120 \ 121 + loongson_llsc_mb(); \ 123 122 __asm__ __volatile__( \ 124 123 " .set push \n" \ 125 124 " .set "MIPS_ISA_LEVEL" \n" \ ··· 259 256 if (kernel_uses_llsc) { \ 260 257 long temp; \ 261 258 \ 259 + loongson_llsc_mb(); \ 262 260 __asm__ __volatile__( \ 263 261 " .set push \n" \ 264 262 " .set "MIPS_ISA_LEVEL" \n" \ ··· 287 283 if (kernel_uses_llsc) { \ 288 284 long temp; \ 289 285 \ 286 + loongson_llsc_mb(); \ 290 287 __asm__ __volatile__( \ 291 288 " .set push \n" \ 292 289 " .set "MIPS_ISA_LEVEL" \n" \ ··· 321 316 if (kernel_uses_llsc) { \ 322 317 long temp; \ 323 318 \ 319 + loongson_llsc_mb(); \ 324 320 __asm__ __volatile__( \ 325 321 " .set push \n" \ 326 322 " .set "MIPS_ISA_LEVEL" \n" \
+36
arch/mips/include/asm/barrier.h
··· 222 222 #define __smp_mb__before_atomic() __smp_mb__before_llsc() 223 223 #define __smp_mb__after_atomic() smp_llsc_mb() 224 224 225 + /* 226 + * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load, 227 + * store or pref) in between an ll & sc can cause the sc instruction to 228 + * erroneously succeed, breaking atomicity. Whilst it's unusual to write code 229 + * containing such sequences, this bug bites harder than we might otherwise 230 + * expect due to reordering & speculation: 231 + * 232 + * 1) A memory access appearing prior to the ll in program order may actually 233 + * be executed after the ll - this is the reordering case. 234 + * 235 + * In order to avoid this we need to place a memory barrier (ie. a sync 236 + * instruction) prior to every ll instruction, in between it & any earlier 237 + * memory access instructions. Many of these cases are already covered by 238 + * smp_mb__before_llsc() but for the remaining cases, typically ones in 239 + * which multiple CPUs may operate on a memory location but ordering is not 240 + * usually guaranteed, we use loongson_llsc_mb() below. 241 + * 242 + * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later. 243 + * 244 + * 2) If a conditional branch exists between an ll & sc with a target outside 245 + * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg() 246 + * or similar, then misprediction of the branch may allow speculative 247 + * execution of memory accesses from outside of the ll-sc loop. 248 + * 249 + * In order to avoid this we need a memory barrier (ie. a sync instruction) 250 + * at each affected branch target, for which we also use loongson_llsc_mb() 251 + * defined below. 252 + * 253 + * This case affects all current Loongson 3 CPUs. 
254 + */ 255 + #ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */ 256 + #define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") 257 + #else 258 + #define loongson_llsc_mb() do { } while (0) 259 + #endif 260 + 225 261 #include <asm-generic/barrier.h> 226 262 227 263 #endif /* __ASM_BARRIER_H */
+5
arch/mips/include/asm/bitops.h
··· 69 69 : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); 70 70 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 71 71 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 72 + loongson_llsc_mb(); 72 73 do { 73 74 __asm__ __volatile__( 74 75 " " __LL "%0, %1 # set_bit \n" ··· 80 79 } while (unlikely(!temp)); 81 80 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 82 81 } else if (kernel_uses_llsc) { 82 + loongson_llsc_mb(); 83 83 do { 84 84 __asm__ __volatile__( 85 85 " .set push \n" ··· 125 123 : "ir" (~(1UL << bit))); 126 124 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 127 125 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 126 + loongson_llsc_mb(); 128 127 do { 129 128 __asm__ __volatile__( 130 129 " " __LL "%0, %1 # clear_bit \n" ··· 136 133 } while (unlikely(!temp)); 137 134 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 138 135 } else if (kernel_uses_llsc) { 136 + loongson_llsc_mb(); 139 137 do { 140 138 __asm__ __volatile__( 141 139 " .set push \n" ··· 197 193 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 198 194 unsigned long temp; 199 195 196 + loongson_llsc_mb(); 200 197 do { 201 198 __asm__ __volatile__( 202 199 " .set push \n"
+3
arch/mips/include/asm/futex.h
··· 50 50 "i" (-EFAULT) \ 51 51 : "memory"); \ 52 52 } else if (cpu_has_llsc) { \ 53 + loongson_llsc_mb(); \ 53 54 __asm__ __volatile__( \ 54 55 " .set push \n" \ 55 56 " .set noat \n" \ ··· 164 163 "i" (-EFAULT) 165 164 : "memory"); 166 165 } else if (cpu_has_llsc) { 166 + loongson_llsc_mb(); 167 167 __asm__ __volatile__( 168 168 "# futex_atomic_cmpxchg_inatomic \n" 169 169 " .set push \n" ··· 194 192 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 195 193 "i" (-EFAULT) 196 194 : "memory"); 195 + loongson_llsc_mb(); 197 196 } else 198 197 return -ENOSYS; 199 198
+2
arch/mips/include/asm/pgtable.h
··· 228 228 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) 229 229 : [global] "r" (page_global)); 230 230 } else if (kernel_uses_llsc) { 231 + loongson_llsc_mb(); 231 232 __asm__ __volatile__ ( 232 233 " .set push \n" 233 234 " .set "MIPS_ISA_ARCH_LEVEL" \n" ··· 243 242 " .set pop \n" 244 243 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) 245 244 : [global] "r" (page_global)); 245 + loongson_llsc_mb(); 246 246 } 247 247 #else /* !CONFIG_SMP */ 248 248 if (pte_none(*buddy))
+1 -1
arch/mips/kernel/mips-cm.c
··· 457 457 } 458 458 459 459 /* reprime cause register */ 460 - write_gcr_error_cause(0); 460 + write_gcr_error_cause(cm_error); 461 461 }
+3 -4
arch/mips/kernel/process.c
··· 371 371 static int get_frame_info(struct mips_frame_info *info) 372 372 { 373 373 bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); 374 - union mips_instruction insn, *ip, *ip_end; 374 + union mips_instruction insn, *ip; 375 375 const unsigned int max_insns = 128; 376 376 unsigned int last_insn_size = 0; 377 377 unsigned int i; ··· 384 384 if (!ip) 385 385 goto err; 386 386 387 - ip_end = (void *)ip + info->func_size; 388 - 389 - for (i = 0; i < max_insns && ip < ip_end; i++) { 387 + for (i = 0; i < max_insns; i++) { 390 388 ip = (void *)ip + last_insn_size; 389 + 391 390 if (is_mmips && mm_insn_16bit(ip->halfword[0])) { 392 391 insn.word = ip->halfword[0] << 16; 393 392 last_insn_size = 2;
+23
arch/mips/loongson64/Platform
··· 23 23 endif 24 24 25 25 cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap 26 + 27 + # 28 + # Some versions of binutils, not currently mainline as of 2019/02/04, support 29 + # an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction 30 + # to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a 31 + # description). 32 + # 33 + # We disable this in order to prevent the assembler meddling with the 34 + # instruction that labels refer to, ie. if we label an ll instruction: 35 + # 36 + # 1: ll v0, 0(a0) 37 + # 38 + # ...then with the assembler fix applied the label may actually point at a sync 39 + # instruction inserted by the assembler, and if we were using the label in an 40 + # exception table the table would no longer contain the address of the ll 41 + # instruction. 42 + # 43 + # Avoid this by explicitly disabling that assembler behaviour. If upstream 44 + # binutils does not merge support for the flag then we can revisit & remove 45 + # this later - for now it ensures vendor toolchains don't cause problems. 46 + # 47 + cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) 48 + 26 49 # 27 50 # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a 28 51 # as MIPS64 R2; older versions as just R1. This leaves the possibility open
+6 -1
arch/mips/loongson64/common/reset.c
··· 59 59 { 60 60 #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE 61 61 mach_prepare_shutdown(); 62 - unreachable(); 62 + 63 + /* 64 + * It needs a wait loop here, but mips/kernel/reset.c already calls 65 + * a generic delay loop, machine_hang(), so simply return. 66 + */ 67 + return; 63 68 #else 64 69 void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; 65 70
+10
arch/mips/mm/tlbex.c
··· 932 932 * to mimic that here by taking a load/istream page 933 933 * fault. 934 934 */ 935 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 936 + uasm_i_sync(p, 0); 935 937 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); 936 938 uasm_i_jr(p, ptr); 937 939 ··· 1648 1646 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1649 1647 { 1650 1648 #ifdef CONFIG_SMP 1649 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 1650 + uasm_i_sync(p, 0); 1651 1651 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1652 1652 if (cpu_has_64bits) 1653 1653 uasm_i_lld(p, pte, 0, ptr); ··· 2263 2259 #endif 2264 2260 2265 2261 uasm_l_nopage_tlbl(&l, p); 2262 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2263 + uasm_i_sync(&p, 0); 2266 2264 build_restore_work_registers(&p); 2267 2265 #ifdef CONFIG_CPU_MICROMIPS 2268 2266 if ((unsigned long)tlb_do_page_fault_0 & 1) { ··· 2319 2313 #endif 2320 2314 2321 2315 uasm_l_nopage_tlbs(&l, p); 2316 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2317 + uasm_i_sync(&p, 0); 2322 2318 build_restore_work_registers(&p); 2323 2319 #ifdef CONFIG_CPU_MICROMIPS 2324 2320 if ((unsigned long)tlb_do_page_fault_1 & 1) { ··· 2376 2368 #endif 2377 2369 2378 2370 uasm_l_nopage_tlbm(&l, p); 2371 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2372 + uasm_i_sync(&p, 0); 2379 2373 build_restore_work_registers(&p); 2380 2374 #ifdef CONFIG_CPU_MICROMIPS 2381 2375 if ((unsigned long)tlb_do_page_fault_1 & 1) {
+5 -5
arch/mips/pci/pci-octeon.c
··· 568 568 if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 569 569 return 0; 570 570 571 + if (!octeon_is_pci_host()) { 572 + pr_notice("Not in host mode, PCI Controller not initialized\n"); 573 + return 0; 574 + } 575 + 571 576 /* Point pcibios_map_irq() to the PCI version of it */ 572 577 octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; 573 578 ··· 583 578 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL; 584 579 else 585 580 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; 586 - 587 - if (!octeon_is_pci_host()) { 588 - pr_notice("Not in host mode, PCI Controller not initialized\n"); 589 - return 0; 590 - } 591 581 592 582 /* PCI I/O and PCI MEM values */ 593 583 set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
+3 -2
arch/mips/vdso/Makefile
··· 8 8 $(filter -E%,$(KBUILD_CFLAGS)) \ 9 9 $(filter -mmicromips,$(KBUILD_CFLAGS)) \ 10 10 $(filter -march=%,$(KBUILD_CFLAGS)) \ 11 + $(filter -m%-float,$(KBUILD_CFLAGS)) \ 11 12 -D__VDSO__ 12 13 13 14 ifdef CONFIG_CC_IS_CLANG ··· 130 129 $(call cmd,force_checksrc) 131 130 $(call if_changed_rule,cc_o_c) 132 131 133 - $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 132 + $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 134 133 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE 135 134 $(call if_changed_dep,cpp_lds_S) 136 135 ··· 170 169 $(call cmd,force_checksrc) 171 170 $(call if_changed_rule,cc_o_c) 172 171 173 - $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 172 + $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 174 173 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE 175 174 $(call if_changed_dep,cpp_lds_S) 176 175
+1
arch/openrisc/include/asm/Kbuild
··· 34 34 generic-y += qrwlock.h 35 35 generic-y += sections.h 36 36 generic-y += segment.h 37 + generic-y += shmparam.h 37 38 generic-y += string.h 38 39 generic-y += switch_to.h 39 40 generic-y += topology.h
-1
arch/openrisc/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 3 generic-y += kvm_para.h 4 - generic-y += shmparam.h 5 4 generic-y += ucontext.h
+7 -15
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 1258 1258 1259 1259 #define pmd_move_must_withdraw pmd_move_must_withdraw 1260 1260 struct spinlock; 1261 - static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, 1262 - struct spinlock *old_pmd_ptl, 1263 - struct vm_area_struct *vma) 1264 - { 1265 - if (radix_enabled()) 1266 - return false; 1267 - /* 1268 - * Archs like ppc64 use pgtable to store per pmd 1269 - * specific information. So when we switch the pmd, 1270 - * we should also withdraw and deposit the pgtable 1271 - */ 1272 - return true; 1273 - } 1274 - 1275 - 1261 + extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, 1262 + struct spinlock *old_pmd_ptl, 1263 + struct vm_area_struct *vma); 1264 + /* 1265 + * Hash translation mode use the deposited table to store hash pte 1266 + * slot information. 1267 + */ 1276 1268 #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit 1277 1269 static inline bool arch_needs_pgtable_deposit(void) 1278 1270 {
+22
arch/powerpc/mm/pgtable-book3s64.c
··· 400 400 atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); 401 401 } 402 402 #endif /* CONFIG_PROC_FS */ 403 + 404 + /* 405 + * For hash translation mode, we use the deposited table to store hash slot 406 + * information and they are stored at PTRS_PER_PMD offset from related pmd 407 + * location. Hence a pmd move requires deposit and withdraw. 408 + * 409 + * For radix translation with split pmd ptl, we store the deposited table in the 410 + * pmd page. Hence if we have different pmd page we need to withdraw during pmd 411 + * move. 412 + * 413 + * With hash we use deposited table always irrespective of anon or not. 414 + * With radix we use deposited table only for anonymous mapping. 415 + */ 416 + int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, 417 + struct spinlock *old_pmd_ptl, 418 + struct vm_area_struct *vma) 419 + { 420 + if (radix_enabled()) 421 + return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 422 + 423 + return true; 424 + }
+4 -1
arch/powerpc/platforms/pseries/papr_scm.c
··· 43 43 { 44 44 unsigned long ret[PLPAR_HCALL_BUFSIZE]; 45 45 uint64_t rc, token; 46 + uint64_t saved = 0; 46 47 47 48 /* 48 49 * When the hypervisor cannot map all the requested memory in a single ··· 57 56 rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, 58 57 p->blocks, BIND_ANY_ADDR, token); 59 58 token = ret[0]; 59 + if (!saved) 60 + saved = ret[1]; 60 61 cond_resched(); 61 62 } while (rc == H_BUSY); 62 63 ··· 67 64 return -ENXIO; 68 65 } 69 66 70 - p->bound_addr = ret[1]; 67 + p->bound_addr = saved; 71 68 72 69 dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); 73 70
+1 -1
arch/riscv/Kconfig
··· 103 103 prompt "Base ISA" 104 104 default ARCH_RV64I 105 105 help 106 - This selects the base ISA that this kernel will traget and must match 106 + This selects the base ISA that this kernel will target and must match 107 107 the target platform. 108 108 109 109 config ARCH_RV32I
+5 -3
arch/riscv/configs/defconfig
··· 13 13 CONFIG_EXPERT=y 14 14 CONFIG_BPF_SYSCALL=y 15 15 CONFIG_SMP=y 16 - CONFIG_PCI=y 17 - CONFIG_PCIE_XILINX=y 18 16 CONFIG_MODULES=y 19 17 CONFIG_MODULE_UNLOAD=y 20 18 CONFIG_NET=y ··· 26 28 CONFIG_IP_PNP_BOOTP=y 27 29 CONFIG_IP_PNP_RARP=y 28 30 CONFIG_NETLINK_DIAG=y 31 + CONFIG_PCI=y 32 + CONFIG_PCIEPORTBUS=y 33 + CONFIG_PCI_HOST_GENERIC=y 34 + CONFIG_PCIE_XILINX=y 29 35 CONFIG_DEVTMPFS=y 30 36 CONFIG_BLK_DEV_LOOP=y 31 37 CONFIG_VIRTIO_BLK=y ··· 65 63 CONFIG_USB_UAS=y 66 64 CONFIG_VIRTIO_MMIO=y 67 65 CONFIG_SIFIVE_PLIC=y 68 - CONFIG_RAS=y 69 66 CONFIG_EXT4_FS=y 70 67 CONFIG_EXT4_FS_POSIX_ACL=y 71 68 CONFIG_AUTOFS4_FS=y ··· 78 77 CONFIG_NFS_V4_2=y 79 78 CONFIG_ROOT_NFS=y 80 79 CONFIG_CRYPTO_USER_API_HASH=y 80 + CONFIG_CRYPTO_DEV_VIRTIO=y 81 81 CONFIG_PRINTK_TIME=y 82 82 # CONFIG_RCU_TRACE is not set
+1 -1
arch/riscv/include/asm/page.h
··· 80 80 #define __pgd(x) ((pgd_t) { (x) }) 81 81 #define __pgprot(x) ((pgprot_t) { (x) }) 82 82 83 - #ifdef CONFIG_64BITS 83 + #ifdef CONFIG_64BIT 84 84 #define PTE_FMT "%016lx" 85 85 #else 86 86 #define PTE_FMT "%08lx"
+1 -1
arch/riscv/include/asm/processor.h
··· 22 22 * This decides where the kernel will search for a free chunk of vm 23 23 * space during mmap's. 24 24 */ 25 - #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) 25 + #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) 26 26 27 27 #define STACK_TOP TASK_SIZE 28 28 #define STACK_TOP_MAX STACK_TOP
+1
arch/riscv/kernel/asm-offsets.c
··· 39 39 OFFSET(TASK_STACK, task_struct, stack); 40 40 OFFSET(TASK_TI, task_struct, thread_info); 41 41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); 42 + OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count); 42 43 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); 43 44 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); 44 45 OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
+17 -1
arch/riscv/kernel/entry.S
··· 144 144 REG_L x2, PT_SP(sp) 145 145 .endm 146 146 147 + #if !IS_ENABLED(CONFIG_PREEMPT) 148 + .set resume_kernel, restore_all 149 + #endif 150 + 147 151 ENTRY(handle_exception) 148 152 SAVE_ALL 149 153 ··· 232 228 REG_L s0, PT_SSTATUS(sp) 233 229 csrc sstatus, SR_SIE 234 230 andi s0, s0, SR_SPP 235 - bnez s0, restore_all 231 + bnez s0, resume_kernel 236 232 237 233 resume_userspace: 238 234 /* Interrupts must be disabled here so flags are checked atomically */ ··· 253 249 restore_all: 254 250 RESTORE_ALL 255 251 sret 252 + 253 + #if IS_ENABLED(CONFIG_PREEMPT) 254 + resume_kernel: 255 + REG_L s0, TASK_TI_PREEMPT_COUNT(tp) 256 + bnez s0, restore_all 257 + need_resched: 258 + REG_L s0, TASK_TI_FLAGS(tp) 259 + andi s0, s0, _TIF_NEED_RESCHED 260 + beqz s0, restore_all 261 + call preempt_schedule_irq 262 + j need_resched 263 + #endif 256 264 257 265 work_pending: 258 266 /* Enter slow path for supplementary processing */
+1 -1
arch/riscv/kernel/setup.c
··· 181 181 BUG_ON(mem_size == 0); 182 182 183 183 set_max_mapnr(PFN_DOWN(mem_size)); 184 - max_low_pfn = memblock_end_of_DRAM(); 184 + max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); 185 185 186 186 #ifdef CONFIG_BLK_DEV_INITRD 187 187 setup_initrd();
+1 -5
arch/riscv/kernel/smpboot.c
··· 57 57 58 58 while ((dn = of_find_node_by_type(dn, "cpu"))) { 59 59 hart = riscv_of_processor_hartid(dn); 60 - if (hart < 0) { 61 - of_node_put(dn); 60 + if (hart < 0) 62 61 continue; 63 - } 64 62 65 63 if (hart == cpuid_to_hartid_map(0)) { 66 64 BUG_ON(found_boot_cpu); 67 65 found_boot_cpu = 1; 68 - of_node_put(dn); 69 66 continue; 70 67 } 71 68 ··· 70 73 set_cpu_possible(cpuid, true); 71 74 set_cpu_present(cpuid, true); 72 75 cpuid++; 73 - of_node_put(dn); 74 76 } 75 77 76 78 BUG_ON(!found_boot_cpu);
+2 -1
arch/riscv/mm/init.c
··· 28 28 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; 29 29 30 30 #ifdef CONFIG_ZONE_DMA32 31 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); 31 + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, 32 + (unsigned long) PFN_PHYS(max_low_pfn))); 32 33 #endif 33 34 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 34 35
+1
arch/unicore32/include/asm/Kbuild
··· 28 28 generic-y += sections.h 29 29 generic-y += segment.h 30 30 generic-y += serial.h 31 + generic-y += shmparam.h 31 32 generic-y += sizes.h 32 33 generic-y += syscalls.h 33 34 generic-y += topology.h
-1
arch/unicore32/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 3 generic-y += kvm_para.h 4 - generic-y += shmparam.h 5 4 generic-y += ucontext.h
+3 -3
arch/x86/Kconfig
··· 446 446 branches. Requires a compiler with -mindirect-branch=thunk-extern 447 447 support for full protection. The kernel may run slower. 448 448 449 - config X86_RESCTRL 450 - bool "Resource Control support" 449 + config X86_CPU_RESCTRL 450 + bool "x86 CPU resource control support" 451 451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) 452 452 select KERNFS 453 453 help 454 - Enable Resource Control support. 454 + Enable x86 CPU resource control support. 455 455 456 456 Provide support for the allocation and monitoring of system resources 457 457 usage by the CPU.
+10
arch/x86/boot/compressed/head_64.S
··· 600 600 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax 601 601 movl %eax, %cr3 602 602 3: 603 + /* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */ 604 + pushl %ecx 605 + pushl %edx 606 + movl $MSR_EFER, %ecx 607 + rdmsr 608 + btsl $_EFER_LME, %eax 609 + wrmsr 610 + popl %edx 611 + popl %ecx 612 + 603 613 /* Enable PAE and LA57 (if required) paging modes */ 604 614 movl $X86_CR4_PAE, %eax 605 615 cmpl $0, %edx
+1 -1
arch/x86/boot/compressed/pgtable.h
··· 6 6 #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 7 7 8 8 #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE 9 - #define TRAMPOLINE_32BIT_CODE_SIZE 0x60 9 + #define TRAMPOLINE_32BIT_CODE_SIZE 0x70 10 10 11 11 #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE 12 12
+11 -5
arch/x86/events/intel/core.c
··· 3559 3559 3560 3560 static void intel_pmu_cpu_dying(int cpu) 3561 3561 { 3562 + fini_debug_store_on_cpu(cpu); 3563 + 3564 + if (x86_pmu.counter_freezing) 3565 + disable_counter_freeze(); 3566 + } 3567 + 3568 + static void intel_pmu_cpu_dead(int cpu) 3569 + { 3562 3570 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 3563 3571 struct intel_shared_regs *pc; 3564 3572 ··· 3578 3570 } 3579 3571 3580 3572 free_excl_cntrs(cpu); 3581 - 3582 - fini_debug_store_on_cpu(cpu); 3583 - 3584 - if (x86_pmu.counter_freezing) 3585 - disable_counter_freeze(); 3586 3573 } 3587 3574 3588 3575 static void intel_pmu_sched_task(struct perf_event_context *ctx, ··· 3666 3663 .cpu_prepare = intel_pmu_cpu_prepare, 3667 3664 .cpu_starting = intel_pmu_cpu_starting, 3668 3665 .cpu_dying = intel_pmu_cpu_dying, 3666 + .cpu_dead = intel_pmu_cpu_dead, 3669 3667 }; 3670 3668 3671 3669 static struct attribute *intel_pmu_attrs[]; ··· 3707 3703 .cpu_prepare = intel_pmu_cpu_prepare, 3708 3704 .cpu_starting = intel_pmu_cpu_starting, 3709 3705 .cpu_dying = intel_pmu_cpu_dying, 3706 + .cpu_dead = intel_pmu_cpu_dead, 3707 + 3710 3708 .guest_get_msrs = intel_guest_get_msrs, 3711 3709 .sched_task = intel_pmu_sched_task, 3712 3710 };
+3 -1
arch/x86/events/intel/uncore_snbep.c
··· 1222 1222 .id_table = snbep_uncore_pci_ids, 1223 1223 }; 1224 1224 1225 + #define NODE_ID_MASK 0x7 1226 + 1225 1227 /* 1226 1228 * build pci bus to socket mapping 1227 1229 */ ··· 1245 1243 err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); 1246 1244 if (err) 1247 1245 break; 1248 - nodeid = config; 1246 + nodeid = config & NODE_ID_MASK; 1249 1247 /* get the Node ID mapping */ 1250 1248 err = pci_read_config_dword(ubox_dev, idmap_loc, &config); 1251 1249 if (err)
+2 -1
arch/x86/include/asm/intel-family.h
··· 6 6 * "Big Core" Processors (Branded as Core, Xeon, etc...) 7 7 * 8 8 * The "_X" parts are generally the EP and EX Xeons, or the 9 - * "Extreme" ones, like Broadwell-E. 9 + * "Extreme" ones, like Broadwell-E, or Atom microserver. 10 10 * 11 11 * While adding a new CPUID for a new microarchitecture, add a new 12 12 * group to keep logically sorted out in chronological order. Within ··· 71 71 #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ 72 72 #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ 73 73 #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ 74 + #define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */ 74 75 75 76 /* Xeon Phi */ 76 77
+4
arch/x86/include/asm/page_64_types.h
··· 7 7 #endif 8 8 9 9 #ifdef CONFIG_KASAN 10 + #ifdef CONFIG_KASAN_EXTRA 11 + #define KASAN_STACK_ORDER 2 12 + #else 10 13 #define KASAN_STACK_ORDER 1 14 + #endif 11 15 #else 12 16 #define KASAN_STACK_ORDER 0 13 17 #endif
+1 -1
arch/x86/include/asm/pgtable.h
··· 1065 1065 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1066 1066 pmd_t *pmdp, pmd_t pmd) 1067 1067 { 1068 - native_set_pmd(pmdp, pmd); 1068 + set_pmd(pmdp, pmd); 1069 1069 } 1070 1070 1071 1071 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+2 -2
arch/x86/include/asm/resctrl_sched.h
··· 2 2 #ifndef _ASM_X86_RESCTRL_SCHED_H 3 3 #define _ASM_X86_RESCTRL_SCHED_H 4 4 5 - #ifdef CONFIG_X86_RESCTRL 5 + #ifdef CONFIG_X86_CPU_RESCTRL 6 6 7 7 #include <linux/sched.h> 8 8 #include <linux/jump_label.h> ··· 88 88 89 89 static inline void resctrl_sched_in(void) {} 90 90 91 - #endif /* CONFIG_X86_RESCTRL */ 91 + #endif /* CONFIG_X86_CPU_RESCTRL */ 92 92 93 93 #endif /* _ASM_X86_RESCTRL_SCHED_H */
+1 -1
arch/x86/kernel/cpu/Makefile
··· 39 39 obj-$(CONFIG_X86_MCE) += mce/ 40 40 obj-$(CONFIG_MTRR) += mtrr/ 41 41 obj-$(CONFIG_MICROCODE) += microcode/ 42 - obj-$(CONFIG_X86_RESCTRL) += resctrl/ 42 + obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/ 43 43 44 44 obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 45 45
+1 -1
arch/x86/kernel/cpu/bugs.c
··· 71 71 * identify_boot_cpu() initialized SMT support information, let the 72 72 * core code know. 73 73 */ 74 - cpu_smt_check_topology_early(); 74 + cpu_smt_check_topology(); 75 75 76 76 if (!IS_ENABLED(CONFIG_SMP)) { 77 77 pr_info("CPU: ");
+1
arch/x86/kernel/cpu/mce/core.c
··· 784 784 quirk_no_way_out(i, m, regs); 785 785 786 786 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { 787 + m->bank = i; 787 788 mce_read_aux(m, i); 788 789 *msg = tmp; 789 790 return 1;
+1 -1
arch/x86/kernel/cpu/microcode/amd.c
··· 855 855 if (!p) { 856 856 return ret; 857 857 } else { 858 - if (boot_cpu_data.microcode == p->patch_id) 858 + if (boot_cpu_data.microcode >= p->patch_id) 859 859 return ret; 860 860 861 861 ret = UCODE_NEW;
+2 -2
arch/x86/kernel/cpu/resctrl/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o 3 - obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o 2 + obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o 3 + obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o 4 4 CFLAGS_pseudo_lock.o = -I$(src)
+3
arch/x86/kernel/kexec-bzimage64.c
··· 167 167 struct efi_info *current_ei = &boot_params.efi_info; 168 168 struct efi_info *ei = &params->efi_info; 169 169 170 + if (!efi_enabled(EFI_RUNTIME_SERVICES)) 171 + return 0; 172 + 170 173 if (!current_ei->efi_memmap_size) 171 174 return 0; 172 175
+1
arch/x86/kvm/vmx/nested.c
··· 211 211 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 212 212 return; 213 213 214 + hrtimer_cancel(&vmx->nested.preemption_timer); 214 215 vmx->nested.vmxon = false; 215 216 vmx->nested.smm.vmxon = false; 216 217 free_vpid(vmx->nested.vpid02);
+2 -1
arch/x86/kvm/vmx/vmx.c
··· 26 26 #include <linux/mod_devicetable.h> 27 27 #include <linux/mm.h> 28 28 #include <linux/sched.h> 29 + #include <linux/sched/smt.h> 29 30 #include <linux/slab.h> 30 31 #include <linux/tboot.h> 31 32 #include <linux/trace_events.h> ··· 6824 6823 * Warn upon starting the first VM in a potentially 6825 6824 * insecure environment. 6826 6825 */ 6827 - if (cpu_smt_control == CPU_SMT_ENABLED) 6826 + if (sched_smt_active()) 6828 6827 pr_warn_once(L1TF_MSG_SMT); 6829 6828 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 6830 6829 pr_warn_once(L1TF_MSG_L1D);
+7
arch/x86/kvm/x86.c
··· 5116 5116 { 5117 5117 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 5118 5118 5119 + /* 5120 + * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED 5121 + * is returned, but our callers are not ready for that and they blindly 5122 + * call kvm_inject_page_fault. Ensure that they at least do not leak 5123 + * uninitialized kernel stack memory into cr2 and error code. 5124 + */ 5125 + memset(exception, 0, sizeof(*exception)); 5119 5126 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 5120 5127 exception); 5121 5128 }
+30 -3
arch/x86/lib/iomem.c
··· 2 2 #include <linux/module.h> 3 3 #include <linux/io.h> 4 4 5 + #define movs(type,to,from) \ 6 + asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory") 7 + 5 8 /* Originally from i386/string.h */ 6 - static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) 9 + static __always_inline void rep_movs(void *to, const void *from, size_t n) 7 10 { 8 11 unsigned long d0, d1, d2; 9 12 asm volatile("rep ; movsl\n\t" ··· 24 21 25 22 void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) 26 23 { 27 - __iomem_memcpy(to, (const void *)from, n); 24 + if (unlikely(!n)) 25 + return; 26 + 27 + /* Align any unaligned source IO */ 28 + if (unlikely(1 & (unsigned long)from)) { 29 + movs("b", to, from); 30 + n--; 31 + } 32 + if (n > 1 && unlikely(2 & (unsigned long)from)) { 33 + movs("w", to, from); 34 + n-=2; 35 + } 36 + rep_movs(to, (const void *)from, n); 28 37 } 29 38 EXPORT_SYMBOL(memcpy_fromio); 30 39 31 40 void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) 32 41 { 33 - __iomem_memcpy((void *)to, (const void *) from, n); 42 + if (unlikely(!n)) 43 + return; 44 + 45 + /* Align any unaligned destination IO */ 46 + if (unlikely(1 & (unsigned long)to)) { 47 + movs("b", to, from); 48 + n--; 49 + } 50 + if (n > 1 && unlikely(2 & (unsigned long)to)) { 51 + movs("w", to, from); 52 + n-=2; 53 + } 54 + rep_movs((void *)to, (const void *) from, n); 34 55 } 35 56 EXPORT_SYMBOL(memcpy_toio); 36 57
+1 -1
arch/x86/mm/fault.c
··· 595 595 return; 596 596 } 597 597 598 - addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24); 598 + addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24); 599 599 #ifdef CONFIG_X86_64 600 600 addr |= ((u64)desc.base3 << 32); 601 601 #endif
+25 -25
arch/x86/mm/pageattr.c
··· 230 230 231 231 #endif 232 232 233 + /* 234 + * See set_mce_nospec(). 235 + * 236 + * Machine check recovery code needs to change cache mode of poisoned pages to 237 + * UC to avoid speculative access logging another error. But passing the 238 + * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a 239 + * speculative access. So we cheat and flip the top bit of the address. This 240 + * works fine for the code that updates the page tables. But at the end of the 241 + * process we need to flush the TLB and cache and the non-canonical address 242 + * causes a #GP fault when used by the INVLPG and CLFLUSH instructions. 243 + * 244 + * But in the common case we already have a canonical address. This code 245 + * will fix the top bit if needed and is a no-op otherwise. 246 + */ 247 + static inline unsigned long fix_addr(unsigned long addr) 248 + { 249 + #ifdef CONFIG_X86_64 250 + return (long)(addr << 1) >> 1; 251 + #else 252 + return addr; 253 + #endif 254 + } 255 + 233 256 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) 234 257 { 235 258 if (cpa->flags & CPA_PAGES_ARRAY) { ··· 336 313 unsigned int i; 337 314 338 315 for (i = 0; i < cpa->numpages; i++) 339 - __flush_tlb_one_kernel(__cpa_addr(cpa, i)); 316 + __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i))); 340 317 } 341 318 342 319 static void cpa_flush(struct cpa_data *data, int cache) ··· 370 347 * Only flush present addresses: 371 348 */ 372 349 if (pte && (pte_val(*pte) & _PAGE_PRESENT)) 373 - clflush_cache_range_opt((void *)addr, PAGE_SIZE); 350 + clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE); 374 351 } 375 352 mb(); 376 353 } ··· 1649 1626 cpa->numpages = numpages; 1650 1627 return ret; 1651 1628 } 1652 - 1653 - /* 1654 - * Machine check recovery code needs to change cache mode of poisoned 1655 - * pages to UC to avoid speculative access logging another error. 
But 1656 - * passing the address of the 1:1 mapping to set_memory_uc() is a fine 1657 - * way to encourage a speculative access. So we cheat and flip the top 1658 - * bit of the address. This works fine for the code that updates the 1659 - * page tables. But at the end of the process we need to flush the cache 1660 - * and the non-canonical address causes a #GP fault when used by the 1661 - * CLFLUSH instruction. 1662 - * 1663 - * But in the common case we already have a canonical address. This code 1664 - * will fix the top bit if needed and is a no-op otherwise. 1665 - */ 1666 - static inline unsigned long make_addr_canonical_again(unsigned long addr) 1667 - { 1668 - #ifdef CONFIG_X86_64 1669 - return (long)(addr << 1) >> 1; 1670 - #else 1671 - return addr; 1672 - #endif 1673 - } 1674 - 1675 1629 1676 1630 static int change_page_attr_set_clr(unsigned long *addr, int numpages, 1677 1631 pgprot_t mask_set, pgprot_t mask_clr,
+2 -2
arch/xtensa/Kconfig
··· 164 164 If unsure, say N. 165 165 166 166 config XTENSA_UNALIGNED_USER 167 - bool "Unaligned memory access in use space" 167 + bool "Unaligned memory access in user space" 168 168 help 169 169 The Xtensa architecture currently does not handle unaligned 170 170 memory accesses in hardware but through an exception handler. ··· 451 451 help 452 452 Include support for flattened device tree machine descriptions. 453 453 454 - config BUILTIN_DTB 454 + config BUILTIN_DTB_SOURCE 455 455 string "DTB to build into the kernel image" 456 456 depends on OF 457 457
+3 -3
arch/xtensa/boot/dts/Makefile
··· 7 7 # 8 8 # 9 9 10 - BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o 11 - ifneq ($(CONFIG_BUILTIN_DTB),"") 12 - obj-$(CONFIG_OF) += $(BUILTIN_DTB) 10 + BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o 11 + ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") 12 + obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE) 13 13 endif 14 14 15 15 # for CONFIG_OF_ALL_DTBS test
+1 -1
arch/xtensa/configs/audio_kc705_defconfig
··· 34 34 CONFIG_CMDLINE_BOOL=y 35 35 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" 36 36 CONFIG_USE_OF=y 37 - CONFIG_BUILTIN_DTB="kc705" 37 + CONFIG_BUILTIN_DTB_SOURCE="kc705" 38 38 # CONFIG_COMPACTION is not set 39 39 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 40 40 CONFIG_PM=y
+1 -1
arch/xtensa/configs/cadence_csp_defconfig
··· 38 38 # CONFIG_PCI is not set 39 39 CONFIG_XTENSA_PLATFORM_XTFPGA=y 40 40 CONFIG_USE_OF=y 41 - CONFIG_BUILTIN_DTB="csp" 41 + CONFIG_BUILTIN_DTB_SOURCE="csp" 42 42 # CONFIG_COMPACTION is not set 43 43 CONFIG_XTFPGA_LCD=y 44 44 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+1 -1
arch/xtensa/configs/generic_kc705_defconfig
··· 33 33 CONFIG_CMDLINE_BOOL=y 34 34 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" 35 35 CONFIG_USE_OF=y 36 - CONFIG_BUILTIN_DTB="kc705" 36 + CONFIG_BUILTIN_DTB_SOURCE="kc705" 37 37 # CONFIG_COMPACTION is not set 38 38 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 39 39 CONFIG_NET=y
+1 -1
arch/xtensa/configs/nommu_kc705_defconfig
··· 39 39 CONFIG_CMDLINE_BOOL=y 40 40 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000" 41 41 CONFIG_USE_OF=y 42 - CONFIG_BUILTIN_DTB="kc705_nommu" 42 + CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu" 43 43 CONFIG_BINFMT_FLAT=y 44 44 CONFIG_NET=y 45 45 CONFIG_PACKET=y
+2 -1
arch/xtensa/configs/smp_lx200_defconfig
··· 33 33 CONFIG_HOTPLUG_CPU=y 34 34 # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set 35 35 # CONFIG_PCI is not set 36 + CONFIG_VECTORS_OFFSET=0x00002000 36 37 CONFIG_XTENSA_PLATFORM_XTFPGA=y 37 38 CONFIG_CMDLINE_BOOL=y 38 39 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" 39 40 CONFIG_USE_OF=y 40 - CONFIG_BUILTIN_DTB="lx200mx" 41 + CONFIG_BUILTIN_DTB_SOURCE="lx200mx" 41 42 # CONFIG_COMPACTION is not set 42 43 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 43 44 CONFIG_NET=y
+4 -1
arch/xtensa/kernel/head.S
··· 276 276 277 277 movi a2, cpu_start_ccount 278 278 1: 279 + memw 279 280 l32i a3, a2, 0 280 281 beqi a3, 0, 1b 281 282 movi a3, 0 282 283 s32i a3, a2, 0 283 - memw 284 284 1: 285 + memw 285 286 l32i a3, a2, 0 286 287 beqi a3, 0, 1b 287 288 wsr a3, ccount ··· 318 317 rsr a0, prid 319 318 neg a2, a0 320 319 movi a3, cpu_start_id 320 + memw 321 321 s32i a2, a3, 0 322 322 #if XCHAL_DCACHE_IS_WRITEBACK 323 323 dhwbi a3, 0 324 324 #endif 325 325 1: 326 + memw 326 327 l32i a2, a3, 0 327 328 dhi a3, 0 328 329 bne a2, a0, 1b
+27 -14
arch/xtensa/kernel/smp.c
··· 83 83 { 84 84 unsigned i; 85 85 86 - for (i = 0; i < max_cpus; ++i) 86 + for_each_possible_cpu(i) 87 87 set_cpu_present(i, true); 88 88 } 89 89 ··· 95 95 96 96 pr_info("%s: Core Count = %d\n", __func__, ncpus); 97 97 pr_info("%s: Core Id = %d\n", __func__, core_id); 98 + 99 + if (ncpus > NR_CPUS) { 100 + ncpus = NR_CPUS; 101 + pr_info("%s: limiting core count by %d\n", __func__, ncpus); 102 + } 98 103 99 104 for (i = 0; i < ncpus; ++i) 100 105 set_cpu_possible(i, true); ··· 200 195 int i; 201 196 202 197 #ifdef CONFIG_HOTPLUG_CPU 203 - cpu_start_id = cpu; 204 - system_flush_invalidate_dcache_range( 205 - (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); 198 + WRITE_ONCE(cpu_start_id, cpu); 199 + /* Pairs with the third memw in the cpu_restart */ 200 + mb(); 201 + system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id, 202 + sizeof(cpu_start_id)); 206 203 #endif 207 204 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); 208 205 ··· 213 206 ccount = get_ccount(); 214 207 while (!ccount); 215 208 216 - cpu_start_ccount = ccount; 209 + WRITE_ONCE(cpu_start_ccount, ccount); 217 210 218 - while (time_before(jiffies, timeout)) { 211 + do { 212 + /* 213 + * Pairs with the first two memws in the 214 + * .Lboot_secondary.
215 + */ 219 216 mb(); 220 - if (!cpu_start_ccount) 221 - break; 222 - } 217 + ccount = READ_ONCE(cpu_start_ccount); 218 + } while (ccount && time_before(jiffies, timeout)); 223 219 224 - if (cpu_start_ccount) { 220 + if (ccount) { 225 221 smp_call_function_single(0, mx_cpu_stop, 226 - (void *)cpu, 1); 227 - cpu_start_ccount = 0; 222 + (void *)cpu, 1); 223 + WRITE_ONCE(cpu_start_ccount, 0); 228 224 return -EIO; 229 225 } 230 226 } ··· 247 237 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", 248 238 __func__, cpu, idle, start_info.stack); 249 239 240 + init_completion(&cpu_running); 250 241 ret = boot_secondary(cpu, idle); 251 242 if (ret == 0) { 252 243 wait_for_completion_timeout(&cpu_running, ··· 309 298 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 310 299 while (time_before(jiffies, timeout)) { 311 300 system_invalidate_dcache_range((unsigned long)&cpu_start_id, 312 - sizeof(cpu_start_id)); 313 - if (cpu_start_id == -cpu) { 301 + sizeof(cpu_start_id)); 302 + /* Pairs with the second memw in the cpu_restart */ 303 + mb(); 304 + if (READ_ONCE(cpu_start_id) == -cpu) { 314 305 platform_cpu_kill(cpu); 315 306 return; 316 307 }
+1 -1
arch/xtensa/kernel/time.c
··· 89 89 container_of(evt, struct ccount_timer, evt); 90 90 91 91 if (timer->irq_enabled) { 92 - disable_irq(evt->irq); 92 + disable_irq_nosync(evt->irq); 93 93 timer->irq_enabled = 0; 94 94 } 95 95 return 0;
+5 -1
block/blk-core.c
··· 462 462 kblockd_schedule_work(&q->timeout_work); 463 463 } 464 464 465 + static void blk_timeout_work(struct work_struct *work) 466 + { 467 + } 468 + 465 469 /** 466 470 * blk_alloc_queue_node - allocate a request queue 467 471 * @gfp_mask: memory allocation flags ··· 509 505 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, 510 506 laptop_mode_timer_fn, 0); 511 507 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); 512 - INIT_WORK(&q->timeout_work, NULL); 508 + INIT_WORK(&q->timeout_work, blk_timeout_work); 513 509 INIT_LIST_HEAD(&q->icq_list); 514 510 #ifdef CONFIG_BLK_CGROUP 515 511 INIT_LIST_HEAD(&q->blkg_list);
+1 -1
block/blk-flush.c
··· 335 335 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); 336 336 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); 337 337 338 - blk_mq_run_hw_queue(hctx, true); 338 + blk_mq_sched_restart(hctx); 339 339 } 340 340 341 341 /**
+48 -8
block/blk-iolatency.c
··· 72 72 #include <linux/sched/loadavg.h> 73 73 #include <linux/sched/signal.h> 74 74 #include <trace/events/block.h> 75 + #include <linux/blk-mq.h> 75 76 #include "blk-rq-qos.h" 76 77 #include "blk-stat.h" 77 78 ··· 592 591 u64 now = ktime_to_ns(ktime_get()); 593 592 bool issue_as_root = bio_issue_as_root_blkg(bio); 594 593 bool enabled = false; 594 + int inflight = 0; 595 595 596 596 blkg = bio->bi_blkg; 597 597 if (!blkg || !bio_flagged(bio, BIO_TRACKED)) ··· 603 601 return; 604 602 605 603 enabled = blk_iolatency_enabled(iolat->blkiolat); 604 + if (!enabled) 605 + return; 606 + 606 607 while (blkg && blkg->parent) { 607 608 iolat = blkg_to_lat(blkg); 608 609 if (!iolat) { ··· 614 609 } 615 610 rqw = &iolat->rq_wait; 616 611 617 - atomic_dec(&rqw->inflight); 618 - if (!enabled || iolat->min_lat_nsec == 0) 612 + inflight = atomic_dec_return(&rqw->inflight); 613 + WARN_ON_ONCE(inflight < 0); 614 + if (iolat->min_lat_nsec == 0) 619 615 goto next; 620 616 iolatency_record_time(iolat, &bio->bi_issue, now, 621 617 issue_as_root); ··· 760 754 return 0; 761 755 } 762 756 763 - static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) 757 + /* 758 + * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise 759 + * return 0. 
760 + */ 761 + static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) 764 762 { 765 763 struct iolatency_grp *iolat = blkg_to_lat(blkg); 766 - struct blk_iolatency *blkiolat = iolat->blkiolat; 767 764 u64 oldval = iolat->min_lat_nsec; 768 765 769 766 iolat->min_lat_nsec = val; ··· 775 766 BLKIOLATENCY_MAX_WIN_SIZE); 776 767 777 768 if (!oldval && val) 778 - atomic_inc(&blkiolat->enabled); 769 + return 1; 779 770 if (oldval && !val) 780 - atomic_dec(&blkiolat->enabled); 771 + return -1; 772 + return 0; 781 773 } 782 774 783 775 static void iolatency_clear_scaling(struct blkcg_gq *blkg) ··· 810 800 u64 lat_val = 0; 811 801 u64 oldval; 812 802 int ret; 803 + int enable = 0; 813 804 814 805 ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); 815 806 if (ret) ··· 845 834 blkg = ctx.blkg; 846 835 oldval = iolat->min_lat_nsec; 847 836 848 - iolatency_set_min_lat_nsec(blkg, lat_val); 837 + enable = iolatency_set_min_lat_nsec(blkg, lat_val); 838 + if (enable) { 839 + WARN_ON_ONCE(!blk_get_queue(blkg->q)); 840 + blkg_get(blkg); 841 + } 842 + 849 843 if (oldval != iolat->min_lat_nsec) { 850 844 iolatency_clear_scaling(blkg); 851 845 } ··· 858 842 ret = 0; 859 843 out: 860 844 blkg_conf_finish(&ctx); 845 + if (ret == 0 && enable) { 846 + struct iolatency_grp *tmp = blkg_to_lat(blkg); 847 + struct blk_iolatency *blkiolat = tmp->blkiolat; 848 + 849 + blk_mq_freeze_queue(blkg->q); 850 + 851 + if (enable == 1) 852 + atomic_inc(&blkiolat->enabled); 853 + else if (enable == -1) 854 + atomic_dec(&blkiolat->enabled); 855 + else 856 + WARN_ON_ONCE(1); 857 + 858 + blk_mq_unfreeze_queue(blkg->q); 859 + 860 + blkg_put(blkg); 861 + blk_put_queue(blkg->q); 862 + } 861 863 return ret ?: nbytes; 862 864 } 863 865 ··· 1011 977 { 1012 978 struct iolatency_grp *iolat = pd_to_lat(pd); 1013 979 struct blkcg_gq *blkg = lat_to_blkg(iolat); 980 + struct blk_iolatency *blkiolat = iolat->blkiolat; 981 + int ret; 1014 982 1015 - iolatency_set_min_lat_nsec(blkg, 0); 983 + ret 
= iolatency_set_min_lat_nsec(blkg, 0); 984 + if (ret == 1) 985 + atomic_inc(&blkiolat->enabled); 986 + if (ret == -1) 987 + atomic_dec(&blkiolat->enabled); 1016 988 iolatency_clear_scaling(blkg); 1017 989 } 1018 990
+3
block/blk-mq-debugfs.c
··· 839 839 static bool debugfs_create_files(struct dentry *parent, void *data, 840 840 const struct blk_mq_debugfs_attr *attr) 841 841 { 842 + if (IS_ERR_OR_NULL(parent)) 843 + return false; 844 + 842 845 d_inode(parent)->i_private = data; 843 846 844 847 for (; attr->name; attr++) {
-1
block/blk-mq.h
··· 36 36 struct kobject kobj; 37 37 } ____cacheline_aligned_in_smp; 38 38 39 - void blk_mq_freeze_queue(struct request_queue *q); 40 39 void blk_mq_free_queue(struct request_queue *q); 41 40 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 42 41 void blk_mq_wake_waiters(struct request_queue *q);
+3
drivers/acpi/bus.c
··· 1029 1029 1030 1030 acpi_permanent_mmap = true; 1031 1031 1032 + /* Initialize debug output. Linux does not use ACPICA defaults */ 1033 + acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; 1034 + 1032 1035 #ifdef CONFIG_X86 1033 1036 /* 1034 1037 * If the machine falls into the DMI check table,
+22 -15
drivers/android/binder.c
··· 5914 5914 static int __init binder_init(void) 5915 5915 { 5916 5916 int ret; 5917 - char *device_name, *device_names, *device_tmp; 5917 + char *device_name, *device_tmp; 5918 5918 struct binder_device *device; 5919 5919 struct hlist_node *tmp; 5920 + char *device_names = NULL; 5920 5921 5921 5922 ret = binder_alloc_shrinker_init(); 5922 5923 if (ret) ··· 5959 5958 &transaction_log_fops); 5960 5959 } 5961 5960 5962 - /* 5963 - * Copy the module_parameter string, because we don't want to 5964 - * tokenize it in-place. 5965 - */ 5966 - device_names = kstrdup(binder_devices_param, GFP_KERNEL); 5967 - if (!device_names) { 5968 - ret = -ENOMEM; 5969 - goto err_alloc_device_names_failed; 5961 + if (strcmp(binder_devices_param, "") != 0) { 5962 + /* 5963 + * Copy the module_parameter string, because we don't want to 5964 + * tokenize it in-place. 5965 + */ 5966 + device_names = kstrdup(binder_devices_param, GFP_KERNEL); 5967 + if (!device_names) { 5968 + ret = -ENOMEM; 5969 + goto err_alloc_device_names_failed; 5970 + } 5971 + 5972 + device_tmp = device_names; 5973 + while ((device_name = strsep(&device_tmp, ","))) { 5974 + ret = init_binder_device(device_name); 5975 + if (ret) 5976 + goto err_init_binder_device_failed; 5977 + } 5970 5978 } 5971 5979 5972 - device_tmp = device_names; 5973 - while ((device_name = strsep(&device_tmp, ","))) { 5974 - ret = init_binder_device(device_name); 5975 - if (ret) 5976 - goto err_init_binder_device_failed; 5977 - } 5980 + ret = init_binderfs(); 5981 + if (ret) 5982 + goto err_init_binder_device_failed; 5978 5983 5979 5984 return ret; 5980 5985
+9
drivers/android/binder_internal.h
··· 46 46 } 47 47 #endif 48 48 49 + #ifdef CONFIG_ANDROID_BINDERFS 50 + extern int __init init_binderfs(void); 51 + #else 52 + static inline int __init init_binderfs(void) 53 + { 54 + return 0; 55 + } 56 + #endif 57 + 49 58 #endif /* _LINUX_BINDER_INTERNAL_H */
+10 -4
drivers/android/binderfs.c
··· 395 395 struct inode *inode = NULL; 396 396 struct dentry *root = sb->s_root; 397 397 struct binderfs_info *info = sb->s_fs_info; 398 + #if defined(CONFIG_IPC_NS) 399 + bool use_reserve = (info->ipc_ns == &init_ipc_ns); 400 + #else 401 + bool use_reserve = true; 402 + #endif 398 403 399 404 device = kzalloc(sizeof(*device), GFP_KERNEL); 400 405 if (!device) ··· 418 413 419 414 /* Reserve a new minor number for the new device. */ 420 415 mutex_lock(&binderfs_minors_mutex); 421 - minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); 416 + minor = ida_alloc_max(&binderfs_minors, 417 + use_reserve ? BINDERFS_MAX_MINOR : 418 + BINDERFS_MAX_MINOR_CAPPED, 419 + GFP_KERNEL); 422 420 mutex_unlock(&binderfs_minors_mutex); 423 421 if (minor < 0) { 424 422 ret = minor; ··· 550 542 .fs_flags = FS_USERNS_MOUNT, 551 543 }; 552 544 553 - static int __init init_binderfs(void) 545 + int __init init_binderfs(void) 554 546 { 555 547 int ret; 556 548 ··· 568 560 569 561 return ret; 570 562 } 571 - 572 - device_initcall(init_binderfs);
+1
drivers/ata/libata-core.c
··· 4554 4554 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, 4555 4555 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, 4556 4556 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, 4557 + { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, }, 4557 4558 4558 4559 /* devices that don't properly handle queued TRIM commands */ 4559 4560 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+2 -4
drivers/base/cacheinfo.c
··· 79 79 ct_idx = get_cacheinfo_idx(this_leaf->type); 80 80 propname = cache_type_info[ct_idx].size_prop; 81 81 82 - if (of_property_read_u32(np, propname, &this_leaf->size)) 83 - this_leaf->size = 0; 82 + of_property_read_u32(np, propname, &this_leaf->size); 84 83 } 85 84 86 85 /* not cache_line_size() because that's a macro in include/linux/cache.h */ ··· 113 114 ct_idx = get_cacheinfo_idx(this_leaf->type); 114 115 propname = cache_type_info[ct_idx].nr_sets_prop; 115 116 116 - if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) 117 - this_leaf->number_of_sets = 0; 117 + of_property_read_u32(np, propname, &this_leaf->number_of_sets); 118 118 } 119 119 120 120 static void cache_associativity(struct cacheinfo *this_leaf)
+5 -5
drivers/base/power/runtime.c
··· 130 130 { 131 131 int autosuspend_delay; 132 132 u64 last_busy, expires = 0; 133 - u64 now = ktime_to_ns(ktime_get()); 133 + u64 now = ktime_get_mono_fast_ns(); 134 134 135 135 if (!dev->power.use_autosuspend) 136 136 goto out; ··· 909 909 * If 'expires' is after the current time, we've been called 910 910 * too early. 911 911 */ 912 - if (expires > 0 && expires < ktime_to_ns(ktime_get())) { 912 + if (expires > 0 && expires < ktime_get_mono_fast_ns()) { 913 913 dev->power.timer_expires = 0; 914 914 rpm_suspend(dev, dev->power.timer_autosuspends ? 915 915 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); ··· 928 928 int pm_schedule_suspend(struct device *dev, unsigned int delay) 929 929 { 930 930 unsigned long flags; 931 - ktime_t expires; 931 + u64 expires; 932 932 int retval; 933 933 934 934 spin_lock_irqsave(&dev->power.lock, flags); ··· 945 945 /* Other scheduled or pending requests need to be canceled. */ 946 946 pm_runtime_cancel_pending(dev); 947 947 948 - expires = ktime_add(ktime_get(), ms_to_ktime(delay)); 949 - dev->power.timer_expires = ktime_to_ns(expires); 948 + expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; 949 + dev->power.timer_expires = expires; 950 950 dev->power.timer_autosuspends = 0; 951 951 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); 952 952
+12 -2
drivers/clk/clk.c
··· 1513 1513 if (!parent) 1514 1514 return -EINVAL; 1515 1515 1516 - for (i = 0; i < core->num_parents; i++) 1517 - if (clk_core_get_parent_by_index(core, i) == parent) 1516 + for (i = 0; i < core->num_parents; i++) { 1517 + if (core->parents[i] == parent) 1518 1518 return i; 1519 + 1520 + if (core->parents[i]) 1521 + continue; 1522 + 1523 + /* Fallback to comparing globally unique names */ 1524 + if (!strcmp(parent->name, core->parent_names[i])) { 1525 + core->parents[i] = parent; 1526 + return i; 1527 + } 1528 + } 1519 1529 1520 1530 return -EINVAL; 1521 1531 }
+3 -2
drivers/clk/imx/clk-frac-pll.c
··· 155 155 { 156 156 struct clk_frac_pll *pll = to_clk_frac_pll(hw); 157 157 u32 val, divfi, divff; 158 - u64 temp64 = parent_rate; 158 + u64 temp64; 159 159 int ret; 160 160 161 161 parent_rate *= 8; 162 162 rate *= 2; 163 163 divfi = rate / parent_rate; 164 - temp64 *= rate - divfi; 164 + temp64 = parent_rate * divfi; 165 + temp64 = rate - temp64; 165 166 temp64 *= PLL_FRAC_DENOM; 166 167 do_div(temp64, parent_rate); 167 168 divff = temp64;
-4
drivers/clk/mmp/clk-of-mmp2.c
··· 53 53 #define APMU_DISP1 0x110 54 54 #define APMU_CCIC0 0x50 55 55 #define APMU_CCIC1 0xf4 56 - #define APMU_SP 0x68 57 56 #define MPMU_UART_PLL 0x14 58 57 59 58 struct mmp2_clk_unit { ··· 209 210 .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), 210 211 }; 211 212 212 - static DEFINE_SPINLOCK(sp_lock); 213 - 214 213 static struct mmp_param_mux_clk apmu_mux_clks[] = { 215 214 {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, 216 215 {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, ··· 239 242 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, 240 243 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, 241 244 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, 242 - {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock}, 243 245 }; 244 246 245 247 static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
+10 -4
drivers/clk/qcom/gcc-sdm845.c
··· 115 115 "core_bi_pll_test_se", 116 116 }; 117 117 118 - static const char * const gcc_parent_names_7[] = { 119 - "bi_tcxo", 118 + static const char * const gcc_parent_names_7_ao[] = { 119 + "bi_tcxo_ao", 120 120 "gpll0", 121 121 "gpll0_out_even", 122 122 "core_bi_pll_test_se", ··· 124 124 125 125 static const char * const gcc_parent_names_8[] = { 126 126 "bi_tcxo", 127 + "gpll0", 128 + "core_bi_pll_test_se", 129 + }; 130 + 131 + static const char * const gcc_parent_names_8_ao[] = { 132 + "bi_tcxo_ao", 127 133 "gpll0", 128 134 "core_bi_pll_test_se", 129 135 }; ··· 216 210 .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, 217 211 .clkr.hw.init = &(struct clk_init_data){ 218 212 .name = "gcc_cpuss_ahb_clk_src", 219 - .parent_names = gcc_parent_names_7, 213 + .parent_names = gcc_parent_names_7_ao, 220 214 .num_parents = 4, 221 215 .ops = &clk_rcg2_ops, 222 216 }, ··· 235 229 .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, 236 230 .clkr.hw.init = &(struct clk_init_data){ 237 231 .name = "gcc_cpuss_rbcpr_clk_src", 238 - .parent_names = gcc_parent_names_8, 232 + .parent_names = gcc_parent_names_8_ao, 239 233 .num_parents = 3, 240 234 .ops = &clk_rcg2_ops, 241 235 },
+10 -1
drivers/clk/ti/divider.c
··· 403 403 num_dividers = i; 404 404 405 405 tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); 406 - if (!tmp) 406 + if (!tmp) { 407 + *table = ERR_PTR(-ENOMEM); 407 408 return -ENOMEM; 409 + } 408 410 409 411 valid_div = 0; 410 412 *width = 0; ··· 441 439 { 442 440 struct clk_omap_divider *div; 443 441 struct clk_omap_reg *reg; 442 + int ret; 444 443 445 444 if (!setup) 446 445 return NULL; ··· 461 458 div->flags |= CLK_DIVIDER_POWER_OF_TWO; 462 459 463 460 div->table = _get_div_table_from_setup(setup, &div->width); 461 + if (IS_ERR(div->table)) { 462 + ret = PTR_ERR(div->table); 463 + kfree(div); 464 + return ERR_PTR(ret); 465 + } 466 + 464 467 465 468 div->shift = setup->bit_shift; 466 469 div->latch = -EINVAL;
+1 -1
drivers/cpuidle/poll_state.c
··· 21 21 local_irq_enable(); 22 22 if (!current_set_polling_and_test()) { 23 23 unsigned int loop_count = 0; 24 - u64 limit = TICK_USEC; 24 + u64 limit = TICK_NSEC; 25 25 int i; 26 26 27 27 for (i = 1; i < drv->state_count; i++) {
+6 -4
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
··· 537 537 struct nitrox_device *ndev = cmdq->ndev; 538 538 struct nitrox_softreq *sr; 539 539 int req_completed = 0, err = 0, budget; 540 + completion_t callback; 541 + void *cb_arg; 540 542 541 543 /* check all pending requests */ 542 544 budget = atomic_read(&cmdq->pending_count); ··· 566 564 smp_mb__after_atomic(); 567 565 /* remove from response list */ 568 566 response_list_del(sr, cmdq); 569 - 570 567 /* ORH error code */ 571 568 err = READ_ONCE(*sr->resp.orh) & 0xff; 572 - 573 - if (sr->callback) 574 - sr->callback(sr->cb_arg, err); 569 + callback = sr->callback; 570 + cb_arg = sr->cb_arg; 575 571 softreq_destroy(sr); 572 + if (callback) 573 + callback(cb_arg, err); 576 574 577 575 req_completed++; 578 576 }
+10 -9
drivers/dma/at_xdmac.c
··· 203 203 u32 save_cim; 204 204 u32 save_cnda; 205 205 u32 save_cndc; 206 + u32 irq_status; 206 207 unsigned long status; 207 208 struct tasklet_struct tasklet; 208 209 struct dma_slave_config sconfig; ··· 1581 1580 struct at_xdmac_desc *desc; 1582 1581 u32 error_mask; 1583 1582 1584 - dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", 1585 - __func__, atchan->status); 1583 + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", 1584 + __func__, atchan->irq_status); 1586 1585 1587 1586 error_mask = AT_XDMAC_CIS_RBEIS 1588 1587 | AT_XDMAC_CIS_WBEIS ··· 1590 1589 1591 1590 if (at_xdmac_chan_is_cyclic(atchan)) { 1592 1591 at_xdmac_handle_cyclic(atchan); 1593 - } else if ((atchan->status & AT_XDMAC_CIS_LIS) 1594 - || (atchan->status & error_mask)) { 1592 + } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) 1593 + || (atchan->irq_status & error_mask)) { 1595 1594 struct dma_async_tx_descriptor *txd; 1596 1595 1597 - if (atchan->status & AT_XDMAC_CIS_RBEIS) 1596 + if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) 1598 1597 dev_err(chan2dev(&atchan->chan), "read bus error!!!"); 1599 - if (atchan->status & AT_XDMAC_CIS_WBEIS) 1598 + if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) 1600 1599 dev_err(chan2dev(&atchan->chan), "write bus error!!!"); 1601 - if (atchan->status & AT_XDMAC_CIS_ROIS) 1600 + if (atchan->irq_status & AT_XDMAC_CIS_ROIS) 1602 1601 dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); 1603 1602 1604 1603 spin_lock(&atchan->lock); ··· 1653 1652 atchan = &atxdmac->chan[i]; 1654 1653 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); 1655 1654 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); 1656 - atchan->status = chan_status & chan_imr; 1655 + atchan->irq_status = chan_status & chan_imr; 1657 1656 dev_vdbg(atxdmac->dma.dev, 1658 1657 "%s: chan%d: imr=0x%x, status=0x%x\n", 1659 1658 __func__, i, chan_imr, chan_status); ··· 1667 1666 at_xdmac_chan_read(atchan, AT_XDMAC_CDA), 1668 1667 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1670 - if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) 1669 + if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) 1671 1670 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); 1672 1671 1673 1672 tasklet_schedule(&atchan->tasklet);
+25 -45
drivers/dma/bcm2835-dma.c
··· 406 406 } 407 407 } 408 408 409 - static int bcm2835_dma_abort(void __iomem *chan_base) 409 + static int bcm2835_dma_abort(struct bcm2835_chan *c) 410 410 { 411 - unsigned long cs; 411 + void __iomem *chan_base = c->chan_base; 412 412 long int timeout = 10000; 413 413 414 - cs = readl(chan_base + BCM2835_DMA_CS); 415 - if (!(cs & BCM2835_DMA_ACTIVE)) 414 + /* 415 + * A zero control block address means the channel is idle. 416 + * (The ACTIVE flag in the CS register is not a reliable indicator.) 417 + */ 418 + if (!readl(chan_base + BCM2835_DMA_ADDR)) 416 419 return 0; 417 420 418 421 /* Write 0 to the active bit - Pause the DMA */ 419 422 writel(0, chan_base + BCM2835_DMA_CS); 420 423 421 424 /* Wait for any current AXI transfer to complete */ 422 - while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { 425 + while ((readl(chan_base + BCM2835_DMA_CS) & 426 + BCM2835_DMA_WAITING_FOR_WRITES) && --timeout) 423 427 cpu_relax(); 424 - cs = readl(chan_base + BCM2835_DMA_CS); 425 - } 426 428 427 - /* We'll un-pause when we set of our next DMA */ 429 + /* Peripheral might be stuck and fail to signal AXI write responses */ 428 430 if (!timeout) 429 - return -ETIMEDOUT; 431 + dev_err(c->vc.chan.device->dev, 432 + "failed to complete outstanding writes\n"); 430 433 431 - if (!(cs & BCM2835_DMA_ACTIVE)) 432 - return 0; 433 - 434 - /* Terminate the control block chain */ 435 - writel(0, chan_base + BCM2835_DMA_NEXTCB); 436 - 437 - /* Abort the whole DMA */ 438 - writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, 439 - chan_base + BCM2835_DMA_CS); 440 - 434 + writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); 441 435 return 0; 442 436 } 443 437 ··· 470 476 471 477 spin_lock_irqsave(&c->vc.lock, flags); 472 478 473 - /* Acknowledge interrupt */ 474 - writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); 479 + /* 480 + * Clear the INT flag to receive further interrupts. 
Keep the channel 481 + * active in case the descriptor is cyclic or in case the client has 482 + * already terminated the descriptor and issued a new one. (May happen 483 + * if this IRQ handler is threaded.) If the channel is finished, it 484 + * will remain idle despite the ACTIVE flag being set. 485 + */ 486 + writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, 487 + c->chan_base + BCM2835_DMA_CS); 475 488 476 489 d = c->desc; 477 490 ··· 486 485 if (d->cyclic) { 487 486 /* call the cyclic callback */ 488 487 vchan_cyclic_callback(&d->vd); 489 - 490 - /* Keep the DMA engine running */ 491 - writel(BCM2835_DMA_ACTIVE, 492 - c->chan_base + BCM2835_DMA_CS); 493 - } else { 488 + } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { 494 489 vchan_cookie_complete(&c->desc->vd); 495 490 bcm2835_dma_start_desc(c); 496 491 } ··· 776 779 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 777 780 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); 778 781 unsigned long flags; 779 - int timeout = 10000; 780 782 LIST_HEAD(head); 781 783 782 784 spin_lock_irqsave(&c->vc.lock, flags); ··· 785 789 list_del_init(&c->node); 786 790 spin_unlock(&d->lock); 787 791 788 - /* 789 - * Stop DMA activity: we assume the callback will not be called 790 - * after bcm_dma_abort() returns (even if it does, it will see 791 - * c->desc is NULL and exit.) 792 - */ 792 + /* stop DMA activity */ 793 793 if (c->desc) { 794 794 vchan_terminate_vdesc(&c->desc->vd); 795 795 c->desc = NULL; 796 - bcm2835_dma_abort(c->chan_base); 797 - 798 - /* Wait for stopping */ 799 - while (--timeout) { 800 - if (!(readl(c->chan_base + BCM2835_DMA_CS) & 801 - BCM2835_DMA_ACTIVE)) 802 - break; 803 - 804 - cpu_relax(); 805 - } 806 - 807 - if (!timeout) 808 - dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); 796 + bcm2835_dma_abort(c); 809 797 } 810 798 811 799 vchan_get_all_descriptors(&c->vc, &head);
+14 -18
drivers/dma/dmatest.c
··· 711 711 srcs[i] = um->addr[i] + src_off; 712 712 ret = dma_mapping_error(dev->dev, um->addr[i]); 713 713 if (ret) { 714 - dmaengine_unmap_put(um); 715 714 result("src mapping error", total_tests, 716 715 src_off, dst_off, len, ret); 717 - failed_tests++; 718 - continue; 716 + goto error_unmap_continue; 719 717 } 720 718 um->to_cnt++; 721 719 } ··· 728 730 DMA_BIDIRECTIONAL); 729 731 ret = dma_mapping_error(dev->dev, dsts[i]); 730 732 if (ret) { 731 - dmaengine_unmap_put(um); 732 733 result("dst mapping error", total_tests, 733 734 src_off, dst_off, len, ret); 734 - failed_tests++; 735 - continue; 735 + goto error_unmap_continue; 736 736 } 737 737 um->bidi_cnt++; 738 738 } ··· 758 762 } 759 763 760 764 if (!tx) { 761 - dmaengine_unmap_put(um); 762 765 result("prep error", total_tests, src_off, 763 766 dst_off, len, ret); 764 767 msleep(100); 765 - failed_tests++; 766 - continue; 768 + goto error_unmap_continue; 767 769 } 768 770 769 771 done->done = false; ··· 770 776 cookie = tx->tx_submit(tx); 771 777 772 778 if (dma_submit_error(cookie)) { 773 - dmaengine_unmap_put(um); 774 779 result("submit error", total_tests, src_off, 775 780 dst_off, len, ret); 776 781 msleep(100); 777 - failed_tests++; 778 - continue; 782 + goto error_unmap_continue; 779 783 } 780 784 dma_async_issue_pending(chan); 781 785 ··· 782 790 783 791 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 784 792 785 - dmaengine_unmap_put(um); 786 - 787 793 if (!done->done) { 788 794 result("test timed out", total_tests, src_off, dst_off, 789 795 len, 0); 790 - failed_tests++; 791 - continue; 796 + goto error_unmap_continue; 792 797 } else if (status != DMA_COMPLETE) { 793 798 result(status == DMA_ERROR ? 
794 799 "completion error status" : 795 800 "completion busy status", total_tests, src_off, 796 801 dst_off, len, ret); 797 - failed_tests++; 798 - continue; 802 + goto error_unmap_continue; 799 803 } 804 + 805 + dmaengine_unmap_put(um); 800 806 801 807 if (params->noverify) { 802 808 verbose_result("test passed", total_tests, src_off, ··· 836 846 verbose_result("test passed", total_tests, src_off, 837 847 dst_off, len, 0); 838 848 } 849 + 850 + continue; 851 + 852 + error_unmap_continue: 853 + dmaengine_unmap_put(um); 854 + failed_tests++; 839 855 } 840 856 ktime = ktime_sub(ktime_get(), ktime); 841 857 ktime = ktime_sub(ktime, comparetime);
+4 -4
drivers/dma/imx-dma.c
··· 618 618 { 619 619 struct imxdma_channel *imxdmac = (void *)data; 620 620 struct imxdma_engine *imxdma = imxdmac->imxdma; 621 - struct imxdma_desc *desc; 621 + struct imxdma_desc *desc, *next_desc; 622 622 unsigned long flags; 623 623 624 624 spin_lock_irqsave(&imxdma->lock, flags); ··· 648 648 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); 649 649 650 650 if (!list_empty(&imxdmac->ld_queue)) { 651 - desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, 652 - node); 651 + next_desc = list_first_entry(&imxdmac->ld_queue, 652 + struct imxdma_desc, node); 653 653 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); 654 - if (imxdma_xfer_desc(desc) < 0) 654 + if (imxdma_xfer_desc(next_desc) < 0) 655 655 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", 656 656 __func__, imxdmac->channel); 657 657 }
+7 -2
drivers/firmware/arm_scmi/bus.c
··· 119 119 } 120 120 EXPORT_SYMBOL_GPL(scmi_driver_unregister); 121 121 122 + static void scmi_device_release(struct device *dev) 123 + { 124 + kfree(to_scmi_dev(dev)); 125 + } 126 + 122 127 struct scmi_device * 123 128 scmi_device_create(struct device_node *np, struct device *parent, int protocol) 124 129 { ··· 143 138 scmi_dev->dev.parent = parent; 144 139 scmi_dev->dev.of_node = np; 145 140 scmi_dev->dev.bus = &scmi_bus_type; 141 + scmi_dev->dev.release = scmi_device_release; 146 142 dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); 147 143 148 144 retval = device_register(&scmi_dev->dev); ··· 162 156 void scmi_device_destroy(struct scmi_device *scmi_dev) 163 157 { 164 158 scmi_handle_put(scmi_dev->handle); 165 - device_unregister(&scmi_dev->dev); 166 159 ida_simple_remove(&scmi_bus_id, scmi_dev->id); 167 - kfree(scmi_dev); 160 + device_unregister(&scmi_dev->dev); 168 161 } 169 162 170 163 void scmi_set_handle(struct scmi_device *scmi_dev)
+3 -2
drivers/firmware/efi/arm-runtime.c
··· 37 37 static struct ptdump_info efi_ptdump_info = { 38 38 .mm = &efi_mm, 39 39 .markers = (struct addr_marker[]){ 40 - { 0, "UEFI runtime start" }, 41 - { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } 40 + { 0, "UEFI runtime start" }, 41 + { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }, 42 + { -1, NULL } 42 43 }, 43 44 .base_addr = 0, 44 45 };
+1 -4
drivers/fpga/stratix10-soc.c
··· 508 508 return -ENODEV; 509 509 510 510 np = of_find_matching_node(fw_np, s10_of_match); 511 - if (!np) { 512 - of_node_put(fw_np); 511 + if (!np) 513 512 return -ENODEV; 514 - } 515 513 516 514 of_node_put(np); 517 515 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); 518 - of_node_put(fw_np); 519 516 if (ret) 520 517 return ret; 521 518
+3 -1
drivers/gpio/gpio-altera-a10sr.c
··· 66 66 static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, 67 67 unsigned int nr, int value) 68 68 { 69 - if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) 69 + if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) { 70 + altr_a10sr_gpio_set(gc, nr, value); 70 71 return 0; 72 + } 71 73 return -EINVAL; 72 74 } 73 75
+13 -1
drivers/gpio/gpio-eic-sprd.c
··· 180 180 181 181 static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) 182 182 { 183 - return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); 183 + struct sprd_eic *sprd_eic = gpiochip_get_data(chip); 184 + 185 + switch (sprd_eic->type) { 186 + case SPRD_EIC_DEBOUNCE: 187 + return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); 188 + case SPRD_EIC_ASYNC: 189 + return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA); 190 + case SPRD_EIC_SYNC: 191 + return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA); 192 + default: 193 + return -ENOTSUPP; 194 + } 184 195 } 185 196 186 197 static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) ··· 379 368 irq_set_handler_locked(data, handle_edge_irq); 380 369 break; 381 370 case IRQ_TYPE_EDGE_BOTH: 371 + sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0); 382 372 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); 383 373 irq_set_handler_locked(data, handle_edge_irq); 384 374 break;
+12 -14
drivers/gpio/gpio-pcf857x.c
··· 84 84 */ 85 85 struct pcf857x { 86 86 struct gpio_chip chip; 87 + struct irq_chip irqchip; 87 88 struct i2c_client *client; 88 89 struct mutex lock; /* protect 'out' */ 89 90 unsigned out; /* software latch */ ··· 253 252 mutex_unlock(&gpio->lock); 254 253 } 255 254 256 - static struct irq_chip pcf857x_irq_chip = { 257 - .name = "pcf857x", 258 - .irq_enable = pcf857x_irq_enable, 259 - .irq_disable = pcf857x_irq_disable, 260 - .irq_ack = noop, 261 - .irq_mask = noop, 262 - .irq_unmask = noop, 263 - .irq_set_wake = pcf857x_irq_set_wake, 264 - .irq_bus_lock = pcf857x_irq_bus_lock, 265 - .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, 266 - }; 267 - 268 255 /*-------------------------------------------------------------------------*/ 269 256 270 257 static int pcf857x_probe(struct i2c_client *client, ··· 365 376 366 377 /* Enable irqchip if we have an interrupt */ 367 378 if (client->irq) { 379 + gpio->irqchip.name = "pcf857x", 380 + gpio->irqchip.irq_enable = pcf857x_irq_enable, 381 + gpio->irqchip.irq_disable = pcf857x_irq_disable, 382 + gpio->irqchip.irq_ack = noop, 383 + gpio->irqchip.irq_mask = noop, 384 + gpio->irqchip.irq_unmask = noop, 385 + gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake, 386 + gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock, 387 + gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, 368 388 status = gpiochip_irqchip_add_nested(&gpio->chip, 369 - &pcf857x_irq_chip, 389 + &gpio->irqchip, 370 390 0, handle_level_irq, 371 391 IRQ_TYPE_NONE); 372 392 if (status) { ··· 390 392 if (status) 391 393 goto fail; 392 394 393 - gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, 395 + gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip, 394 396 client->irq); 395 397 gpio->irq_parent = client->irq; 396 398 }
+5
drivers/gpio/gpio-vf610.c
··· 253 253 struct vf610_gpio_port *port; 254 254 struct resource *iores; 255 255 struct gpio_chip *gc; 256 + int i; 256 257 int ret; 257 258 258 259 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); ··· 319 318 ret = gpiochip_add_data(gc, port); 320 319 if (ret < 0) 321 320 return ret; 321 + 322 + /* Mask all GPIO interrupts */ 323 + for (i = 0; i < gc->ngpio; i++) 324 + vf610_gpio_writel(0, port->base + PORT_PCR(i)); 322 325 323 326 /* Clear the interrupt status register for all GPIO's */ 324 327 vf610_gpio_writel(~0, port->base + PORT_ISFR);
+8 -1
drivers/gpio/gpiolib.c
··· 828 828 /* Do not leak kernel stack to userspace */ 829 829 memset(&ge, 0, sizeof(ge)); 830 830 831 - ge.timestamp = le->timestamp; 831 + /* 832 + * We may be running from a nested threaded interrupt in which case 833 + * we didn't get the timestamp from lineevent_irq_handler(). 834 + */ 835 + if (!le->timestamp) 836 + ge.timestamp = ktime_get_real_ns(); 837 + else 838 + ge.timestamp = le->timestamp; 832 839 833 840 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE 834 841 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 1686 1686 effective_mode &= ~S_IWUSR; 1687 1687 1688 1688 if ((adev->flags & AMD_IS_APU) && 1689 - (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 1689 + (attr == &sensor_dev_attr_power1_average.dev_attr.attr || 1690 + attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 1690 1691 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 1691 1692 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 1692 1693 return 0;
+51 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
··· 38 38 #include "amdgpu_gem.h" 39 39 #include <drm/amdgpu_drm.h> 40 40 #include <linux/dma-buf.h> 41 + #include <linux/dma-fence-array.h> 41 42 42 43 /** 43 44 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table ··· 188 187 return ERR_PTR(ret); 189 188 } 190 189 190 + static int 191 + __reservation_object_make_exclusive(struct reservation_object *obj) 192 + { 193 + struct dma_fence **fences; 194 + unsigned int count; 195 + int r; 196 + 197 + if (!reservation_object_get_list(obj)) /* no shared fences to convert */ 198 + return 0; 199 + 200 + r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); 201 + if (r) 202 + return r; 203 + 204 + if (count == 0) { 205 + /* Now that was unexpected. */ 206 + } else if (count == 1) { 207 + reservation_object_add_excl_fence(obj, fences[0]); 208 + dma_fence_put(fences[0]); 209 + kfree(fences); 210 + } else { 211 + struct dma_fence_array *array; 212 + 213 + array = dma_fence_array_create(count, fences, 214 + dma_fence_context_alloc(1), 0, 215 + false); 216 + if (!array) 217 + goto err_fences_put; 218 + 219 + reservation_object_add_excl_fence(obj, &array->base); 220 + dma_fence_put(&array->base); 221 + } 222 + 223 + return 0; 224 + 225 + err_fences_put: 226 + while (count--) 227 + dma_fence_put(fences[count]); 228 + kfree(fences); 229 + return -ENOMEM; 230 + } 231 + 191 232 /** 192 233 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation 193 234 * @dma_buf: Shared DMA buffer ··· 261 218 262 219 if (attach->dev->driver != adev->dev->driver) { 263 220 /* 264 - * Wait for all shared fences to complete before we switch to future 265 - * use of exclusive fence on this prime shared bo. 221 + * We only create shared fences for internal use, but importers 222 + * of the dmabuf rely on exclusive fences for implicitly 223 + * tracking write hazards. 
As any of the current fences may 224 + * correspond to a write, we need to convert all existing 225 + * fences on the reservation object into a single exclusive 226 + * fence. 266 227 */ 267 - r = reservation_object_wait_timeout_rcu(bo->tbo.resv, 268 - true, false, 269 - MAX_SCHEDULE_TIMEOUT); 270 - if (unlikely(r < 0)) { 271 - DRM_DEBUG_PRIME("Fence wait failed: %li\n", r); 228 + r = __reservation_object_make_exclusive(bo->tbo.resv); 229 + if (r) 272 230 goto error_unreserve; 273 - } 274 231 } 275 232 276 233 /* pin buffer into GTT */
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 3363 3363 struct amdgpu_task_info *task_info) 3364 3364 { 3365 3365 struct amdgpu_vm *vm; 3366 + unsigned long flags; 3366 3367 3367 - spin_lock(&adev->vm_manager.pasid_lock); 3368 + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3368 3369 3369 3370 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3370 3371 if (vm) 3371 3372 *task_info = vm->task_info; 3372 3373 3373 - spin_unlock(&adev->vm_manager.pasid_lock); 3374 + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3374 3375 } 3375 3376 3376 3377 /**
+13
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
··· 93 93 static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, 94 94 bool enable) 95 95 { 96 + u32 tmp = 0; 96 97 98 + if (enable) { 99 + tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) | 100 + REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) | 101 + REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0); 102 + 103 + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW, 104 + lower_32_bits(adev->doorbell.base)); 105 + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH, 106 + upper_32_bits(adev->doorbell.base)); 107 + } 108 + 109 + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp); 97 110 } 98 111 99 112 static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
+4 -2
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 729 729 case CHIP_RAVEN: 730 730 adev->asic_funcs = &soc15_asic_funcs; 731 731 if (adev->rev_id >= 0x8) 732 - adev->external_rev_id = adev->rev_id + 0x81; 732 + adev->external_rev_id = adev->rev_id + 0x79; 733 733 else if (adev->pdev->device == 0x15d8) 734 734 adev->external_rev_id = adev->rev_id + 0x41; 735 + else if (adev->rev_id == 1) 736 + adev->external_rev_id = adev->rev_id + 0x20; 735 737 else 736 - adev->external_rev_id = 0x1; 738 + adev->external_rev_id = adev->rev_id + 0x01; 737 739 738 740 if (adev->rev_id >= 0x8) { 739 741 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
··· 863 863 return 0; 864 864 } 865 865 866 - #if CONFIG_X86_64 866 + #ifdef CONFIG_X86_64 867 867 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, 868 868 uint32_t *num_entries, 869 869 struct crat_subtype_iolink *sub_type_hdr)
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 4082 4082 } 4083 4083 4084 4084 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 4085 - connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 4085 + connector_type == DRM_MODE_CONNECTOR_DisplayPort || 4086 + connector_type == DRM_MODE_CONNECTOR_eDP) { 4086 4087 drm_connector_attach_vrr_capable_property( 4087 4088 &aconnector->base); 4088 4089 }
+9 -1
drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
··· 591 591 dc, 592 592 context->bw.dce.sclk_khz); 593 593 594 - pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; 594 + /* 595 + * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. 596 + * This is not required for less than 5 displays, 597 + * thus don't request decfclk in dc to avoid impact 598 + * on power saving. 599 + * 600 + */ 601 + pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? 602 + pp_display_cfg->min_engine_clock_khz : 0; 595 603 596 604 pp_display_cfg->min_engine_clock_deep_sleep_khz 597 605 = context->bw.dce.sclk_deep_sleep_khz;
+1
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
··· 1033 1033 break; 1034 1034 case amd_pp_dpp_clock: 1035 1035 pclk_vol_table = pinfo->vdd_dep_on_dppclk; 1036 + break; 1036 1037 default: 1037 1038 return -EINVAL; 1038 1039 }
+1 -1
drivers/gpu/drm/drm_modes.c
··· 758 758 if (mode->hsync) 759 759 return mode->hsync; 760 760 761 - if (mode->htotal < 0) 761 + if (mode->htotal <= 0) 762 762 return 0; 763 763 764 764 calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+1 -1
drivers/gpu/drm/i915/intel_ddi.c
··· 1086 1086 return DDI_CLK_SEL_TBT_810; 1087 1087 default: 1088 1088 MISSING_CASE(clock); 1089 - break; 1089 + return DDI_CLK_SEL_NONE; 1090 1090 } 1091 1091 case DPLL_ID_ICL_MGPLL1: 1092 1092 case DPLL_ID_ICL_MGPLL2:
+44 -6
drivers/gpu/drm/i915/intel_display.c
··· 15415 15415 } 15416 15416 } 15417 15417 15418 + static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) 15419 + { 15420 + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 15421 + 15422 + /* 15423 + * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram 15424 + * the hardware when a high res displays plugged in. DPLL P 15425 + * divider is zero, and the pipe timings are bonkers. We'll 15426 + * try to disable everything in that case. 15427 + * 15428 + * FIXME would be nice to be able to sanitize this state 15429 + * without several WARNs, but for now let's take the easy 15430 + * road. 15431 + */ 15432 + return IS_GEN6(dev_priv) && 15433 + crtc_state->base.active && 15434 + crtc_state->shared_dpll && 15435 + crtc_state->port_clock == 0; 15436 + } 15437 + 15418 15438 static void intel_sanitize_encoder(struct intel_encoder *encoder) 15419 15439 { 15420 15440 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 15421 15441 struct intel_connector *connector; 15442 + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 15443 + struct intel_crtc_state *crtc_state = crtc ? 15444 + to_intel_crtc_state(crtc->base.state) : NULL; 15422 15445 15423 15446 /* We need to check both for a crtc link (meaning that the 15424 15447 * encoder is active and trying to read from a pipe) and the 15425 15448 * pipe itself being active. */ 15426 - bool has_active_crtc = encoder->base.crtc && 15427 - to_intel_crtc(encoder->base.crtc)->active; 15449 + bool has_active_crtc = crtc_state && 15450 + crtc_state->base.active; 15451 + 15452 + if (crtc_state && has_bogus_dpll_config(crtc_state)) { 15453 + DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. 
Disabling pipe %c\n", 15454 + pipe_name(crtc->pipe)); 15455 + has_active_crtc = false; 15456 + } 15428 15457 15429 15458 connector = intel_encoder_find_connector(encoder); 15430 15459 if (connector && !has_active_crtc) { ··· 15464 15435 /* Connector is active, but has no active pipe. This is 15465 15436 * fallout from our resume register restoring. Disable 15466 15437 * the encoder manually again. */ 15467 - if (encoder->base.crtc) { 15468 - struct drm_crtc_state *crtc_state = encoder->base.crtc->state; 15438 + if (crtc_state) { 15439 + struct drm_encoder *best_encoder; 15469 15440 15470 15441 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15471 15442 encoder->base.base.id, 15472 15443 encoder->base.name); 15444 + 15445 + /* avoid oopsing in case the hooks consult best_encoder */ 15446 + best_encoder = connector->base.state->best_encoder; 15447 + connector->base.state->best_encoder = &encoder->base; 15448 + 15473 15449 if (encoder->disable) 15474 - encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15450 + encoder->disable(encoder, crtc_state, 15451 + connector->base.state); 15475 15452 if (encoder->post_disable) 15476 - encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15453 + encoder->post_disable(encoder, crtc_state, 15454 + connector->base.state); 15455 + 15456 + connector->base.state->best_encoder = best_encoder; 15477 15457 } 15478 15458 encoder->base.crtc = NULL; 15479 15459
+1 -1
drivers/gpu/drm/i915/intel_sprite.c
··· 494 494 495 495 keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); 496 496 497 - keymsk = key->channel_mask & 0x3ffffff; 497 + keymsk = key->channel_mask & 0x7ffffff; 498 498 if (alpha < 0xff) 499 499 keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; 500 500
+19 -8
drivers/gpu/drm/omapdrm/dss/dsi.c
··· 1406 1406 1407 1407 static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) 1408 1408 { 1409 - struct dsi_data *dsi = p; 1409 + struct dsi_data *dsi = s->private; 1410 1410 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; 1411 1411 enum dss_clk_source dispc_clk_src, dsi_clk_src; 1412 1412 int dsi_module = dsi->module_id; ··· 1467 1467 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 1468 1468 static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) 1469 1469 { 1470 - struct dsi_data *dsi = p; 1470 + struct dsi_data *dsi = s->private; 1471 1471 unsigned long flags; 1472 1472 struct dsi_irq_stats stats; 1473 1473 ··· 1558 1558 1559 1559 static int dsi_dump_dsi_regs(struct seq_file *s, void *p) 1560 1560 { 1561 - struct dsi_data *dsi = p; 1561 + struct dsi_data *dsi = s->private; 1562 1562 1563 1563 if (dsi_runtime_get(dsi)) 1564 1564 return 0; ··· 4751 4751 dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; 4752 4752 dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; 4753 4753 dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; 4754 + /* 4755 + * HACK: These flags should be handled through the omap_dss_device bus 4756 + * flags, but this will only be possible when the DSI encoder will be 4757 + * converted to the omapdrm-managed encoder model. 
4758 + */ 4759 + dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE; 4760 + dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; 4761 + dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW; 4762 + dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH; 4763 + dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE; 4764 + dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; 4754 4765 4755 4766 dss_mgr_set_timings(&dsi->output, &dsi->vm); 4756 4767 ··· 5094 5083 5095 5084 snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); 5096 5085 dsi->debugfs.regs = dss_debugfs_create_file(dss, name, 5097 - dsi_dump_dsi_regs, &dsi); 5086 + dsi_dump_dsi_regs, dsi); 5098 5087 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 5099 5088 snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); 5100 5089 dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, 5101 - dsi_dump_dsi_irqs, &dsi); 5090 + dsi_dump_dsi_irqs, dsi); 5102 5091 #endif 5103 5092 snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); 5104 5093 dsi->debugfs.clks = dss_debugfs_create_file(dss, name, 5105 - dsi_dump_dsi_clocks, &dsi); 5094 + dsi_dump_dsi_clocks, dsi); 5106 5095 5107 5096 return 0; 5108 5097 } ··· 5114 5103 dss_debugfs_remove_file(dsi->debugfs.clks); 5115 5104 dss_debugfs_remove_file(dsi->debugfs.irqs); 5116 5105 dss_debugfs_remove_file(dsi->debugfs.regs); 5117 - 5118 - of_platform_depopulate(dev); 5119 5106 5120 5107 WARN_ON(dsi->scp_clk_refcount > 0); 5121 5108 ··· 5465 5456 component_del(&pdev->dev, &dsi_component_ops); 5466 5457 5467 5458 dsi_uninit_output(dsi); 5459 + 5460 + of_platform_depopulate(&pdev->dev); 5468 5461 5469 5462 pm_runtime_disable(&pdev->dev); 5470 5463
+3 -2
drivers/gpu/drm/radeon/ci_dpm.c
··· 5676 5676 u16 data_offset, size; 5677 5677 u8 frev, crev; 5678 5678 struct ci_power_info *pi; 5679 - enum pci_bus_speed speed_cap; 5679 + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; 5680 5680 struct pci_dev *root = rdev->pdev->bus->self; 5681 5681 int ret; 5682 5682 ··· 5685 5685 return -ENOMEM; 5686 5686 rdev->pm.dpm.priv = pi; 5687 5687 5688 - speed_cap = pcie_get_speed_cap(root); 5688 + if (!pci_is_root_bus(rdev->pdev->bus)) 5689 + speed_cap = pcie_get_speed_cap(root); 5689 5690 if (speed_cap == PCI_SPEED_UNKNOWN) { 5690 5691 pi->sys_pcie_mask = 0; 5691 5692 } else {
+3 -2
drivers/gpu/drm/radeon/si_dpm.c
··· 6899 6899 struct ni_power_info *ni_pi; 6900 6900 struct si_power_info *si_pi; 6901 6901 struct atom_clock_dividers dividers; 6902 - enum pci_bus_speed speed_cap; 6902 + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; 6903 6903 struct pci_dev *root = rdev->pdev->bus->self; 6904 6904 int ret; 6905 6905 ··· 6911 6911 eg_pi = &ni_pi->eg; 6912 6912 pi = &eg_pi->rv7xx; 6913 6913 6914 - speed_cap = pcie_get_speed_cap(root); 6914 + if (!pci_is_root_bus(rdev->pdev->bus)) 6915 + speed_cap = pcie_get_speed_cap(root); 6915 6916 if (speed_cap == PCI_SPEED_UNKNOWN) { 6916 6917 si_pi->sys_pcie_mask = 0; 6917 6918 } else {
+1 -10
drivers/gpu/drm/rockchip/rockchip_rgb.c
··· 1 - //SPDX-License-Identifier: GPL-2.0+ 1 + // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 4 4 * Author: 5 5 * Sandy Huang <hjc@rock-chips.com> 6 - * 7 - * This software is licensed under the terms of the GNU General Public 8 - * License version 2, as published by the Free Software Foundation, and 9 - * may be copied, distributed, and modified under those terms. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 */ 16 7 17 8 #include <drm/drmP.h>
+1 -10
drivers/gpu/drm/rockchip/rockchip_rgb.h
··· 1 - //SPDX-License-Identifier: GPL-2.0+ 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 4 4 * Author: 5 5 * Sandy Huang <hjc@rock-chips.com> 6 - * 7 - * This software is licensed under the terms of the GNU General Public 8 - * License version 2, as published by the Free Software Foundation, and 9 - * may be copied, distributed, and modified under those terms. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 */ 16 7 17 8 #ifdef CONFIG_ROCKCHIP_RGB
+2
drivers/gpu/drm/sun4i/sun4i_tcon.c
··· 761 761 return PTR_ERR(tcon->sclk0); 762 762 } 763 763 } 764 + clk_prepare_enable(tcon->sclk0); 764 765 765 766 if (tcon->quirks->has_channel_1) { 766 767 tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); ··· 776 775 777 776 static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) 778 777 { 778 + clk_disable_unprepare(tcon->sclk0); 779 779 clk_disable_unprepare(tcon->clk); 780 780 } 781 781
+36 -53
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 26 26 **************************************************************************/ 27 27 #include <linux/module.h> 28 28 #include <linux/console.h> 29 + #include <linux/dma-mapping.h> 29 30 30 31 #include <drm/drmP.h> 31 32 #include "vmwgfx_drv.h" ··· 35 34 #include <drm/ttm/ttm_placement.h> 36 35 #include <drm/ttm/ttm_bo_driver.h> 37 36 #include <drm/ttm/ttm_module.h> 38 - #include <linux/intel-iommu.h> 39 37 40 38 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 41 39 #define VMWGFX_CHIP_SVGAII 0 ··· 546 546 } 547 547 548 548 /** 549 + * vmw_assume_iommu - Figure out whether coherent dma-remapping might be 550 + * taking place. 551 + * @dev: Pointer to the struct drm_device. 552 + * 553 + * Return: true if iommu present, false otherwise. 554 + */ 555 + static bool vmw_assume_iommu(struct drm_device *dev) 556 + { 557 + const struct dma_map_ops *ops = get_dma_ops(dev->dev); 558 + 559 + return !dma_is_direct(ops) && ops && 560 + ops->map_page != dma_direct_map_page; 561 + } 562 + 563 + /** 549 564 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 550 565 * system. 
551 566 * ··· 580 565 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 581 566 [vmw_dma_map_populate] = "Keeping DMA mappings.", 582 567 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 583 - #ifdef CONFIG_X86 584 - const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); 585 - 586 - #ifdef CONFIG_INTEL_IOMMU 587 - if (intel_iommu_enabled) { 588 - dev_priv->map_mode = vmw_dma_map_populate; 589 - goto out_fixup; 590 - } 591 - #endif 592 - 593 - if (!(vmw_force_iommu || vmw_force_coherent)) { 594 - dev_priv->map_mode = vmw_dma_phys; 595 - DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 596 - return 0; 597 - } 598 - 599 - dev_priv->map_mode = vmw_dma_map_populate; 600 - 601 - if (dma_ops && dma_ops->sync_single_for_cpu) 602 - dev_priv->map_mode = vmw_dma_alloc_coherent; 603 - #ifdef CONFIG_SWIOTLB 604 - if (swiotlb_nr_tbl() == 0) 605 - dev_priv->map_mode = vmw_dma_map_populate; 606 - #endif 607 - 608 - #ifdef CONFIG_INTEL_IOMMU 609 - out_fixup: 610 - #endif 611 - if (dev_priv->map_mode == vmw_dma_map_populate && 612 - vmw_restrict_iommu) 613 - dev_priv->map_mode = vmw_dma_map_bind; 614 568 615 569 if (vmw_force_coherent) 616 570 dev_priv->map_mode = vmw_dma_alloc_coherent; 571 + else if (vmw_assume_iommu(dev_priv->dev)) 572 + dev_priv->map_mode = vmw_dma_map_populate; 573 + else if (!vmw_force_iommu) 574 + dev_priv->map_mode = vmw_dma_phys; 575 + else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) 576 + dev_priv->map_mode = vmw_dma_alloc_coherent; 577 + else 578 + dev_priv->map_mode = vmw_dma_map_populate; 617 579 618 - #if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) 619 - /* 620 - * No coherent page pool 621 - */ 622 - if (dev_priv->map_mode == vmw_dma_alloc_coherent) 580 + if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) 581 + dev_priv->map_mode = vmw_dma_map_bind; 582 + 583 + /* No TTM coherent page pool? FIXME: Ask TTM instead! 
*/ 584 + if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && 585 + (dev_priv->map_mode == vmw_dma_alloc_coherent)) 623 586 return -EINVAL; 624 - #endif 625 - 626 - #else /* CONFIG_X86 */ 627 - dev_priv->map_mode = vmw_dma_map_populate; 628 - #endif /* CONFIG_X86 */ 629 587 630 588 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 631 - 632 589 return 0; 633 590 } 634 591 ··· 612 625 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 613 626 * restriction also for 64-bit systems. 614 627 */ 615 - #ifdef CONFIG_INTEL_IOMMU 616 628 static int vmw_dma_masks(struct vmw_private *dev_priv) 617 629 { 618 630 struct drm_device *dev = dev_priv->dev; 631 + int ret = 0; 619 632 620 - if (intel_iommu_enabled && 633 + ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); 634 + if (dev_priv->map_mode != vmw_dma_phys && 621 635 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 622 636 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 623 - return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); 637 + return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); 624 638 } 625 - return 0; 639 + 640 + return ret; 626 641 } 627 - #else 628 - static int vmw_dma_masks(struct vmw_private *dev_priv) 629 - { 630 - return 0; 631 - } 632 - #endif 633 642 634 643 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 635 644 {
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 3570 3570 *p_fence = NULL; 3571 3571 } 3572 3572 3573 - return 0; 3573 + return ret; 3574 3574 } 3575 3575 3576 3576 /**
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 1646 1646 struct drm_connector_state *conn_state; 1647 1647 struct vmw_connector_state *vmw_conn_state; 1648 1648 1649 - if (!du->pref_active) { 1649 + if (!du->pref_active && new_crtc_state->enable) { 1650 1650 ret = -EINVAL; 1651 1651 goto clean; 1652 1652 } ··· 2554 2554 user_fence_rep) 2555 2555 { 2556 2556 struct vmw_fence_obj *fence = NULL; 2557 - uint32_t handle; 2558 - int ret; 2557 + uint32_t handle = 0; 2558 + int ret = 0; 2559 2559 2560 2560 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || 2561 2561 out_fence)
+48 -74
drivers/hid/hid-debug.c
··· 30 30 31 31 #include <linux/debugfs.h> 32 32 #include <linux/seq_file.h> 33 + #include <linux/kfifo.h> 33 34 #include <linux/sched/signal.h> 34 35 #include <linux/export.h> 35 36 #include <linux/slab.h> ··· 662 661 /* enqueue string to 'events' ring buffer */ 663 662 void hid_debug_event(struct hid_device *hdev, char *buf) 664 663 { 665 - unsigned i; 666 664 struct hid_debug_list *list; 667 665 unsigned long flags; 668 666 669 667 spin_lock_irqsave(&hdev->debug_list_lock, flags); 670 - list_for_each_entry(list, &hdev->debug_list, node) { 671 - for (i = 0; buf[i]; i++) 672 - list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = 673 - buf[i]; 674 - list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; 675 - } 668 + list_for_each_entry(list, &hdev->debug_list, node) 669 + kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); 676 670 spin_unlock_irqrestore(&hdev->debug_list_lock, flags); 677 671 678 672 wake_up_interruptible(&hdev->debug_wait); ··· 718 722 hid_debug_event(hdev, buf); 719 723 720 724 kfree(buf); 721 - wake_up_interruptible(&hdev->debug_wait); 722 - 725 + wake_up_interruptible(&hdev->debug_wait); 723 726 } 724 727 EXPORT_SYMBOL_GPL(hid_dump_input); 725 728 ··· 1078 1083 goto out; 1079 1084 } 1080 1085 1081 - if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { 1082 - err = -ENOMEM; 1086 + err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); 1087 + if (err) { 1083 1088 kfree(list); 1084 1089 goto out; 1085 1090 } ··· 1099 1104 size_t count, loff_t *ppos) 1100 1105 { 1101 1106 struct hid_debug_list *list = file->private_data; 1102 - int ret = 0, len; 1107 + int ret = 0, copied; 1103 1108 DECLARE_WAITQUEUE(wait, current); 1104 1109 1105 1110 mutex_lock(&list->read_mutex); 1106 - while (ret == 0) { 1107 - if (list->head == list->tail) { 1108 - add_wait_queue(&list->hdev->debug_wait, &wait); 1109 - set_current_state(TASK_INTERRUPTIBLE); 1111 + if (kfifo_is_empty(&list->hid_debug_fifo)) { 1112 + 
add_wait_queue(&list->hdev->debug_wait, &wait); 1113 + set_current_state(TASK_INTERRUPTIBLE); 1110 1114 1111 - while (list->head == list->tail) { 1112 - if (file->f_flags & O_NONBLOCK) { 1113 - ret = -EAGAIN; 1114 - break; 1115 - } 1116 - if (signal_pending(current)) { 1117 - ret = -ERESTARTSYS; 1118 - break; 1119 - } 1120 - 1121 - if (!list->hdev || !list->hdev->debug) { 1122 - ret = -EIO; 1123 - set_current_state(TASK_RUNNING); 1124 - goto out; 1125 - } 1126 - 1127 - /* allow O_NONBLOCK from other threads */ 1128 - mutex_unlock(&list->read_mutex); 1129 - schedule(); 1130 - mutex_lock(&list->read_mutex); 1131 - set_current_state(TASK_INTERRUPTIBLE); 1115 + while (kfifo_is_empty(&list->hid_debug_fifo)) { 1116 + if (file->f_flags & O_NONBLOCK) { 1117 + ret = -EAGAIN; 1118 + break; 1132 1119 } 1133 1120 1134 - set_current_state(TASK_RUNNING); 1135 - remove_wait_queue(&list->hdev->debug_wait, &wait); 1121 + if (signal_pending(current)) { 1122 + ret = -ERESTARTSYS; 1123 + break; 1124 + } 1125 + 1126 + /* if list->hdev is NULL we cannot remove_wait_queue(). 1127 + * if list->hdev->debug is 0 then hid_debug_unregister() 1128 + * was already called and list->hdev is being destroyed. 1129 + * if we add remove_wait_queue() here we can hit a race. 
1130 + */ 1131 + if (!list->hdev || !list->hdev->debug) { 1132 + ret = -EIO; 1133 + set_current_state(TASK_RUNNING); 1134 + goto out; 1135 + } 1136 + 1137 + /* allow O_NONBLOCK from other threads */ 1138 + mutex_unlock(&list->read_mutex); 1139 + schedule(); 1140 + mutex_lock(&list->read_mutex); 1141 + set_current_state(TASK_INTERRUPTIBLE); 1136 1142 } 1143 + 1144 + __set_current_state(TASK_RUNNING); 1145 + remove_wait_queue(&list->hdev->debug_wait, &wait); 1137 1146 1138 1147 if (ret) 1139 1148 goto out; 1140 - 1141 - /* pass the ringbuffer contents to userspace */ 1142 - copy_rest: 1143 - if (list->tail == list->head) 1144 - goto out; 1145 - if (list->tail > list->head) { 1146 - len = list->tail - list->head; 1147 - if (len > count) 1148 - len = count; 1149 - 1150 - if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { 1151 - ret = -EFAULT; 1152 - goto out; 1153 - } 1154 - ret += len; 1155 - list->head += len; 1156 - } else { 1157 - len = HID_DEBUG_BUFSIZE - list->head; 1158 - if (len > count) 1159 - len = count; 1160 - 1161 - if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { 1162 - ret = -EFAULT; 1163 - goto out; 1164 - } 1165 - list->head = 0; 1166 - ret += len; 1167 - count -= len; 1168 - if (count > 0) 1169 - goto copy_rest; 1170 - } 1171 - 1172 1149 } 1150 + 1151 + /* pass the fifo content to userspace, locking is not needed with only 1152 + * one concurrent reader and one concurrent writer 1153 + */ 1154 + ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); 1155 + if (ret) 1156 + goto out; 1157 + ret = copied; 1173 1158 out: 1174 1159 mutex_unlock(&list->read_mutex); 1175 1160 return ret; ··· 1160 1185 struct hid_debug_list *list = file->private_data; 1161 1186 1162 1187 poll_wait(file, &list->hdev->debug_wait, wait); 1163 - if (list->head != list->tail) 1188 + if (!kfifo_is_empty(&list->hid_debug_fifo)) 1164 1189 return EPOLLIN | EPOLLRDNORM; 1165 1190 if (!list->hdev->debug) 1166 1191 return EPOLLERR | 
EPOLLHUP; ··· 1175 1200 spin_lock_irqsave(&list->hdev->debug_list_lock, flags); 1176 1201 list_del(&list->node); 1177 1202 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); 1178 - kfree(list->hid_debug_buf); 1203 + kfifo_free(&list->hid_debug_fifo); 1179 1204 kfree(list); 1180 1205 1181 1206 return 0; ··· 1221 1246 { 1222 1247 debugfs_remove_recursive(hid_debug_root); 1223 1248 } 1224 -
+5 -8
drivers/i2c/busses/i2c-omap.c
··· 1500 1500 return 0; 1501 1501 } 1502 1502 1503 - #ifdef CONFIG_PM 1504 - static int omap_i2c_runtime_suspend(struct device *dev) 1503 + static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev) 1505 1504 { 1506 1505 struct omap_i2c_dev *omap = dev_get_drvdata(dev); 1507 1506 ··· 1526 1527 return 0; 1527 1528 } 1528 1529 1529 - static int omap_i2c_runtime_resume(struct device *dev) 1530 + static int __maybe_unused omap_i2c_runtime_resume(struct device *dev) 1530 1531 { 1531 1532 struct omap_i2c_dev *omap = dev_get_drvdata(dev); 1532 1533 ··· 1541 1542 } 1542 1543 1543 1544 static const struct dev_pm_ops omap_i2c_pm_ops = { 1545 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1546 + pm_runtime_force_resume) 1544 1547 SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, 1545 1548 omap_i2c_runtime_resume, NULL) 1546 1549 }; 1547 - #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) 1548 - #else 1549 - #define OMAP_I2C_PM_OPS NULL 1550 - #endif /* CONFIG_PM */ 1551 1550 1552 1551 static struct platform_driver omap_i2c_driver = { 1553 1552 .probe = omap_i2c_probe, 1554 1553 .remove = omap_i2c_remove, 1555 1554 .driver = { 1556 1555 .name = "omap_i2c", 1557 - .pm = OMAP_I2C_PM_OPS, 1556 + .pm = &omap_i2c_pm_ops, 1558 1557 .of_match_table = of_match_ptr(omap_i2c_of_match), 1559 1558 }, 1560 1559 };
+1 -1
drivers/i3c/master.c
··· 1828 1828 1829 1829 ret = i3c_master_retrieve_dev_info(newdev); 1830 1830 if (ret) 1831 - goto err_free_dev; 1831 + goto err_detach_dev; 1832 1832 1833 1833 olddev = i3c_master_search_i3c_dev_duplicate(newdev); 1834 1834 if (olddev) {
+12 -6
drivers/i3c/master/dw-i3c-master.c
··· 419 419 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 420 420 } 421 421 422 - static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, 423 - struct dw_i3c_xfer *xfer) 422 + static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master, 423 + struct dw_i3c_xfer *xfer) 424 424 { 425 - unsigned long flags; 426 - 427 - spin_lock_irqsave(&master->xferqueue.lock, flags); 428 425 if (master->xferqueue.cur == xfer) { 429 426 u32 status; 430 427 ··· 436 439 } else { 437 440 list_del_init(&xfer->node); 438 441 } 442 + } 443 + 444 + static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, 445 + struct dw_i3c_xfer *xfer) 446 + { 447 + unsigned long flags; 448 + 449 + spin_lock_irqsave(&master->xferqueue.lock, flags); 450 + dw_i3c_master_dequeue_xfer_locked(master, xfer); 439 451 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 440 452 } 441 453 ··· 500 494 complete(&xfer->comp); 501 495 502 496 if (ret < 0) { 503 - dw_i3c_master_dequeue_xfer(master, xfer); 497 + dw_i3c_master_dequeue_xfer_locked(master, xfer); 504 498 writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME, 505 499 master->regs + DEVICE_CTRL); 506 500 }
+8 -1
drivers/ide/ide-atapi.c
··· 235 235 236 236 int ide_queue_sense_rq(ide_drive_t *drive, void *special) 237 237 { 238 - struct request *sense_rq = drive->sense_rq; 238 + ide_hwif_t *hwif = drive->hwif; 239 + struct request *sense_rq; 240 + unsigned long flags; 241 + 242 + spin_lock_irqsave(&hwif->lock, flags); 239 243 240 244 /* deferred failure from ide_prep_sense() */ 241 245 if (!drive->sense_rq_armed) { 242 246 printk(KERN_WARNING PFX "%s: error queuing a sense request\n", 243 247 drive->name); 248 + spin_unlock_irqrestore(&hwif->lock, flags); 244 249 return -ENOMEM; 245 250 } 246 251 252 + sense_rq = drive->sense_rq; 247 253 ide_req(sense_rq)->special = special; 248 254 drive->sense_rq_armed = false; 249 255 250 256 drive->hwif->rq = NULL; 251 257 252 258 ide_insert_request_head(drive, sense_rq); 259 + spin_unlock_irqrestore(&hwif->lock, flags); 253 260 return 0; 254 261 } 255 262 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
+31 -30
drivers/ide/ide-io.c
··· 68 68 } 69 69 70 70 if (!blk_update_request(rq, error, nr_bytes)) { 71 - if (rq == drive->sense_rq) 71 + if (rq == drive->sense_rq) { 72 72 drive->sense_rq = NULL; 73 + drive->sense_rq_active = false; 74 + } 73 75 74 76 __blk_mq_end_request(rq, error); 75 77 return 0; ··· 453 451 blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3); 454 452 } 455 453 456 - /* 457 - * Issue a new request to a device. 458 - */ 459 - blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx, 460 - const struct blk_mq_queue_data *bd) 454 + blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq, 455 + bool local_requeue) 461 456 { 462 - ide_drive_t *drive = hctx->queue->queuedata; 463 - ide_hwif_t *hwif = drive->hwif; 457 + ide_hwif_t *hwif = drive->hwif; 464 458 struct ide_host *host = hwif->host; 465 - struct request *rq = bd->rq; 466 459 ide_startstop_t startstop; 467 460 468 461 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { ··· 470 473 471 474 if (ide_lock_host(host, hwif)) 472 475 return BLK_STS_DEV_RESOURCE; 473 - 474 - blk_mq_start_request(rq); 475 476 476 477 spin_lock_irq(&hwif->lock); 477 478 ··· 504 509 } 505 510 hwif->cur_dev = drive; 506 511 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); 507 - 508 - /* 509 - * we know that the queue isn't empty, but this can happen 510 - * if ->prep_rq() decides to kill a request 511 - */ 512 - if (!rq) { 513 - rq = bd->rq; 514 - if (!rq) { 515 - ide_unlock_port(hwif); 516 - goto out; 517 - } 518 - } 519 512 520 513 /* 521 514 * Sanity: don't accept a request that isn't a PM request ··· 543 560 } 544 561 } else { 545 562 plug_device: 563 + if (local_requeue) 564 + list_add(&rq->queuelist, &drive->rq_list); 546 565 spin_unlock_irq(&hwif->lock); 547 566 ide_unlock_host(host); 548 - ide_requeue_and_plug(drive, rq); 567 + if (!local_requeue) 568 + ide_requeue_and_plug(drive, rq); 549 569 return BLK_STS_OK; 550 570 } 551 571 ··· 557 571 if (rq == NULL) 558 572 ide_unlock_host(host); 559 573 return 
BLK_STS_OK; 574 + } 575 + 576 + /* 577 + * Issue a new request to a device. 578 + */ 579 + blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx, 580 + const struct blk_mq_queue_data *bd) 581 + { 582 + ide_drive_t *drive = hctx->queue->queuedata; 583 + ide_hwif_t *hwif = drive->hwif; 584 + 585 + spin_lock_irq(&hwif->lock); 586 + if (drive->sense_rq_active) { 587 + spin_unlock_irq(&hwif->lock); 588 + return BLK_STS_DEV_RESOURCE; 589 + } 590 + spin_unlock_irq(&hwif->lock); 591 + 592 + blk_mq_start_request(bd->rq); 593 + return ide_issue_rq(drive, bd->rq, false); 560 594 } 561 595 562 596 static int drive_is_ready(ide_drive_t *drive) ··· 899 893 900 894 void ide_insert_request_head(ide_drive_t *drive, struct request *rq) 901 895 { 902 - ide_hwif_t *hwif = drive->hwif; 903 - unsigned long flags; 904 - 905 - spin_lock_irqsave(&hwif->lock, flags); 896 + drive->sense_rq_active = true; 906 897 list_add_tail(&rq->queuelist, &drive->rq_list); 907 - spin_unlock_irqrestore(&hwif->lock, flags); 908 - 909 898 kblockd_schedule_work(&drive->rq_work); 910 899 } 911 900 EXPORT_SYMBOL_GPL(ide_insert_request_head);
+2
drivers/ide/ide-park.c
··· 54 54 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS; 55 55 scsi_req(rq)->cmd_len = 1; 56 56 ide_req(rq)->type = ATA_PRIV_MISC; 57 + spin_lock_irq(&hwif->lock); 57 58 ide_insert_request_head(drive, rq); 59 + spin_unlock_irq(&hwif->lock); 58 60 59 61 out: 60 62 return;
+16 -7
drivers/ide/ide-probe.c
··· 1159 1159 ide_drive_t *drive = container_of(work, ide_drive_t, rq_work); 1160 1160 ide_hwif_t *hwif = drive->hwif; 1161 1161 struct request *rq; 1162 + blk_status_t ret; 1162 1163 LIST_HEAD(list); 1163 1164 1165 + blk_mq_quiesce_queue(drive->queue); 1166 + 1167 + ret = BLK_STS_OK; 1164 1168 spin_lock_irq(&hwif->lock); 1165 - if (!list_empty(&drive->rq_list)) 1166 - list_splice_init(&drive->rq_list, &list); 1169 + while (!list_empty(&drive->rq_list)) { 1170 + rq = list_first_entry(&drive->rq_list, struct request, queuelist); 1171 + list_del_init(&rq->queuelist); 1172 + 1173 + spin_unlock_irq(&hwif->lock); 1174 + ret = ide_issue_rq(drive, rq, true); 1175 + spin_lock_irq(&hwif->lock); 1176 + } 1167 1177 spin_unlock_irq(&hwif->lock); 1168 1178 1169 - while (!list_empty(&list)) { 1170 - rq = list_first_entry(&list, struct request, queuelist); 1171 - list_del_init(&rq->queuelist); 1172 - blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL); 1173 - } 1179 + blk_mq_unquiesce_queue(drive->queue); 1180 + 1181 + if (ret != BLK_STS_OK) 1182 + kblockd_schedule_work(&drive->rq_work); 1174 1183 } 1175 1184 1176 1185 static const u8 ide_hwif_to_major[] =
+60 -16
drivers/iio/adc/axp288_adc.c
··· 27 27 #include <linux/iio/machine.h> 28 28 #include <linux/iio/driver.h> 29 29 30 - #define AXP288_ADC_EN_MASK 0xF1 31 - #define AXP288_ADC_TS_PIN_GPADC 0xF2 32 - #define AXP288_ADC_TS_PIN_ON 0xF3 30 + /* 31 + * This mask enables all ADCs except for the battery temp-sensor (TS), that is 32 + * left as-is to avoid breaking charging on devices without a temp-sensor. 33 + */ 34 + #define AXP288_ADC_EN_MASK 0xF0 35 + #define AXP288_ADC_TS_ENABLE 0x01 36 + 37 + #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) 38 + #define AXP288_ADC_TS_CURRENT_OFF (0 << 0) 39 + #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) 40 + #define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) 41 + #define AXP288_ADC_TS_CURRENT_ON (3 << 0) 33 42 34 43 enum axp288_adc_id { 35 44 AXP288_ADC_TS, ··· 53 44 struct axp288_adc_info { 54 45 int irq; 55 46 struct regmap *regmap; 47 + bool ts_enabled; 56 48 }; 57 49 58 50 static const struct iio_chan_spec axp288_adc_channels[] = { ··· 125 115 return IIO_VAL_INT; 126 116 } 127 117 128 - static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, 129 - unsigned long address) 118 + /* 119 + * The current-source used for the battery temp-sensor (TS) is shared 120 + * with the GPADC. For proper fuel-gauge and charger operation the TS 121 + * current-source needs to be permanently on. But to read the GPADC we 122 + * need to temporary switch the TS current-source to ondemand, so that 123 + * the GPADC can use it, otherwise we will always read an all 0 value. 
124 + */ 125 + static int axp288_adc_set_ts(struct axp288_adc_info *info, 126 + unsigned int mode, unsigned long address) 130 127 { 131 128 int ret; 132 129 133 - /* channels other than GPADC do not need to switch TS pin */ 130 + /* No need to switch the current-source if the TS pin is disabled */ 131 + if (!info->ts_enabled) 132 + return 0; 133 + 134 + /* Channels other than GPADC do not need the current source */ 134 135 if (address != AXP288_GP_ADC_H) 135 136 return 0; 136 137 137 - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); 138 + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, 139 + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode); 138 140 if (ret) 139 141 return ret; 140 142 141 143 /* When switching to the GPADC pin give things some time to settle */ 142 - if (mode == AXP288_ADC_TS_PIN_GPADC) 144 + if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND) 143 145 usleep_range(6000, 10000); 144 146 145 147 return 0; ··· 167 145 mutex_lock(&indio_dev->mlock); 168 146 switch (mask) { 169 147 case IIO_CHAN_INFO_RAW: 170 - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, 148 + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND, 171 149 chan->address)) { 172 150 dev_err(&indio_dev->dev, "GPADC mode\n"); 173 151 ret = -EINVAL; 174 152 break; 175 153 } 176 154 ret = axp288_adc_read_channel(val, chan->address, info->regmap); 177 - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, 155 + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON, 178 156 chan->address)) 179 157 dev_err(&indio_dev->dev, "TS pin restore\n"); 180 158 break; ··· 186 164 return ret; 187 165 } 188 166 189 - static int axp288_adc_set_state(struct regmap *regmap) 167 + static int axp288_adc_initialize(struct axp288_adc_info *info) 190 168 { 191 - /* ADC should be always enabled for internal FG to function */ 192 - if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) 193 - return -EIO; 169 + int ret, adc_enable_val; 194 170 195 - return 
regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); 171 + /* 172 + * Determine if the TS pin is enabled and set the TS current-source 173 + * accordingly. 174 + */ 175 + ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val); 176 + if (ret) 177 + return ret; 178 + 179 + if (adc_enable_val & AXP288_ADC_TS_ENABLE) { 180 + info->ts_enabled = true; 181 + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, 182 + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, 183 + AXP288_ADC_TS_CURRENT_ON); 184 + } else { 185 + info->ts_enabled = false; 186 + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, 187 + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, 188 + AXP288_ADC_TS_CURRENT_OFF); 189 + } 190 + if (ret) 191 + return ret; 192 + 193 + /* Turn on the ADC for all channels except TS, leave TS as is */ 194 + return regmap_update_bits(info->regmap, AXP20X_ADC_EN1, 195 + AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK); 196 196 } 197 197 198 198 static const struct iio_info axp288_adc_iio_info = { ··· 244 200 * Set ADC to enabled state at all time, including system suspend. 245 201 * otherwise internal fuel gauge functionality may be affected. 246 202 */ 247 - ret = axp288_adc_set_state(axp20x->regmap); 203 + ret = axp288_adc_initialize(info); 248 204 if (ret) { 249 205 dev_err(&pdev->dev, "unable to enable ADC device\n"); 250 206 return ret;
+2 -1
drivers/iio/adc/ti-ads8688.c
··· 41 41 42 42 #define ADS8688_VREF_MV 4096 43 43 #define ADS8688_REALBITS 16 44 + #define ADS8688_MAX_CHANNELS 8 44 45 45 46 /* 46 47 * enum ads8688_range - ADS8688 reference voltage range ··· 386 385 { 387 386 struct iio_poll_func *pf = p; 388 387 struct iio_dev *indio_dev = pf->indio_dev; 389 - u16 buffer[8]; 388 + u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)]; 390 389 int i, j = 0; 391 390 392 391 for (i = 0; i < indio_dev->masklength; i++) {
+3 -4
drivers/iio/chemical/atlas-ph-sensor.c
··· 444 444 case IIO_CHAN_INFO_SCALE: 445 445 switch (chan->type) { 446 446 case IIO_TEMP: 447 - *val = 1; /* 0.01 */ 448 - *val2 = 100; 449 - break; 447 + *val = 10; 448 + return IIO_VAL_INT; 450 449 case IIO_PH: 451 450 *val = 1; /* 0.001 */ 452 451 *val2 = 1000; ··· 476 477 int val, int val2, long mask) 477 478 { 478 479 struct atlas_data *data = iio_priv(indio_dev); 479 - __be32 reg = cpu_to_be32(val); 480 + __be32 reg = cpu_to_be32(val / 10); 480 481 481 482 if (val2 != 0 || val < 0 || val > 20000) 482 483 return -EINVAL;
-1
drivers/infiniband/core/core_priv.h
··· 267 267 #endif 268 268 269 269 struct ib_device *ib_device_get_by_index(u32 ifindex); 270 - void ib_device_put(struct ib_device *device); 271 270 /* RDMA device netlink */ 272 271 void nldev_init(void); 273 272 void nldev_exit(void);
+10 -3
drivers/infiniband/core/device.c
··· 156 156 down_read(&lists_rwsem); 157 157 device = __ib_device_get_by_index(index); 158 158 if (device) { 159 - /* Do not return a device if unregistration has started. */ 160 - if (!refcount_inc_not_zero(&device->refcount)) 159 + if (!ib_device_try_get(device)) 161 160 device = NULL; 162 161 } 163 162 up_read(&lists_rwsem); 164 163 return device; 165 164 } 166 165 166 + /** 167 + * ib_device_put - Release IB device reference 168 + * @device: device whose reference to be released 169 + * 170 + * ib_device_put() releases reference to the IB device to allow it to be 171 + * unregistered and eventually free. 172 + */ 167 173 void ib_device_put(struct ib_device *device) 168 174 { 169 175 if (refcount_dec_and_test(&device->refcount)) 170 176 complete(&device->unreg_completion); 171 177 } 178 + EXPORT_SYMBOL(ib_device_put); 172 179 173 180 static struct ib_device *__ib_device_get_by_name(const char *name) 174 181 { ··· 310 303 rwlock_init(&device->client_data_lock); 311 304 INIT_LIST_HEAD(&device->client_data_list); 312 305 INIT_LIST_HEAD(&device->port_list); 313 - refcount_set(&device->refcount, 1); 314 306 init_completion(&device->unreg_completion); 315 307 316 308 return device; ··· 626 620 goto cg_cleanup; 627 621 } 628 622 623 + refcount_set(&device->refcount, 1); 629 624 device->reg_state = IB_DEV_REGISTERED; 630 625 631 626 list_for_each_entry(client, &client_list, list)
+3
drivers/infiniband/core/umem_odp.c
··· 352 352 umem->writable = 1; 353 353 umem->is_odp = 1; 354 354 odp_data->per_mm = per_mm; 355 + umem->owning_mm = per_mm->mm; 356 + mmgrab(umem->owning_mm); 355 357 356 358 mutex_init(&odp_data->umem_mutex); 357 359 init_completion(&odp_data->notifier_completion); ··· 386 384 out_page_list: 387 385 vfree(odp_data->page_list); 388 386 out_odp_data: 387 + mmdrop(umem->owning_mm); 389 388 kfree(odp_data); 390 389 return ERR_PTR(ret); 391 390 }
+16 -9
drivers/infiniband/core/uverbs_main.c
··· 204 204 if (atomic_dec_and_test(&file->device->refcount)) 205 205 ib_uverbs_comp_dev(file->device); 206 206 207 + if (file->async_file) 208 + kref_put(&file->async_file->ref, 209 + ib_uverbs_release_async_event_file); 207 210 put_device(&file->device->dev); 208 211 kfree(file); 209 212 } ··· 967 964 968 965 /* Get an arbitrary mm pointer that hasn't been cleaned yet */ 969 966 mutex_lock(&ufile->umap_lock); 970 - if (!list_empty(&ufile->umaps)) { 971 - mm = list_first_entry(&ufile->umaps, 972 - struct rdma_umap_priv, list) 973 - ->vma->vm_mm; 974 - mmget(mm); 967 + while (!list_empty(&ufile->umaps)) { 968 + int ret; 969 + 970 + priv = list_first_entry(&ufile->umaps, 971 + struct rdma_umap_priv, list); 972 + mm = priv->vma->vm_mm; 973 + ret = mmget_not_zero(mm); 974 + if (!ret) { 975 + list_del_init(&priv->list); 976 + mm = NULL; 977 + continue; 978 + } 979 + break; 975 980 } 976 981 mutex_unlock(&ufile->umap_lock); 977 982 if (!mm) ··· 1106 1095 mutex_lock(&file->device->lists_mutex); 1107 1096 list_del_init(&file->list); 1108 1097 mutex_unlock(&file->device->lists_mutex); 1109 - 1110 - if (file->async_file) 1111 - kref_put(&file->async_file->ref, 1112 - ib_uverbs_release_async_event_file); 1113 1098 1114 1099 kref_put(&file->ref, ib_uverbs_release_file); 1115 1100
+7 -1
drivers/infiniband/core/uverbs_std_types_device.c
··· 168 168 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( 169 169 struct uverbs_attr_bundle *attrs) 170 170 { 171 - struct ib_device *ib_dev = attrs->ufile->device->ib_dev; 171 + struct ib_device *ib_dev; 172 172 struct ib_port_attr attr = {}; 173 173 struct ib_uverbs_query_port_resp_ex resp = {}; 174 + struct ib_ucontext *ucontext; 174 175 int ret; 175 176 u8 port_num; 177 + 178 + ucontext = ib_uverbs_get_ucontext(attrs); 179 + if (IS_ERR(ucontext)) 180 + return PTR_ERR(ucontext); 181 + ib_dev = ucontext->device; 176 182 177 183 /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ 178 184 if (!ib_dev->ops.query_port)
+1 -1
drivers/infiniband/hw/hfi1/file_ops.c
··· 488 488 vmf = 1; 489 489 break; 490 490 case STATUS: 491 - if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) { 491 + if (flags & VM_WRITE) { 492 492 ret = -EPERM; 493 493 goto done; 494 494 }
-1
drivers/infiniband/hw/hfi1/ud.c
··· 987 987 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 988 988 wc.ex.imm_data = packet->ohdr->u.ud.imm_data; 989 989 wc.wc_flags = IB_WC_WITH_IMM; 990 - tlen -= sizeof(u32); 991 990 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 992 991 wc.ex.imm_data = 0; 993 992 wc.wc_flags = 0;
+8 -2
drivers/infiniband/hw/hns/hns_roce_srq.c
··· 210 210 struct ib_udata *udata) 211 211 { 212 212 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); 213 + struct hns_roce_ib_create_srq_resp resp = {}; 213 214 struct hns_roce_srq *srq; 214 215 int srq_desc_size; 215 216 int srq_buf_size; ··· 379 378 380 379 srq->event = hns_roce_ib_srq_event; 381 380 srq->ibsrq.ext.xrc.srq_num = srq->srqn; 381 + resp.srqn = srq->srqn; 382 382 383 383 if (udata) { 384 - if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { 384 + if (ib_copy_to_udata(udata, &resp, 385 + min(udata->outlen, sizeof(resp)))) { 385 386 ret = -EFAULT; 386 - goto err_wrid; 387 + goto err_srqc_alloc; 387 388 } 388 389 } 389 390 390 391 return &srq->ibsrq; 392 + 393 + err_srqc_alloc: 394 + hns_roce_srq_free(hr_dev, srq); 391 395 392 396 err_wrid: 393 397 kvfree(srq->wrid);
+3 -3
drivers/infiniband/hw/mlx4/mad.c
··· 1411 1411 1412 1412 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); 1413 1413 if (sqp->tx_ring[wire_tx_ix].ah) 1414 - rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); 1414 + mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); 1415 1415 sqp->tx_ring[wire_tx_ix].ah = ah; 1416 1416 ib_dma_sync_single_for_cpu(&dev->ib_dev, 1417 1417 sqp->tx_ring[wire_tx_ix].buf.map, ··· 1902 1902 if (wc.status == IB_WC_SUCCESS) { 1903 1903 switch (wc.opcode) { 1904 1904 case IB_WC_SEND: 1905 - rdma_destroy_ah(sqp->tx_ring[wc.wr_id & 1905 + mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & 1906 1906 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); 1907 1907 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1908 1908 = NULL; ··· 1931 1931 " status = %d, wrid = 0x%llx\n", 1932 1932 ctx->slave, wc.status, wc.wr_id); 1933 1933 if (!MLX4_TUN_IS_RECV(wc.wr_id)) { 1934 - rdma_destroy_ah(sqp->tx_ring[wc.wr_id & 1934 + mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & 1935 1935 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); 1936 1936 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1937 1937 = NULL;
+1 -2
drivers/infiniband/hw/mlx5/flow.c
··· 630 630 UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), 631 631 UAPI_DEF_CHAIN_OBJ_TREE( 632 632 UVERBS_OBJECT_FLOW, 633 - &mlx5_ib_fs, 634 - UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), 633 + &mlx5_ib_fs), 635 634 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, 636 635 &mlx5_ib_flow_actions), 637 636 {},
+5 -5
drivers/infiniband/hw/mlx5/odp.c
··· 1595 1595 struct prefetch_mr_work *w = 1596 1596 container_of(work, struct prefetch_mr_work, work); 1597 1597 1598 - if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED) 1598 + if (ib_device_try_get(&w->dev->ib_dev)) { 1599 1599 mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list, 1600 1600 w->num_sge); 1601 - 1601 + ib_device_put(&w->dev->ib_dev); 1602 + } 1603 + put_device(&w->dev->ib_dev.dev); 1602 1604 kfree(w); 1603 1605 } 1604 1606 ··· 1619 1617 return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list, 1620 1618 num_sge); 1621 1619 1622 - if (dev->ib_dev.reg_state != IB_DEV_REGISTERED) 1623 - return -ENODEV; 1624 - 1625 1620 work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL); 1626 1621 if (!work) 1627 1622 return -ENOMEM; 1628 1623 1629 1624 memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge)); 1630 1625 1626 + get_device(&dev->ib_dev.dev); 1631 1627 work->dev = dev; 1632 1628 work->pf_flags = pf_flags; 1633 1629 work->num_sge = num_sge;
+9 -7
drivers/infiniband/hw/mlx5/qp.c
··· 1912 1912 } 1913 1913 1914 1914 if (!check_flags_mask(ucmd.flags, 1915 + MLX5_QP_FLAG_ALLOW_SCATTER_CQE | 1916 + MLX5_QP_FLAG_BFREG_INDEX | 1917 + MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE | 1918 + MLX5_QP_FLAG_SCATTER_CQE | 1915 1919 MLX5_QP_FLAG_SIGNATURE | 1916 - MLX5_QP_FLAG_SCATTER_CQE | 1917 - MLX5_QP_FLAG_TUNNEL_OFFLOADS | 1918 - MLX5_QP_FLAG_BFREG_INDEX | 1919 - MLX5_QP_FLAG_TYPE_DCT | 1920 - MLX5_QP_FLAG_TYPE_DCI | 1921 - MLX5_QP_FLAG_ALLOW_SCATTER_CQE | 1922 - MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)) 1920 + MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC | 1921 + MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | 1922 + MLX5_QP_FLAG_TUNNEL_OFFLOADS | 1923 + MLX5_QP_FLAG_TYPE_DCI | 1924 + MLX5_QP_FLAG_TYPE_DCT)) 1923 1925 return -EINVAL; 1924 1926 1925 1927 err = get_qp_user_index(to_mucontext(pd->uobject->context),
-1
drivers/infiniband/hw/qib/qib_ud.c
··· 512 512 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 513 513 wc.ex.imm_data = ohdr->u.ud.imm_data; 514 514 wc.wc_flags = IB_WC_WITH_IMM; 515 - tlen -= sizeof(u32); 516 515 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 517 516 wc.ex.imm_data = 0; 518 517 wc.wc_flags = 0;
+6 -1
drivers/infiniband/sw/rdmavt/qp.c
··· 2910 2910 goto op_err; 2911 2911 if (!ret) 2912 2912 goto rnr_nak; 2913 + if (wqe->length > qp->r_len) 2914 + goto inv_err; 2913 2915 break; 2914 2916 2915 2917 case IB_WR_RDMA_WRITE_WITH_IMM: ··· 3080 3078 goto err; 3081 3079 3082 3080 inv_err: 3083 - send_status = IB_WC_REM_INV_REQ_ERR; 3081 + send_status = 3082 + sqp->ibqp.qp_type == IB_QPT_RC ? 3083 + IB_WC_REM_INV_REQ_ERR : 3084 + IB_WC_SUCCESS; 3084 3085 wc.status = IB_WC_LOC_QP_OP_ERR; 3085 3086 goto err; 3086 3087
-1
drivers/infiniband/ulp/ipoib/ipoib.h
··· 248 248 struct list_head list; 249 249 struct net_device *dev; 250 250 struct ipoib_neigh *neigh; 251 - struct ipoib_path *path; 252 251 struct ipoib_tx_buf *tx_ring; 253 252 unsigned int tx_head; 254 253 unsigned int tx_tail;
+1 -2
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 1312 1312 1313 1313 neigh->cm = tx; 1314 1314 tx->neigh = neigh; 1315 - tx->path = path; 1316 1315 tx->dev = dev; 1317 1316 list_add(&tx->list, &priv->cm.start_list); 1318 1317 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); ··· 1370 1371 neigh->daddr + QPN_AND_OPTIONS_OFFSET); 1371 1372 goto free_neigh; 1372 1373 } 1373 - memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec)); 1374 + memcpy(&pathrec, &path->pathrec, sizeof(pathrec)); 1374 1375 1375 1376 spin_unlock_irqrestore(&priv->lock, flags); 1376 1377 netif_tx_unlock_bh(dev);
-14
drivers/input/serio/olpc_apsp.c
··· 23 23 #include <linux/of.h> 24 24 #include <linux/slab.h> 25 25 #include <linux/delay.h> 26 - #include <linux/clk.h> 27 26 28 27 /* 29 28 * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller. ··· 74 75 struct serio *kbio; 75 76 struct serio *padio; 76 77 void __iomem *base; 77 - struct clk *clk; 78 78 int open_count; 79 79 int irq; 80 80 }; ··· 146 148 struct olpc_apsp *priv = port->port_data; 147 149 unsigned int tmp; 148 150 unsigned long l; 149 - int error; 150 151 151 152 if (priv->open_count++ == 0) { 152 - error = clk_prepare_enable(priv->clk); 153 - if (error) 154 - return error; 155 - 156 153 l = readl(priv->base + COMMAND_FIFO_STATUS); 157 154 if (!(l & CMD_STS_MASK)) { 158 155 dev_err(priv->dev, "SP cannot accept commands.\n"); 159 - clk_disable_unprepare(priv->clk); 160 156 return -EIO; 161 157 } 162 158 ··· 171 179 /* Disable interrupt 0 */ 172 180 tmp = readl(priv->base + PJ_INTERRUPT_MASK); 173 181 writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK); 174 - 175 - clk_disable_unprepare(priv->clk); 176 182 } 177 183 } 178 184 ··· 197 207 priv->irq = platform_get_irq(pdev, 0); 198 208 if (priv->irq < 0) 199 209 return priv->irq; 200 - 201 - priv->clk = devm_clk_get(&pdev->dev, "sp"); 202 - if (IS_ERR(priv->clk)) 203 - return PTR_ERR(priv->clk); 204 210 205 211 /* KEYBOARD */ 206 212 kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+13 -6
drivers/iommu/amd_iommu.c
··· 1991 1991 1992 1992 static void do_detach(struct iommu_dev_data *dev_data) 1993 1993 { 1994 + struct protection_domain *domain = dev_data->domain; 1994 1995 struct amd_iommu *iommu; 1995 1996 u16 alias; 1996 1997 1997 1998 iommu = amd_iommu_rlookup_table[dev_data->devid]; 1998 1999 alias = dev_data->alias; 1999 - 2000 - /* decrease reference counters */ 2001 - dev_data->domain->dev_iommu[iommu->index] -= 1; 2002 - dev_data->domain->dev_cnt -= 1; 2003 2000 2004 2001 /* Update data structures */ 2005 2002 dev_data->domain = NULL; ··· 2007 2010 2008 2011 /* Flush the DTE entry */ 2009 2012 device_flush_dte(dev_data); 2013 + 2014 + /* Flush IOTLB */ 2015 + domain_flush_tlb_pde(domain); 2016 + 2017 + /* Wait for the flushes to finish */ 2018 + domain_flush_complete(domain); 2019 + 2020 + /* decrease reference counters - needs to happen after the flushes */ 2021 + domain->dev_iommu[iommu->index] -= 1; 2022 + domain->dev_cnt -= 1; 2010 2023 } 2011 2024 2012 2025 /* ··· 2624 2617 bus_addr = address + s->dma_address + (j << PAGE_SHIFT); 2625 2618 iommu_unmap_page(domain, bus_addr, PAGE_SIZE); 2626 2619 2627 - if (--mapped_pages) 2620 + if (--mapped_pages == 0) 2628 2621 goto out_free_iova; 2629 2622 } 2630 2623 } 2631 2624 2632 2625 out_free_iova: 2633 - free_iova_fast(&dma_dom->iovad, address, npages); 2626 + free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); 2634 2627 2635 2628 out_err: 2636 2629 return 0;
+5 -5
drivers/iommu/intel-iommu.c
··· 363 363 static int dmar_forcedac; 364 364 static int intel_iommu_strict; 365 365 static int intel_iommu_superpage = 1; 366 - static int intel_iommu_sm = 1; 366 + static int intel_iommu_sm; 367 367 static int iommu_identity_mapping; 368 368 369 369 #define IDENTMAP_ALL 1 ··· 456 456 } else if (!strncmp(str, "sp_off", 6)) { 457 457 pr_info("Disable supported super page\n"); 458 458 intel_iommu_superpage = 0; 459 - } else if (!strncmp(str, "sm_off", 6)) { 460 - pr_info("Intel-IOMMU: disable scalable mode support\n"); 461 - intel_iommu_sm = 0; 459 + } else if (!strncmp(str, "sm_on", 5)) { 460 + pr_info("Intel-IOMMU: scalable mode supported\n"); 461 + intel_iommu_sm = 1; 462 462 } else if (!strncmp(str, "tboot_noforce", 13)) { 463 463 printk(KERN_INFO 464 464 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); ··· 5294 5294 struct iommu_resv_region *entry, *next; 5295 5295 5296 5296 list_for_each_entry_safe(entry, next, head, list) { 5297 - if (entry->type == IOMMU_RESV_RESERVED) 5297 + if (entry->type == IOMMU_RESV_MSI) 5298 5298 kfree(entry); 5299 5299 } 5300 5300 }
+4
drivers/iommu/mtk_iommu_v1.c
··· 441 441 iommu_spec.args_count = count; 442 442 443 443 mtk_iommu_create_mapping(dev, &iommu_spec); 444 + 445 + /* dev->iommu_fwspec might have changed */ 446 + fwspec = dev_iommu_fwspec_get(dev); 447 + 444 448 of_node_put(iommu_spec.np); 445 449 } 446 450
+79 -22
drivers/irqchip/irq-gic-v3-its.c
··· 97 97 * The ITS structure - contains most of the infrastructure, with the 98 98 * top-level MSI domain, the command queue, the collections, and the 99 99 * list of devices writing to it. 100 + * 101 + * dev_alloc_lock has to be taken for device allocations, while the 102 + * spinlock must be taken to parse data structures such as the device 103 + * list. 100 104 */ 101 105 struct its_node { 102 106 raw_spinlock_t lock; 107 + struct mutex dev_alloc_lock; 103 108 struct list_head entry; 104 109 void __iomem *base; 105 110 phys_addr_t phys_base; ··· 161 156 void *itt; 162 157 u32 nr_ites; 163 158 u32 device_id; 159 + bool shared; 164 160 }; 165 161 166 162 static struct { ··· 1586 1580 nr_irqs /= 2; 1587 1581 } while (nr_irqs > 0); 1588 1582 1583 + if (!nr_irqs) 1584 + err = -ENOSPC; 1585 + 1589 1586 if (err) 1590 1587 goto out; 1591 1588 ··· 2068 2059 return 0; 2069 2060 } 2070 2061 2062 + static u64 its_clear_vpend_valid(void __iomem *vlpi_base) 2063 + { 2064 + u32 count = 1000000; /* 1s! 
*/ 2065 + bool clean; 2066 + u64 val; 2067 + 2068 + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2069 + val &= ~GICR_VPENDBASER_Valid; 2070 + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 2071 + 2072 + do { 2073 + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2074 + clean = !(val & GICR_VPENDBASER_Dirty); 2075 + if (!clean) { 2076 + count--; 2077 + cpu_relax(); 2078 + udelay(1); 2079 + } 2080 + } while (!clean && count); 2081 + 2082 + return val; 2083 + } 2084 + 2071 2085 static void its_cpu_init_lpis(void) 2072 2086 { 2073 2087 void __iomem *rbase = gic_data_rdist_rd_base(); ··· 2175 2143 val = readl_relaxed(rbase + GICR_CTLR); 2176 2144 val |= GICR_CTLR_ENABLE_LPIS; 2177 2145 writel_relaxed(val, rbase + GICR_CTLR); 2146 + 2147 + if (gic_rdists->has_vlpis) { 2148 + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 2149 + 2150 + /* 2151 + * It's possible for CPU to receive VLPIs before it is 2152 + * sheduled as a vPE, especially for the first CPU, and the 2153 + * VLPI with INTID larger than 2^(IDbits+1) will be considered 2154 + * as out of range and dropped by GIC. 2155 + * So we initialize IDbits to known value to avoid VLPI drop. 2156 + */ 2157 + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; 2158 + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", 2159 + smp_processor_id(), val); 2160 + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); 2161 + 2162 + /* 2163 + * Also clear Valid bit of GICR_VPENDBASER, in case some 2164 + * ancient programming gets left in and has possibility of 2165 + * corrupting memory. 
2166 + */ 2167 + val = its_clear_vpend_valid(vlpi_base); 2168 + WARN_ON(val & GICR_VPENDBASER_Dirty); 2169 + } 2178 2170 2179 2171 /* Make sure the GIC has seen the above */ 2180 2172 dsb(sy); ··· 2478 2422 struct its_device *its_dev; 2479 2423 struct msi_domain_info *msi_info; 2480 2424 u32 dev_id; 2425 + int err = 0; 2481 2426 2482 2427 /* 2483 2428 * We ignore "dev" entierely, and rely on the dev_id that has ··· 2501 2444 return -EINVAL; 2502 2445 } 2503 2446 2447 + mutex_lock(&its->dev_alloc_lock); 2504 2448 its_dev = its_find_device(its, dev_id); 2505 2449 if (its_dev) { 2506 2450 /* ··· 2509 2451 * another alias (PCI bridge of some sort). No need to 2510 2452 * create the device. 2511 2453 */ 2454 + its_dev->shared = true; 2512 2455 pr_debug("Reusing ITT for devID %x\n", dev_id); 2513 2456 goto out; 2514 2457 } 2515 2458 2516 2459 its_dev = its_create_device(its, dev_id, nvec, true); 2517 - if (!its_dev) 2518 - return -ENOMEM; 2460 + if (!its_dev) { 2461 + err = -ENOMEM; 2462 + goto out; 2463 + } 2519 2464 2520 2465 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 2521 2466 out: 2467 + mutex_unlock(&its->dev_alloc_lock); 2522 2468 info->scratchpad[0].ptr = its_dev; 2523 - return 0; 2469 + return err; 2524 2470 } 2525 2471 2526 2472 static struct msi_domain_ops its_msi_domain_ops = { ··· 2628 2566 { 2629 2567 struct irq_data *d = irq_domain_get_irq_data(domain, virq); 2630 2568 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2569 + struct its_node *its = its_dev->its; 2631 2570 int i; 2632 2571 2633 2572 for (i = 0; i < nr_irqs; i++) { ··· 2643 2580 irq_domain_reset_irq_data(data); 2644 2581 } 2645 2582 2646 - /* If all interrupts have been freed, start mopping the floor */ 2647 - if (bitmap_empty(its_dev->event_map.lpi_map, 2583 + mutex_lock(&its->dev_alloc_lock); 2584 + 2585 + /* 2586 + * If all interrupts have been freed, start mopping the 2587 + * floor. This is conditionned on the device not being shared. 
2588 + */ 2589 + if (!its_dev->shared && 2590 + bitmap_empty(its_dev->event_map.lpi_map, 2648 2591 its_dev->event_map.nr_lpis)) { 2649 2592 its_lpi_free(its_dev->event_map.lpi_map, 2650 2593 its_dev->event_map.lpi_base, ··· 2661 2592 its_send_mapd(its_dev, 0); 2662 2593 its_free_device(its_dev); 2663 2594 } 2595 + 2596 + mutex_unlock(&its->dev_alloc_lock); 2664 2597 2665 2598 irq_domain_free_irqs_parent(domain, virq, nr_irqs); 2666 2599 } ··· 2826 2755 static void its_vpe_deschedule(struct its_vpe *vpe) 2827 2756 { 2828 2757 void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 2829 - u32 count = 1000000; /* 1s! */ 2830 - bool clean; 2831 2758 u64 val; 2832 2759 2833 - /* We're being scheduled out */ 2834 - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2835 - val &= ~GICR_VPENDBASER_Valid; 2836 - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 2760 + val = its_clear_vpend_valid(vlpi_base); 2837 2761 2838 - do { 2839 - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2840 - clean = !(val & GICR_VPENDBASER_Dirty); 2841 - if (!clean) { 2842 - count--; 2843 - cpu_relax(); 2844 - udelay(1); 2845 - } 2846 - } while (!clean && count); 2847 - 2848 - if (unlikely(!clean && !count)) { 2762 + if (unlikely(val & GICR_VPENDBASER_Dirty)) { 2849 2763 pr_err_ratelimited("ITS virtual pending table not cleaning\n"); 2850 2764 vpe->idai = false; 2851 2765 vpe->pending_last = true; ··· 3573 3517 } 3574 3518 3575 3519 raw_spin_lock_init(&its->lock); 3520 + mutex_init(&its->dev_alloc_lock); 3576 3521 INIT_LIST_HEAD(&its->entry); 3577 3522 INIT_LIST_HEAD(&its->its_device_list); 3578 3523 typer = gic_read_typer(its_base + GITS_TYPER);
+5 -1
drivers/irqchip/irq-mmp.c
··· 34 34 #define SEL_INT_PENDING (1 << 6) 35 35 #define SEL_INT_NUM_MASK 0x3f 36 36 37 + #define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5) 38 + #define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6) 39 + 37 40 struct icu_chip_data { 38 41 int nr_irqs; 39 42 unsigned int virq_base; ··· 193 190 static const struct mmp_intc_conf mmp2_conf = { 194 191 .conf_enable = 0x20, 195 192 .conf_disable = 0x0, 196 - .conf_mask = 0x7f, 193 + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | 194 + MMP2_ICU_INT_ROUTE_PJ4_FIQ, 197 195 }; 198 196 199 197 static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
+25 -15
drivers/irqchip/irq-xtensa-mx.c
··· 71 71 unsigned int mask = 1u << d->hwirq; 72 72 73 73 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | 74 - XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { 75 - set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - 76 - HW_IRQ_MX_BASE), MIENG); 77 - } else { 78 - mask = __this_cpu_read(cached_irq_mask) & ~mask; 79 - __this_cpu_write(cached_irq_mask, mask); 80 - xtensa_set_sr(mask, intenable); 74 + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { 75 + unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); 76 + 77 + if (ext_irq >= HW_IRQ_MX_BASE) { 78 + set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG); 79 + return; 80 + } 81 81 } 82 + mask = __this_cpu_read(cached_irq_mask) & ~mask; 83 + __this_cpu_write(cached_irq_mask, mask); 84 + xtensa_set_sr(mask, intenable); 82 85 } 83 86 84 87 static void xtensa_mx_irq_unmask(struct irq_data *d) ··· 89 86 unsigned int mask = 1u << d->hwirq; 90 87 91 88 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | 92 - XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { 93 - set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - 94 - HW_IRQ_MX_BASE), MIENGSET); 95 - } else { 96 - mask |= __this_cpu_read(cached_irq_mask); 97 - __this_cpu_write(cached_irq_mask, mask); 98 - xtensa_set_sr(mask, intenable); 89 + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { 90 + unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); 91 + 92 + if (ext_irq >= HW_IRQ_MX_BASE) { 93 + set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET); 94 + return; 95 + } 99 96 } 97 + mask |= __this_cpu_read(cached_irq_mask); 98 + __this_cpu_write(cached_irq_mask, mask); 99 + xtensa_set_sr(mask, intenable); 100 100 } 101 101 102 102 static void xtensa_mx_irq_enable(struct irq_data *d) ··· 119 113 120 114 static int xtensa_mx_irq_retrigger(struct irq_data *d) 121 115 { 122 - xtensa_set_sr(1 << d->hwirq, intset); 116 + unsigned int mask = 1u << d->hwirq; 117 + 118 + if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) 119 + return 0; 120 + xtensa_set_sr(mask, intset); 123 121 return 1; 124 122 } 125 123
+5 -1
drivers/irqchip/irq-xtensa-pic.c
··· 70 70 71 71 static int xtensa_irq_retrigger(struct irq_data *d) 72 72 { 73 - xtensa_set_sr(1 << d->hwirq, intset); 73 + unsigned int mask = 1u << d->hwirq; 74 + 75 + if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) 76 + return 0; 77 + xtensa_set_sr(mask, intset); 74 78 return 1; 75 79 } 76 80
+1 -1
drivers/isdn/mISDN/timerdev.c
··· 170 170 spin_lock_irqsave(&timer->dev->lock, flags); 171 171 if (timer->id >= 0) 172 172 list_move_tail(&timer->list, &timer->dev->expired); 173 - spin_unlock_irqrestore(&timer->dev->lock, flags); 174 173 wake_up_interruptible(&timer->dev->wait); 174 + spin_unlock_irqrestore(&timer->dev->lock, flags); 175 175 } 176 176 177 177 static int
+1 -1
drivers/md/dm-rq.c
··· 131 131 static void rq_completed(struct mapped_device *md) 132 132 { 133 133 /* nudge anyone waiting on suspend queue */ 134 - if (unlikely(waitqueue_active(&md->wait))) 134 + if (unlikely(wq_has_sleeper(&md->wait))) 135 135 wake_up(&md->wait); 136 136 137 137 /*
+6 -2
drivers/md/dm.c
··· 699 699 true, duration, &io->stats_aux); 700 700 701 701 /* nudge anyone waiting on suspend queue */ 702 - if (unlikely(waitqueue_active(&md->wait))) 702 + if (unlikely(wq_has_sleeper(&md->wait))) 703 703 wake_up(&md->wait); 704 704 } 705 705 ··· 1336 1336 return r; 1337 1337 } 1338 1338 1339 - bio_trim(clone, sector - clone->bi_iter.bi_sector, len); 1339 + bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1340 + clone->bi_iter.bi_size = to_bytes(len); 1341 + 1342 + if (bio_integrity(bio)) 1343 + bio_integrity_trim(clone); 1340 1344 1341 1345 return 0; 1342 1346 }
+22 -11
drivers/md/raid5-cache.c
··· 1935 1935 } 1936 1936 1937 1937 static struct stripe_head * 1938 - r5c_recovery_alloc_stripe(struct r5conf *conf, 1939 - sector_t stripe_sect) 1938 + r5c_recovery_alloc_stripe( 1939 + struct r5conf *conf, 1940 + sector_t stripe_sect, 1941 + int noblock) 1940 1942 { 1941 1943 struct stripe_head *sh; 1942 1944 1943 - sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); 1945 + sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0); 1944 1946 if (!sh) 1945 1947 return NULL; /* no more stripe available */ 1946 1948 ··· 2152 2150 stripe_sect); 2153 2151 2154 2152 if (!sh) { 2155 - sh = r5c_recovery_alloc_stripe(conf, stripe_sect); 2153 + sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1); 2156 2154 /* 2157 2155 * cannot get stripe from raid5_get_active_stripe 2158 2156 * try replay some stripes ··· 2161 2159 r5c_recovery_replay_stripes( 2162 2160 cached_stripe_list, ctx); 2163 2161 sh = r5c_recovery_alloc_stripe( 2164 - conf, stripe_sect); 2162 + conf, stripe_sect, 1); 2165 2163 } 2166 2164 if (!sh) { 2165 + int new_size = conf->min_nr_stripes * 2; 2167 2166 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", 2168 2167 mdname(mddev), 2169 - conf->min_nr_stripes * 2); 2170 - raid5_set_cache_size(mddev, 2171 - conf->min_nr_stripes * 2); 2172 - sh = r5c_recovery_alloc_stripe(conf, 2173 - stripe_sect); 2168 + new_size); 2169 + ret = raid5_set_cache_size(mddev, new_size); 2170 + if (conf->min_nr_stripes <= new_size / 2) { 2171 + pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n", 2172 + mdname(mddev), 2173 + ret, 2174 + new_size, 2175 + conf->min_nr_stripes, 2176 + conf->max_nr_stripes); 2177 + return -ENOMEM; 2178 + } 2179 + sh = r5c_recovery_alloc_stripe( 2180 + conf, stripe_sect, 0); 2174 2181 } 2175 2182 if (!sh) { 2176 2183 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. 
Recovery failed.\n", 2177 - mdname(mddev)); 2184 + mdname(mddev)); 2178 2185 return -ENOMEM; 2179 2186 } 2180 2187 list_add_tail(&sh->lru, cached_stripe_list);
+6 -2
drivers/md/raid5.c
··· 6369 6369 int 6370 6370 raid5_set_cache_size(struct mddev *mddev, int size) 6371 6371 { 6372 + int result = 0; 6372 6373 struct r5conf *conf = mddev->private; 6373 6374 6374 6375 if (size <= 16 || size > 32768) ··· 6386 6385 6387 6386 mutex_lock(&conf->cache_size_mutex); 6388 6387 while (size > conf->max_nr_stripes) 6389 - if (!grow_one_stripe(conf, GFP_KERNEL)) 6388 + if (!grow_one_stripe(conf, GFP_KERNEL)) { 6389 + conf->min_nr_stripes = conf->max_nr_stripes; 6390 + result = -ENOMEM; 6390 6391 break; 6392 + } 6391 6393 mutex_unlock(&conf->cache_size_mutex); 6392 6394 6393 - return 0; 6395 + return result; 6394 6396 } 6395 6397 EXPORT_SYMBOL(raid5_set_cache_size); 6396 6398
+1 -1
drivers/mfd/Kconfig
··· 1419 1419 1420 1420 config MFD_TPS68470 1421 1421 bool "TI TPS68470 Power Management / LED chips" 1422 - depends on ACPI && I2C=y 1422 + depends on ACPI && PCI && I2C=y 1423 1423 select MFD_CORE 1424 1424 select REGMAP_I2C 1425 1425 select I2C_DESIGNWARE_PLATFORM
+4 -1
drivers/misc/mei/client.c
··· 401 401 struct mei_cl_cb *cb, *next; 402 402 403 403 list_for_each_entry_safe(cb, next, head, list) { 404 - if (cl == cb->cl) 404 + if (cl == cb->cl) { 405 405 list_del_init(&cb->list); 406 + if (cb->fop_type == MEI_FOP_READ) 407 + mei_io_cb_free(cb); 408 + } 406 409 } 407 410 } 408 411
+2
drivers/misc/mei/hw-me-regs.h
··· 139 139 #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ 140 140 #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ 141 141 142 + #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ 143 + 142 144 /* 143 145 * MEI HW Section 144 146 */
+2
drivers/misc/mei/pci-me.c
··· 105 105 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, 106 106 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, 107 107 108 + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, 109 + 108 110 /* required last entry */ 109 111 {0, } 110 112 };
+43 -30
drivers/misc/mic/vop/vop_main.c
··· 48 48 * @dc: Virtio device control 49 49 * @vpdev: VOP device which is the parent for this virtio device 50 50 * @vr: Buffer for accessing the VRING 51 - * @used: Buffer for used 51 + * @used_virt: Virtual address of used ring 52 + * @used: DMA address of used ring 52 53 * @used_size: Size of the used buffer 53 54 * @reset_done: Track whether VOP reset is complete 54 55 * @virtio_cookie: Cookie returned upon requesting a interrupt ··· 63 62 struct mic_device_ctrl __iomem *dc; 64 63 struct vop_device *vpdev; 65 64 void __iomem *vr[VOP_MAX_VRINGS]; 65 + void *used_virt[VOP_MAX_VRINGS]; 66 66 dma_addr_t used[VOP_MAX_VRINGS]; 67 67 int used_size[VOP_MAX_VRINGS]; 68 68 struct completion reset_done; ··· 263 261 static void vop_del_vq(struct virtqueue *vq, int n) 264 262 { 265 263 struct _vop_vdev *vdev = to_vopvdev(vq->vdev); 266 - struct vring *vr = (struct vring *)(vq + 1); 267 264 struct vop_device *vpdev = vdev->vpdev; 268 265 269 266 dma_unmap_single(&vpdev->dev, vdev->used[n], 270 267 vdev->used_size[n], DMA_BIDIRECTIONAL); 271 - free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); 268 + free_pages((unsigned long)vdev->used_virt[n], 269 + get_order(vdev->used_size[n])); 272 270 vring_del_virtqueue(vq); 273 271 vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); 274 272 vdev->vr[n] = NULL; ··· 284 282 285 283 list_for_each_entry_safe(vq, n, &dev->vqs, list) 286 284 vop_del_vq(vq, idx++); 285 + } 286 + 287 + static struct virtqueue *vop_new_virtqueue(unsigned int index, 288 + unsigned int num, 289 + struct virtio_device *vdev, 290 + bool context, 291 + void *pages, 292 + bool (*notify)(struct virtqueue *vq), 293 + void (*callback)(struct virtqueue *vq), 294 + const char *name, 295 + void *used) 296 + { 297 + bool weak_barriers = false; 298 + struct vring vring; 299 + 300 + vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN); 301 + vring.used = used; 302 + 303 + return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, 304 + notify, 
callback, name); 287 305 } 288 306 289 307 /* ··· 325 303 struct _mic_vring_info __iomem *info; 326 304 void *used; 327 305 int vr_size, _vr_size, err, magic; 328 - struct vring *vr; 329 306 u8 type = ioread8(&vdev->desc->type); 330 307 331 308 if (index >= ioread8(&vdev->desc->num_vq)) ··· 344 323 return ERR_PTR(-ENOMEM); 345 324 vdev->vr[index] = va; 346 325 memset_io(va, 0x0, _vr_size); 347 - vq = vring_new_virtqueue( 348 - index, 349 - le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN, 350 - dev, 351 - false, 352 - ctx, 353 - (void __force *)va, vop_notify, callback, name); 354 - if (!vq) { 355 - err = -ENOMEM; 356 - goto unmap; 357 - } 326 + 358 327 info = va + _vr_size; 359 328 magic = ioread32(&info->magic); 360 329 ··· 353 342 goto unmap; 354 343 } 355 344 356 - /* Allocate and reassign used ring now */ 357 345 vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + 358 346 sizeof(struct vring_used_elem) * 359 347 le16_to_cpu(config.num)); 360 348 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 361 349 get_order(vdev->used_size[index])); 350 + vdev->used_virt[index] = used; 362 351 if (!used) { 363 352 err = -ENOMEM; 364 353 dev_err(_vop_dev(vdev), "%s %d err %d\n", 365 354 __func__, __LINE__, err); 366 - goto del_vq; 355 + goto unmap; 367 356 } 357 + 358 + vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx, 359 + (void __force *)va, vop_notify, callback, 360 + name, used); 361 + if (!vq) { 362 + err = -ENOMEM; 363 + goto free_used; 364 + } 365 + 368 366 vdev->used[index] = dma_map_single(&vpdev->dev, used, 369 367 vdev->used_size[index], 370 368 DMA_BIDIRECTIONAL); ··· 381 361 err = -ENOMEM; 382 362 dev_err(_vop_dev(vdev), "%s %d err %d\n", 383 363 __func__, __LINE__, err); 384 - goto free_used; 364 + goto del_vq; 385 365 } 386 366 writeq(vdev->used[index], &vqconfig->used_address); 387 - /* 388 - * To reassign the used ring here we are directly accessing 389 - * struct vring_virtqueue which is a private data structure 390 - * in 
virtio_ring.c. At the minimum, a BUILD_BUG_ON() in 391 - * vring_new_virtqueue() would ensure that 392 - * (&vq->vring == (struct vring *) (&vq->vq + 1)); 393 - */ 394 - vr = (struct vring *)(vq + 1); 395 - vr->used = used; 396 367 397 368 vq->priv = vdev; 398 369 return vq; 370 + del_vq: 371 + vring_del_virtqueue(vq); 399 372 free_used: 400 373 free_pages((unsigned long)used, 401 374 get_order(vdev->used_size[index])); 402 - del_vq: 403 - vring_del_virtqueue(vq); 404 375 unmap: 405 376 vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); 406 377 return ERR_PTR(err); ··· 593 582 int ret = -1; 594 583 595 584 if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { 585 + struct device *dev = get_device(&vdev->vdev.dev); 586 + 596 587 dev_dbg(&vpdev->dev, 597 588 "%s %d config_change %d type %d vdev %p\n", 598 589 __func__, __LINE__, ··· 606 593 iowrite8(-1, &dc->h2c_vdev_db); 607 594 if (status & VIRTIO_CONFIG_S_DRIVER_OK) 608 595 wait_for_completion(&vdev->reset_done); 609 - put_device(&vdev->vdev.dev); 596 + put_device(dev); 610 597 iowrite8(1, &dc->guest_ack); 611 598 dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", 612 599 __func__, __LINE__, ioread8(&dc->guest_ack));
+2
drivers/mmc/host/bcm2835.c
··· 1431 1431 1432 1432 err: 1433 1433 dev_dbg(dev, "%s -> err %d\n", __func__, ret); 1434 + if (host->dma_chan_rxtx) 1435 + dma_release_channel(host->dma_chan_rxtx); 1434 1436 mmc_free_host(mmc); 1435 1437 1436 1438 return ret;
+1 -1
drivers/mmc/host/mtk-sd.c
··· 846 846 847 847 if (timing == MMC_TIMING_MMC_HS400 && 848 848 host->dev_comp->hs400_tune) 849 - sdr_set_field(host->base + PAD_CMD_TUNE, 849 + sdr_set_field(host->base + tune_reg, 850 850 MSDC_PAD_TUNE_CMDRRDLY, 851 851 host->hs400_cmd_int_delay); 852 852 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
+4 -1
drivers/mtd/mtdpart.c
··· 480 480 /* let's register it anyway to preserve ordering */ 481 481 slave->offset = 0; 482 482 slave->mtd.size = 0; 483 + 484 + /* Initialize ->erasesize to make add_mtd_device() happy. */ 485 + slave->mtd.erasesize = parent->erasesize; 486 + 483 487 printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", 484 488 part->name); 485 489 goto out_register; ··· 636 632 mutex_unlock(&mtd_partitions_mutex); 637 633 638 634 free_partition(new); 639 - pr_info("%s:%i\n", __func__, __LINE__); 640 635 641 636 return ret; 642 637 }
+6 -7
drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
··· 155 155 156 156 /* 157 157 * Reset BCH here, too. We got failures otherwise :( 158 - * See later BCH reset for explanation of MX23 handling 158 + * See later BCH reset for explanation of MX23 and MX28 handling 159 159 */ 160 - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); 160 + ret = gpmi_reset_block(r->bch_regs, 161 + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); 161 162 if (ret) 162 163 goto err_out; 163 164 ··· 264 263 /* 265 264 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this 266 265 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. 267 - * On the other hand, the MX28 needs the reset, because one case has been 268 - * seen where the BCH produced ECC errors constantly after 10000 269 - * consecutive reboots. The latter case has not been seen on the MX23 270 - * yet, still we don't know if it could happen there as well. 266 + * and MX28. 271 267 */ 272 - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); 268 + ret = gpmi_reset_block(r->bch_regs, 269 + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); 273 270 if (ret) 274 271 goto err_out; 275 272
+1
drivers/mtd/nand/raw/nand_base.c
··· 410 410 411 411 /** 412 412 * nand_fill_oob - [INTERN] Transfer client buffer to oob 413 + * @chip: NAND chip object 413 414 * @oob: oob data buffer 414 415 * @len: oob data write length 415 416 * @ops: oob ops structure
+1 -1
drivers/mtd/nand/raw/nand_bbt.c
··· 158 158 159 159 /** 160 160 * read_bbt - [GENERIC] Read the bad block table starting from page 161 - * @chip: NAND chip object 161 + * @this: NAND chip object 162 162 * @buf: temporary buffer 163 163 * @page: the starting page 164 164 * @num: the number of bbt descriptors to read
+22 -24
drivers/mtd/nand/spi/core.c
··· 304 304 struct nand_device *nand = spinand_to_nand(spinand); 305 305 struct mtd_info *mtd = nanddev_to_mtd(nand); 306 306 struct nand_page_io_req adjreq = *req; 307 - unsigned int nbytes = 0; 308 - void *buf = NULL; 307 + void *buf = spinand->databuf; 308 + unsigned int nbytes; 309 309 u16 column = 0; 310 310 int ret; 311 311 312 - memset(spinand->databuf, 0xff, 313 - nanddev_page_size(nand) + 314 - nanddev_per_page_oobsize(nand)); 312 + /* 313 + * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset 314 + * the cache content to 0xFF (depends on vendor implementation), so we 315 + * must fill the page cache entirely even if we only want to program 316 + * the data portion of the page, otherwise we might corrupt the BBM or 317 + * user data previously programmed in OOB area. 318 + */ 319 + nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); 320 + memset(spinand->databuf, 0xff, nbytes); 321 + adjreq.dataoffs = 0; 322 + adjreq.datalen = nanddev_page_size(nand); 323 + adjreq.databuf.out = spinand->databuf; 324 + adjreq.ooblen = nanddev_per_page_oobsize(nand); 325 + adjreq.ooboffs = 0; 326 + adjreq.oobbuf.out = spinand->oobbuf; 315 327 316 - if (req->datalen) { 328 + if (req->datalen) 317 329 memcpy(spinand->databuf + req->dataoffs, req->databuf.out, 318 330 req->datalen); 319 - adjreq.dataoffs = 0; 320 - adjreq.datalen = nanddev_page_size(nand); 321 - adjreq.databuf.out = spinand->databuf; 322 - nbytes = adjreq.datalen; 323 - buf = spinand->databuf; 324 - } 325 331 326 332 if (req->ooblen) { 327 333 if (req->mode == MTD_OPS_AUTO_OOB) ··· 338 332 else 339 333 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, 340 334 req->ooblen); 341 - 342 - adjreq.ooblen = nanddev_per_page_oobsize(nand); 343 - adjreq.ooboffs = 0; 344 - nbytes += nanddev_per_page_oobsize(nand); 345 - if (!buf) { 346 - buf = spinand->oobbuf; 347 - column = nanddev_page_size(nand); 348 - } 349 335 } 350 336 351 337 spinand_cache_op_adjust_colum(spinand, &adjreq, 
&column); ··· 368 370 369 371 /* 370 372 * We need to use the RANDOM LOAD CACHE operation if there's 371 - * more than one iteration, because the LOAD operation resets 372 - * the cache to 0xff. 373 + * more than one iteration, because the LOAD operation might 374 + * reset the cache to 0xff. 373 375 */ 374 376 if (nbytes) { 375 377 column = op.addr.val; ··· 1016 1018 for (i = 0; i < nand->memorg.ntargets; i++) { 1017 1019 ret = spinand_select_target(spinand, i); 1018 1020 if (ret) 1019 - goto err_free_bufs; 1021 + goto err_manuf_cleanup; 1020 1022 1021 1023 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); 1022 1024 if (ret) 1023 - goto err_free_bufs; 1025 + goto err_manuf_cleanup; 1024 1026 } 1025 1027 1026 1028 ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
+1 -4
drivers/net/caif/caif_serial.c
··· 257 257 if (skb->len == 0) { 258 258 struct sk_buff *tmp = skb_dequeue(&ser->head); 259 259 WARN_ON(tmp != skb); 260 - if (in_interrupt()) 261 - dev_kfree_skb_irq(skb); 262 - else 263 - kfree_skb(skb); 260 + dev_consume_skb_any(skb); 264 261 } 265 262 } 266 263 /* Send flow off if queue is empty */
-3
drivers/net/dsa/b53/b53_srab.c
··· 511 511 /* Clear all pending interrupts */ 512 512 writel(0xffffffff, priv->regs + B53_SRAB_INTR); 513 513 514 - if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) 515 - return; 516 - 517 514 for (i = 0; i < B53_N_PORTS; i++) { 518 515 port = &priv->port_intrs[i]; 519 516
+12 -9
drivers/net/dsa/mv88e6xxx/global1_atu.c
··· 314 314 { 315 315 struct mv88e6xxx_chip *chip = dev_id; 316 316 struct mv88e6xxx_atu_entry entry; 317 + int spid; 317 318 int err; 318 319 u16 val; 319 320 ··· 337 336 if (err) 338 337 goto out; 339 338 339 + spid = entry.state; 340 + 340 341 if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { 341 342 dev_err_ratelimited(chip->dev, 342 343 "ATU age out violation for %pM\n", ··· 347 344 348 345 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { 349 346 dev_err_ratelimited(chip->dev, 350 - "ATU member violation for %pM portvec %x\n", 351 - entry.mac, entry.portvec); 352 - chip->ports[entry.portvec].atu_member_violation++; 347 + "ATU member violation for %pM portvec %x spid %d\n", 348 + entry.mac, entry.portvec, spid); 349 + chip->ports[spid].atu_member_violation++; 353 350 } 354 351 355 352 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { 356 353 dev_err_ratelimited(chip->dev, 357 - "ATU miss violation for %pM portvec %x\n", 358 - entry.mac, entry.portvec); 359 - chip->ports[entry.portvec].atu_miss_violation++; 354 + "ATU miss violation for %pM portvec %x spid %d\n", 355 + entry.mac, entry.portvec, spid); 356 + chip->ports[spid].atu_miss_violation++; 360 357 } 361 358 362 359 if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { 363 360 dev_err_ratelimited(chip->dev, 364 - "ATU full violation for %pM portvec %x\n", 365 - entry.mac, entry.portvec); 366 - chip->ports[entry.portvec].atu_full_violation++; 361 + "ATU full violation for %pM portvec %x spid %d\n", 362 + entry.mac, entry.portvec, spid); 363 + chip->ports[spid].atu_full_violation++; 367 364 } 368 365 mutex_unlock(&chip->reg_lock); 369 366
+1 -1
drivers/net/dsa/mv88e6xxx/serdes.c
··· 664 664 if (port < 9) 665 665 return 0; 666 666 667 - return mv88e6390_serdes_irq_setup(chip, port); 667 + return mv88e6390x_serdes_irq_setup(chip, port); 668 668 } 669 669 670 670 void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+1 -1
drivers/net/ethernet/alteon/acenic.c
··· 2059 2059 if (skb) { 2060 2060 dev->stats.tx_packets++; 2061 2061 dev->stats.tx_bytes += skb->len; 2062 - dev_kfree_skb_irq(skb); 2062 + dev_consume_skb_irq(skb); 2063 2063 info->skb = NULL; 2064 2064 } 2065 2065
+2 -1
drivers/net/ethernet/altera/altera_msgdma.c
··· 145 145 & 0xffff; 146 146 147 147 if (inuse) { /* Tx FIFO is not empty */ 148 - ready = priv->tx_prod - priv->tx_cons - inuse - 1; 148 + ready = max_t(int, 149 + priv->tx_prod - priv->tx_cons - inuse - 1, 0); 149 150 } else { 150 151 /* Check for buffered last packet */ 151 152 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
+1 -1
drivers/net/ethernet/amd/amd8111e.c
··· 666 666 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], 667 667 lp->tx_skbuff[tx_index]->len, 668 668 PCI_DMA_TODEVICE); 669 - dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); 669 + dev_consume_skb_irq(lp->tx_skbuff[tx_index]); 670 670 lp->tx_skbuff[tx_index] = NULL; 671 671 lp->tx_dma_addr[tx_index] = 0; 672 672 }
+1 -1
drivers/net/ethernet/apple/bmac.c
··· 777 777 778 778 if (bp->tx_bufs[bp->tx_empty]) { 779 779 ++dev->stats.tx_packets; 780 - dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); 780 + dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]); 781 781 } 782 782 bp->tx_bufs[bp->tx_empty] = NULL; 783 783 bp->tx_fullup = 0;
+2 -2
drivers/net/ethernet/broadcom/b44.c
··· 638 638 bytes_compl += skb->len; 639 639 pkts_compl++; 640 640 641 - dev_kfree_skb_irq(skb); 641 + dev_consume_skb_irq(skb); 642 642 } 643 643 644 644 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); ··· 1012 1012 } 1013 1013 1014 1014 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); 1015 - dev_kfree_skb_any(skb); 1015 + dev_consume_skb_any(skb); 1016 1016 skb = bounce_skb; 1017 1017 } 1018 1018
+10 -15
drivers/net/ethernet/broadcom/bcmsysport.c
··· 520 520 struct ethtool_wolinfo *wol) 521 521 { 522 522 struct bcm_sysport_priv *priv = netdev_priv(dev); 523 - u32 reg; 524 523 525 524 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 526 525 wol->wolopts = priv->wolopts; ··· 527 528 if (!(priv->wolopts & WAKE_MAGICSECURE)) 528 529 return; 529 530 530 - /* Return the programmed SecureOn password */ 531 - reg = umac_readl(priv, UMAC_PSW_MS); 532 - put_unaligned_be16(reg, &wol->sopass[0]); 533 - reg = umac_readl(priv, UMAC_PSW_LS); 534 - put_unaligned_be32(reg, &wol->sopass[2]); 531 + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); 535 532 } 536 533 537 534 static int bcm_sysport_set_wol(struct net_device *dev, ··· 543 548 if (wol->wolopts & ~supported) 544 549 return -EINVAL; 545 550 546 - /* Program the SecureOn password */ 547 - if (wol->wolopts & WAKE_MAGICSECURE) { 548 - umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), 549 - UMAC_PSW_MS); 550 - umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), 551 - UMAC_PSW_LS); 552 - } 551 + if (wol->wolopts & WAKE_MAGICSECURE) 552 + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); 553 553 554 554 /* Flag the device and relevant IRQ as wakeup capable */ 555 555 if (wol->wolopts) { ··· 2639 2649 unsigned int index, i = 0; 2640 2650 u32 reg; 2641 2651 2642 - /* Password has already been programmed */ 2643 2652 reg = umac_readl(priv, UMAC_MPD_CTRL); 2644 2653 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) 2645 2654 reg |= MPD_EN; 2646 2655 reg &= ~PSW_EN; 2647 - if (priv->wolopts & WAKE_MAGICSECURE) 2656 + if (priv->wolopts & WAKE_MAGICSECURE) { 2657 + /* Program the SecureOn password */ 2658 + umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), 2659 + UMAC_PSW_MS); 2660 + umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), 2661 + UMAC_PSW_LS); 2648 2662 reg |= PSW_EN; 2663 + } 2649 2664 umac_writel(priv, reg, UMAC_MPD_CTRL); 2650 2665 2651 2666 if (priv->wolopts & WAKE_FILTER) {
+2
drivers/net/ethernet/broadcom/bcmsysport.h
··· 12 12 #define __BCM_SYSPORT_H 13 13 14 14 #include <linux/bitmap.h> 15 + #include <linux/ethtool.h> 15 16 #include <linux/if_vlan.h> 16 17 #include <linux/net_dim.h> 17 18 ··· 779 778 unsigned int crc_fwd:1; 780 779 u16 rev; 781 780 u32 wolopts; 781 + u8 sopass[SOPASS_MAX]; 782 782 unsigned int wol_irq_disabled:1; 783 783 784 784 /* MIB related fields */
+7 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4973 4973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4974 4974 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4975 4975 u32 map_idx = ring->map_idx; 4976 + unsigned int vector; 4976 4977 4978 + vector = bp->irq_tbl[map_idx].vector; 4979 + disable_irq_nosync(vector); 4977 4980 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 4978 - if (rc) 4981 + if (rc) { 4982 + enable_irq(vector); 4979 4983 goto err_out; 4984 + } 4980 4985 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 4981 4986 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4987 + enable_irq(vector); 4982 4988 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4983 4989 4984 4990 if (!i) {
+1 -1
drivers/net/ethernet/broadcom/sb1250-mac.c
··· 1288 1288 * for transmits, we just free buffers. 1289 1289 */ 1290 1290 1291 - dev_kfree_skb_irq(sb); 1291 + dev_consume_skb_irq(sb); 1292 1292 1293 1293 /* 1294 1294 * .. and advance to the next buffer.
+3
drivers/net/ethernet/cadence/macb.h
··· 643 643 #define MACB_CAPS_JUMBO 0x00000020 644 644 #define MACB_CAPS_GEM_HAS_PTP 0x00000040 645 645 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 646 + #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 646 647 #define MACB_CAPS_FIFO_MODE 0x10000000 647 648 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 648 649 #define MACB_CAPS_SG_DISABLED 0x40000000 ··· 1215 1214 1216 1215 int rx_bd_rd_prefetch; 1217 1216 int tx_bd_rd_prefetch; 1217 + 1218 + u32 rx_intr_mask; 1218 1219 }; 1219 1220 1220 1221 #ifdef CONFIG_MACB_USE_HWSTAMP
+17 -11
drivers/net/ethernet/cadence/macb_main.c
··· 56 56 /* level of occupied TX descriptors under which we wake up TX process */ 57 57 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) 58 58 59 - #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 60 - | MACB_BIT(ISR_ROVR)) 59 + #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR)) 61 60 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 62 61 | MACB_BIT(ISR_RLE) \ 63 62 | MACB_BIT(TXERR)) ··· 1269 1270 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1270 1271 napi_reschedule(napi); 1271 1272 } else { 1272 - queue_writel(queue, IER, MACB_RX_INT_FLAGS); 1273 + queue_writel(queue, IER, bp->rx_intr_mask); 1273 1274 } 1274 1275 } 1275 1276 ··· 1287 1288 u32 ctrl; 1288 1289 1289 1290 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1290 - queue_writel(queue, IDR, MACB_RX_INT_FLAGS | 1291 + queue_writel(queue, IDR, bp->rx_intr_mask | 1291 1292 MACB_TX_INT_FLAGS | 1292 1293 MACB_BIT(HRESP)); 1293 1294 } ··· 1317 1318 1318 1319 /* Enable interrupts */ 1319 1320 queue_writel(queue, IER, 1320 - MACB_RX_INT_FLAGS | 1321 + bp->rx_intr_mask | 1321 1322 MACB_TX_INT_FLAGS | 1322 1323 MACB_BIT(HRESP)); 1323 1324 } ··· 1371 1372 (unsigned int)(queue - bp->queues), 1372 1373 (unsigned long)status); 1373 1374 1374 - if (status & MACB_RX_INT_FLAGS) { 1375 + if (status & bp->rx_intr_mask) { 1375 1376 /* There's no point taking any more interrupts 1376 1377 * until we have processed the buffers. The 1377 1378 * scheduling call may fail if the poll routine 1378 1379 * is already scheduled, so disable interrupts 1379 1380 * now. 
1380 1381 */ 1381 - queue_writel(queue, IDR, MACB_RX_INT_FLAGS); 1382 + queue_writel(queue, IDR, bp->rx_intr_mask); 1382 1383 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1383 1384 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1384 1385 ··· 1411 1412 /* There is a hardware issue under heavy load where DMA can 1412 1413 * stop, this causes endless "used buffer descriptor read" 1413 1414 * interrupts but it can be cleared by re-enabling RX. See 1414 - * the at91 manual, section 41.3.1 or the Zynq manual 1415 - * section 16.7.4 for details. 1415 + * the at91rm9200 manual, section 41.3.1 or the Zynq manual 1416 + * section 16.7.4 for details. RXUBR is only enabled for 1417 + * these two versions. 1416 1418 */ 1417 1419 if (status & MACB_BIT(RXUBR)) { 1418 1420 ctrl = macb_readl(bp, NCR); ··· 2259 2259 2260 2260 /* Enable interrupts */ 2261 2261 queue_writel(queue, IER, 2262 - MACB_RX_INT_FLAGS | 2262 + bp->rx_intr_mask | 2263 2263 MACB_TX_INT_FLAGS | 2264 2264 MACB_BIT(HRESP)); 2265 2265 } ··· 3907 3907 }; 3908 3908 3909 3909 static const struct macb_config emac_config = { 3910 + .caps = MACB_CAPS_NEEDS_RSTONUBR, 3910 3911 .clk_init = at91ether_clk_init, 3911 3912 .init = at91ether_init, 3912 3913 }; ··· 3929 3928 }; 3930 3929 3931 3930 static const struct macb_config zynq_config = { 3932 - .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, 3931 + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | 3932 + MACB_CAPS_NEEDS_RSTONUBR, 3933 3933 .dma_burst_length = 16, 3934 3934 .clk_init = macb_clk_init, 3935 3935 .init = macb_init, ··· 4084 4082 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * 4085 4083 macb_dma_desc_get_size(bp); 4086 4084 } 4085 + 4086 + bp->rx_intr_mask = MACB_RX_INT_FLAGS; 4087 + if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) 4088 + bp->rx_intr_mask |= MACB_BIT(RXUBR); 4087 4089 4088 4090 mac = of_get_mac_address(np); 4089 4091 if (mac) {
-1
drivers/net/ethernet/cavium/Kconfig
··· 54 54 tristate "Cavium PTP coprocessor as PTP clock" 55 55 depends on 64BIT && PCI 56 56 imply PTP_1588_CLOCK 57 - default y 58 57 ---help--- 59 58 This driver adds support for the Precision Time Protocol Clocks and 60 59 Timestamping coprocessor (PTP) found on Cavium processors.
+2 -1
drivers/net/ethernet/cisco/enic/enic_main.c
··· 1434 1434 * csum is correct or is zero. 1435 1435 */ 1436 1436 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && 1437 - tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { 1437 + tcp_udp_csum_ok && outer_csum_ok && 1438 + (ipv4_csum_ok || ipv6)) { 1438 1439 skb->ip_summed = CHECKSUM_UNNECESSARY; 1439 1440 skb->csum_level = encap; 1440 1441 }
+1 -1
drivers/net/ethernet/dec/tulip/de2104x.c
··· 585 585 netif_dbg(de, tx_done, de->dev, 586 586 "tx done, slot %d\n", tx_tail); 587 587 } 588 - dev_kfree_skb_irq(skb); 588 + dev_consume_skb_irq(skb); 589 589 } 590 590 591 591 next:
+1 -1
drivers/net/ethernet/freescale/fec_mpc52xx.c
··· 369 369 dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, 370 370 DMA_TO_DEVICE); 371 371 372 - dev_kfree_skb_irq(skb); 372 + dev_consume_skb_irq(skb); 373 373 } 374 374 spin_unlock(&priv->lock); 375 375
+2
drivers/net/ethernet/freescale/ucc_geth.c
··· 1879 1879 u16 i, j; 1880 1880 u8 __iomem *bd; 1881 1881 1882 + netdev_reset_queue(ugeth->ndev); 1883 + 1882 1884 ug_info = ugeth->ug_info; 1883 1885 uf_info = &ug_info->uf_info; 1884 1886
+5
drivers/net/ethernet/hisilicon/hns/hns_enet.c
··· 2418 2418 out_notify_fail: 2419 2419 (void)cancel_work_sync(&priv->service_task); 2420 2420 out_read_prop_fail: 2421 + /* safe for ACPI FW */ 2422 + of_node_put(to_of_node(priv->fwnode)); 2421 2423 free_netdev(ndev); 2422 2424 return ret; 2423 2425 } ··· 2448 2446 2449 2447 set_bit(NIC_STATE_REMOVING, &priv->state); 2450 2448 (void)cancel_work_sync(&priv->service_task); 2449 + 2450 + /* safe for ACPI FW */ 2451 + of_node_put(to_of_node(priv->fwnode)); 2451 2452 2452 2453 free_netdev(ndev); 2453 2454 return 0;
+9 -7
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
··· 1157 1157 */ 1158 1158 static int hns_nic_nway_reset(struct net_device *netdev) 1159 1159 { 1160 - int ret = 0; 1161 1160 struct phy_device *phy = netdev->phydev; 1162 1161 1163 - if (netif_running(netdev)) { 1164 - /* if autoneg is disabled, don't restart auto-negotiation */ 1165 - if (phy && phy->autoneg == AUTONEG_ENABLE) 1166 - ret = genphy_restart_aneg(phy); 1167 - } 1162 + if (!netif_running(netdev)) 1163 + return 0; 1168 1164 1169 - return ret; 1165 + if (!phy) 1166 + return -EOPNOTSUPP; 1167 + 1168 + if (phy->autoneg != AUTONEG_ENABLE) 1169 + return -EINVAL; 1170 + 1171 + return genphy_restart_aneg(phy); 1170 1172 } 1171 1173 1172 1174 static u32
+1 -1
drivers/net/ethernet/hisilicon/hns_mdio.c
··· 321 321 } 322 322 323 323 hns_mdio_cmd_write(mdio_dev, is_c45, 324 - MDIO_C45_WRITE_ADDR, phy_id, devad); 324 + MDIO_C45_READ, phy_id, devad); 325 325 } 326 326 327 327 /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
+1 -1
drivers/net/ethernet/i825xx/82596.c
··· 1310 1310 dev->stats.tx_aborted_errors++; 1311 1311 } 1312 1312 1313 - dev_kfree_skb_irq(skb); 1313 + dev_consume_skb_irq(skb); 1314 1314 1315 1315 tx_cmd->cmd.command = 0; /* Mark free */ 1316 1316 break;
+4 -2
drivers/net/ethernet/marvell/skge.c
··· 152 152 memset(p, 0, regs->len); 153 153 memcpy_fromio(p, io, B3_RAM_ADDR); 154 154 155 - memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 156 - regs->len - B3_RI_WTO_R1); 155 + if (regs->len > B3_RI_WTO_R1) { 156 + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 157 + regs->len - B3_RI_WTO_R1); 158 + } 157 159 } 158 160 159 161 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
+5 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
··· 256 256 e->m_neigh.family = n->ops->family; 257 257 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 258 258 e->out_dev = out_dev; 259 + e->route_dev = route_dev; 259 260 260 261 /* It's important to add the neigh to the hash table before checking 261 262 * the neigh validity state. So if we'll get a notification, in case the ··· 370 369 e->m_neigh.family = n->ops->family; 371 370 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 372 371 e->out_dev = out_dev; 372 + e->route_dev = route_dev; 373 373 374 374 /* It's importent to add the neigh to the hash table before checking 375 375 * the neigh validity state. So if we'll get a notification, in case the ··· 614 612 struct mlx5_flow_spec *spec, 615 613 struct tc_cls_flower_offload *f, 616 614 void *headers_c, 617 - void *headers_v) 615 + void *headers_v, u8 *match_level) 618 616 { 619 617 int tunnel_type; 620 618 int err = 0; 621 619 622 620 tunnel_type = mlx5e_tc_tun_get_type(filter_dev); 623 621 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 622 + *match_level = MLX5_MATCH_L4; 624 623 err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, 625 624 headers_c, headers_v); 626 625 } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { 626 + *match_level = MLX5_MATCH_L3; 627 627 err = mlx5e_tc_tun_parse_gretap(priv, spec, f, 628 628 headers_c, headers_v); 629 629 } else {
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
··· 39 39 struct mlx5_flow_spec *spec, 40 40 struct tc_cls_flower_offload *f, 41 41 void *headers_c, 42 - void *headers_v); 42 + void *headers_v, u8 *match_level); 43 43 44 44 #endif //__MLX5_EN_TC_TUNNEL_H__
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 950 950 if (params->rx_dim_enabled) 951 951 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); 952 952 953 - if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) 953 + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) 954 954 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); 955 955 956 956 return 0;
+27 -2
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 596 596 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { 597 597 ether_addr_copy(e->h_dest, ha); 598 598 ether_addr_copy(eth->h_dest, ha); 599 + /* Update the encap source mac, in case that we delete 600 + * the flows when encap source mac changed. 601 + */ 602 + ether_addr_copy(eth->h_source, e->route_dev->dev_addr); 599 603 600 604 mlx5e_tc_encap_flows_add(priv, e); 601 605 } ··· 1130 1126 struct mlx5e_priv *priv = netdev_priv(dev); 1131 1127 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1132 1128 struct mlx5_eswitch_rep *rep = rpriv->rep; 1133 - int ret; 1129 + int ret, pf_num; 1134 1130 1135 - ret = snprintf(buf, len, "%d", rep->vport - 1); 1131 + ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num); 1132 + if (ret) 1133 + return ret; 1134 + 1135 + if (rep->vport == FDB_UPLINK_VPORT) 1136 + ret = snprintf(buf, len, "p%d", pf_num); 1137 + else 1138 + ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1); 1139 + 1136 1140 if (ret >= len) 1137 1141 return -EOPNOTSUPP; 1138 1142 ··· 1297 1285 return 0; 1298 1286 } 1299 1287 1288 + static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, 1289 + __be16 vlan_proto) 1290 + { 1291 + netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n"); 1292 + 1293 + if (vlan != 0) 1294 + return -EOPNOTSUPP; 1295 + 1296 + /* allow setting 0-vid for compatibility with libvirt */ 1297 + return 0; 1298 + } 1299 + 1300 1300 static const struct switchdev_ops mlx5e_rep_switchdev_ops = { 1301 1301 .switchdev_port_attr_get = mlx5e_attr_get, 1302 1302 }; ··· 1343 1319 .ndo_set_vf_rate = mlx5e_set_vf_rate, 1344 1320 .ndo_get_vf_config = mlx5e_get_vf_config, 1345 1321 .ndo_get_vf_stats = mlx5e_get_vf_stats, 1322 + .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan, 1346 1323 }; 1347 1324 1348 1325 bool mlx5e_eswitch_rep(struct net_device *netdev)
+1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
··· 148 148 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 149 149 150 150 struct net_device *out_dev; 151 + struct net_device *route_dev; 151 152 int tunnel_type; 152 153 int tunnel_hlen; 153 154 int reformat_type;
+32 -20
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 128 128 struct net_device *filter_dev; 129 129 struct mlx5_flow_spec spec; 130 130 int num_mod_hdr_actions; 131 + int max_mod_hdr_actions; 131 132 void *mod_hdr_actions; 132 133 int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; 133 134 }; ··· 1303 1302 static int parse_tunnel_attr(struct mlx5e_priv *priv, 1304 1303 struct mlx5_flow_spec *spec, 1305 1304 struct tc_cls_flower_offload *f, 1306 - struct net_device *filter_dev) 1305 + struct net_device *filter_dev, u8 *match_level) 1307 1306 { 1308 1307 struct netlink_ext_ack *extack = f->common.extack; 1309 1308 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ··· 1318 1317 int err = 0; 1319 1318 1320 1319 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, 1321 - headers_c, headers_v); 1320 + headers_c, headers_v, match_level); 1322 1321 if (err) { 1323 1322 NL_SET_ERR_MSG_MOD(extack, 1324 1323 "failed to parse tunnel attributes"); ··· 1427 1426 struct mlx5_flow_spec *spec, 1428 1427 struct tc_cls_flower_offload *f, 1429 1428 struct net_device *filter_dev, 1430 - u8 *match_level) 1429 + u8 *match_level, u8 *tunnel_match_level) 1431 1430 { 1432 1431 struct netlink_ext_ack *extack = f->common.extack; 1433 1432 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ··· 1478 1477 switch (key->addr_type) { 1479 1478 case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 1480 1479 case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 1481 - if (parse_tunnel_attr(priv, spec, f, filter_dev)) 1480 + if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) 1482 1481 return -EOPNOTSUPP; 1483 1482 break; 1484 1483 default: ··· 1827 1826 struct mlx5_core_dev *dev = priv->mdev; 1828 1827 struct mlx5_eswitch *esw = dev->priv.eswitch; 1829 1828 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1829 + u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; 1830 1830 struct mlx5_eswitch_rep *rep; 1831 - u8 match_level; 1832 1831 int err; 1833 1832 1834 - err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); 1833 + err 
= __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); 1835 1834 1836 1835 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { 1837 1836 rep = rpriv->rep; ··· 1847 1846 } 1848 1847 } 1849 1848 1850 - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) 1849 + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 1851 1850 flow->esw_attr->match_level = match_level; 1852 - else 1851 + flow->esw_attr->tunnel_match_level = tunnel_match_level; 1852 + } else { 1853 1853 flow->nic_attr->match_level = match_level; 1854 + } 1854 1855 1855 1856 return err; 1856 1857 } ··· 1937 1934 OFFLOAD(UDP_DPORT, 2, udp.dest, 0), 1938 1935 }; 1939 1936 1940 - /* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at 1941 - * max from the SW pedit action. On success, it says how many HW actions were 1942 - * actually parsed. 1937 + /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at 1938 + * max from the SW pedit action. On success, attr->num_mod_hdr_actions 1939 + * says how many HW actions were actually parsed. 
1943 1940 */ 1944 1941 static int offload_pedit_fields(struct pedit_headers *masks, 1945 1942 struct pedit_headers *vals, ··· 1963 1960 add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; 1964 1961 1965 1962 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); 1966 - action = parse_attr->mod_hdr_actions; 1967 - max_actions = parse_attr->num_mod_hdr_actions; 1968 - nactions = 0; 1963 + action = parse_attr->mod_hdr_actions + 1964 + parse_attr->num_mod_hdr_actions * action_size; 1965 + 1966 + max_actions = parse_attr->max_mod_hdr_actions; 1967 + nactions = parse_attr->num_mod_hdr_actions; 1969 1968 1970 1969 for (i = 0; i < ARRAY_SIZE(fields); i++) { 1971 1970 f = &fields[i]; ··· 2078 2073 if (!parse_attr->mod_hdr_actions) 2079 2074 return -ENOMEM; 2080 2075 2081 - parse_attr->num_mod_hdr_actions = max_actions; 2076 + parse_attr->max_mod_hdr_actions = max_actions; 2082 2077 return 0; 2083 2078 } 2084 2079 ··· 2124 2119 goto out_err; 2125 2120 } 2126 2121 2127 - err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); 2128 - if (err) 2129 - goto out_err; 2122 + if (!parse_attr->mod_hdr_actions) { 2123 + err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); 2124 + if (err) 2125 + goto out_err; 2126 + } 2130 2127 2131 2128 err = offload_pedit_fields(masks, vals, parse_attr, extack); 2132 2129 if (err < 0) ··· 2186 2179 2187 2180 static bool modify_header_match_supported(struct mlx5_flow_spec *spec, 2188 2181 struct tcf_exts *exts, 2182 + u32 actions, 2189 2183 struct netlink_ext_ack *extack) 2190 2184 { 2191 2185 const struct tc_action *a; ··· 2196 2188 u16 ethertype; 2197 2189 int nkeys, i; 2198 2190 2199 - headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); 2191 + if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) 2192 + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); 2193 + else 2194 + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); 2195 + 2200 2196 ethertype = 
MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); 2201 2197 2202 2198 /* for non-IP we only re-write MACs, so we're okay */ ··· 2257 2245 2258 2246 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 2259 2247 return modify_header_match_supported(&parse_attr->spec, exts, 2260 - extack); 2248 + actions, extack); 2261 2249 2262 2250 return true; 2263 2251 }
+6
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 387 387 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 388 388 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); 389 389 if (unlikely(contig_wqebbs_room < num_wqebbs)) { 390 + #ifdef CONFIG_MLX5_EN_IPSEC 391 + struct mlx5_wqe_eth_seg cur_eth = wqe->eth; 392 + #endif 390 393 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); 391 394 mlx5e_sq_fetch_wqe(sq, &wqe, &pi); 395 + #ifdef CONFIG_MLX5_EN_IPSEC 396 + wqe->eth = cur_eth; 397 + #endif 392 398 } 393 399 394 400 /* fill wqe */
+8 -14
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1134 1134 int err = 0; 1135 1135 u8 *smac_v; 1136 1136 1137 - if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) { 1138 - mlx5_core_warn(esw->dev, 1139 - "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", 1140 - vport->vport); 1141 - return -EPERM; 1142 - } 1143 - 1144 1137 esw_vport_cleanup_ingress_rules(esw, vport); 1145 1138 1146 1139 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { ··· 1721 1728 int vport_num; 1722 1729 int err; 1723 1730 1724 - if (!MLX5_ESWITCH_MANAGER(dev)) 1731 + if (!MLX5_VPORT_MANAGER(dev)) 1725 1732 return 0; 1726 1733 1727 1734 esw_info(dev, ··· 1790 1797 1791 1798 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1792 1799 { 1793 - if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) 1800 + if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) 1794 1801 return; 1795 1802 1796 1803 esw_info(esw->dev, "cleanup\n"); ··· 1820 1827 mutex_lock(&esw->state_lock); 1821 1828 evport = &esw->vports[vport]; 1822 1829 1823 - if (evport->info.spoofchk && !is_valid_ether_addr(mac)) { 1830 + if (evport->info.spoofchk && !is_valid_ether_addr(mac)) 1824 1831 mlx5_core_warn(esw->dev, 1825 - "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", 1832 + "Set invalid MAC while spoofchk is on, vport(%d)\n", 1826 1833 vport); 1827 - err = -EPERM; 1828 - goto unlock; 1829 - } 1830 1834 1831 1835 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); 1832 1836 if (err) { ··· 1969 1979 evport = &esw->vports[vport]; 1970 1980 pschk = evport->info.spoofchk; 1971 1981 evport->info.spoofchk = spoofchk; 1982 + if (pschk && !is_valid_ether_addr(evport->info.mac)) 1983 + mlx5_core_warn(esw->dev, 1984 + "Spoofchk in set while MAC is invalid, vport(%d)\n", 1985 + evport->vport); 1972 1986 if (evport->enabled && esw->mode == SRIOV_LEGACY) 1973 1987 err = esw_vport_ingress_config(esw, evport); 1974 1988 if (err)
+1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 312 312 } dests[MLX5_MAX_FLOW_FWD_VPORTS]; 313 313 u32 mod_hdr_id; 314 314 u8 match_level; 315 + u8 tunnel_match_level; 315 316 struct mlx5_fc *counter; 316 317 u32 chain; 317 318 u16 prio;
+9 -8
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 160 160 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 161 161 source_eswitch_owner_vhca_id); 162 162 163 - if (attr->match_level == MLX5_MATCH_NONE) 164 - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 165 - else 166 - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | 167 - MLX5_MATCH_MISC_PARAMETERS; 168 - 169 - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 170 - spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; 163 + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 164 + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { 165 + if (attr->tunnel_match_level != MLX5_MATCH_NONE) 166 + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 167 + if (attr->match_level != MLX5_MATCH_NONE) 168 + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; 169 + } else if (attr->match_level != MLX5_MATCH_NONE) { 170 + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 171 + } 171 172 172 173 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 173 174 flow_act.modify_id = attr->mod_hdr_id;
+21
drivers/net/ethernet/mellanox/mlx5/core/lag.c
··· 616 616 } 617 617 } 618 618 619 + int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num) 620 + { 621 + struct mlx5_lag *ldev; 622 + int n; 623 + 624 + ldev = mlx5_lag_dev_get(dev); 625 + if (!ldev) { 626 + mlx5_core_warn(dev, "no lag device, can't get pf num\n"); 627 + return -EINVAL; 628 + } 629 + 630 + for (n = 0; n < MLX5_MAX_PORTS; n++) 631 + if (ldev->pf[n].dev == dev) { 632 + *pf_num = n; 633 + return 0; 634 + } 635 + 636 + mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n"); 637 + return -EINVAL; 638 + } 639 + 619 640 /* Must be called with intf_mutex held */ 620 641 void mlx5_lag_remove(struct mlx5_core_dev *dev) 621 642 {
+2
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 187 187 MLX5_CAP_GEN(dev, lag_master); 188 188 } 189 189 190 + int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num); 191 + 190 192 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); 191 193 void mlx5_lag_update(struct mlx5_core_dev *dev); 192 194
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/qp.c
··· 44 44 mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) 45 45 { 46 46 struct mlx5_core_rsc_common *common; 47 + unsigned long flags; 47 48 48 - spin_lock(&table->lock); 49 + spin_lock_irqsave(&table->lock, flags); 49 50 50 51 common = radix_tree_lookup(&table->tree, rsn); 51 52 if (common) 52 53 atomic_inc(&common->refcount); 53 54 54 - spin_unlock(&table->lock); 55 + spin_unlock_irqrestore(&table->lock, flags); 55 56 56 57 return common; 57 58 }
+1 -1
drivers/net/ethernet/qlogic/qed/qed.h
··· 53 53 extern const struct qed_common_ops qed_common_ops_pass; 54 54 55 55 #define QED_MAJOR_VERSION 8 56 - #define QED_MINOR_VERSION 33 56 + #define QED_MINOR_VERSION 37 57 57 #define QED_REVISION_VERSION 0 58 58 #define QED_ENGINEERING_VERSION 20 59 59
+4 -4
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 795 795 796 796 /* get pq index according to PQ_FLAGS */ 797 797 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, 798 - u32 pq_flags) 798 + unsigned long pq_flags) 799 799 { 800 800 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 801 801 802 802 /* Can't have multiple flags set here */ 803 - if (bitmap_weight((unsigned long *)&pq_flags, 803 + if (bitmap_weight(&pq_flags, 804 804 sizeof(pq_flags) * BITS_PER_BYTE) > 1) { 805 - DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); 805 + DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags); 806 806 goto err; 807 807 } 808 808 809 809 if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { 810 - DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); 810 + DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags); 811 811 goto err; 812 812 } 813 813
+16 -4
drivers/net/ethernet/qlogic/qed/qed_l2.c
··· 609 609 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && 610 610 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); 611 611 612 + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, 613 + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && 614 + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); 615 + 612 616 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 613 617 !!(accept_filter & QED_ACCEPT_BCAST)); 614 618 ··· 746 742 if (rc) { 747 743 qed_sp_destroy_request(p_hwfn, p_ent); 748 744 return rc; 745 + } 746 + 747 + if (p_params->update_ctl_frame_check) { 748 + p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; 749 + p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; 749 750 } 750 751 751 752 /* Update mcast bins for VFs, PF doesn't use this functionality */ ··· 2216 2207 u16 num_queues = 0; 2217 2208 2218 2209 /* Since the feature controls only queue-zones, 2219 - * make sure we have the contexts [rx, tx, xdp] to 2210 + * make sure we have the contexts [rx, xdp, tcs] to 2220 2211 * match. 
2221 2212 */ 2222 2213 for_each_hwfn(cdev, i) { ··· 2226 2217 u16 cids; 2227 2218 2228 2219 cids = hwfn->pf_params.eth_pf_params.num_cons; 2229 - num_queues += min_t(u16, l2_queues, cids / 3); 2220 + cids /= (2 + info->num_tc); 2221 + num_queues += min_t(u16, l2_queues, cids); 2230 2222 } 2231 2223 2232 2224 /* queues might theoretically be >256, but interrupts' ··· 2698 2688 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 2699 2689 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | 2700 2690 QED_ACCEPT_MCAST_UNMATCHED; 2701 - accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2691 + accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | 2692 + QED_ACCEPT_MCAST_UNMATCHED; 2702 2693 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 2703 2694 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2704 2695 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; ··· 2871 2860 p_hwfn = p_cid->p_owner; 2872 2861 rc = qed_get_queue_coalesce(p_hwfn, coal, handle); 2873 2862 if (rc) 2874 - DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); 2863 + DP_VERBOSE(cdev, QED_MSG_DEBUG, 2864 + "Unable to read queue coalescing\n"); 2875 2865 2876 2866 return rc; 2877 2867 }
+3
drivers/net/ethernet/qlogic/qed/qed_l2.h
··· 219 219 struct qed_rss_params *rss_params; 220 220 struct qed_filter_accept_flags accept_flags; 221 221 struct qed_sge_tpa_params *sge_tpa_params; 222 + u8 update_ctl_frame_check; 223 + u8 mac_chk_en; 224 + u8 ethtype_chk_en; 222 225 }; 223 226 224 227 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+15 -5
drivers/net/ethernet/qlogic/qed/qed_ll2.c
··· 2451 2451 { 2452 2452 struct qed_ll2_tx_pkt_info pkt; 2453 2453 const skb_frag_t *frag; 2454 + u8 flags = 0, nr_frags; 2454 2455 int rc = -EINVAL, i; 2455 2456 dma_addr_t mapping; 2456 2457 u16 vlan = 0; 2457 - u8 flags = 0; 2458 2458 2459 2459 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { 2460 2460 DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); 2461 2461 return -EINVAL; 2462 2462 } 2463 2463 2464 - if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { 2464 + /* Cache number of fragments from SKB since SKB may be freed by 2465 + * the completion routine after calling qed_ll2_prepare_tx_packet() 2466 + */ 2467 + nr_frags = skb_shinfo(skb)->nr_frags; 2468 + 2469 + if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { 2465 2470 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", 2466 - 1 + skb_shinfo(skb)->nr_frags); 2471 + 1 + nr_frags); 2467 2472 return -EINVAL; 2468 2473 } 2469 2474 ··· 2490 2485 } 2491 2486 2492 2487 memset(&pkt, 0, sizeof(pkt)); 2493 - pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; 2488 + pkt.num_of_bds = 1 + nr_frags; 2494 2489 pkt.vlan = vlan; 2495 2490 pkt.bd_flags = flags; 2496 2491 pkt.tx_dest = QED_LL2_TX_DEST_NW; ··· 2501 2496 test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) 2502 2497 pkt.remove_stag = true; 2503 2498 2499 + /* qed_ll2_prepare_tx_packet() may actually send the packet if 2500 + * there are no fragments in the skb and subsequently the completion 2501 + * routine may run and free the SKB, so no dereferencing the SKB 2502 + * beyond this point unless skb has any fragments. 2503 + */ 2504 2504 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, 2505 2505 &pkt, 1); 2506 2506 if (rc) 2507 2507 goto err; 2508 2508 2509 - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2509 + for (i = 0; i < nr_frags; i++) { 2510 2510 frag = &skb_shinfo(skb)->frags[i]; 2511 2511 2512 2512 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+1
drivers/net/ethernet/qlogic/qed/qed_sp.h
··· 382 382 * @param p_hwfn 383 383 */ 384 384 void qed_consq_free(struct qed_hwfn *p_hwfn); 385 + int qed_spq_pend_post(struct qed_hwfn *p_hwfn); 385 386 386 387 /** 387 388 * @file
+3
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
··· 604 604 605 605 p_ent->ramrod.pf_update.update_mf_vlan_flag = true; 606 606 p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); 607 + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) 608 + p_ent->ramrod.pf_update.mf_vlan |= 609 + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); 607 610 608 611 return qed_spq_post(p_hwfn, p_ent, NULL); 609 612 }
+7 -8
drivers/net/ethernet/qlogic/qed/qed_spq.c
··· 397 397 398 398 qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); 399 399 400 + /* Attempt to post pending requests */ 401 + spin_lock_bh(&p_hwfn->p_spq->lock); 402 + rc = qed_spq_pend_post(p_hwfn); 403 + spin_unlock_bh(&p_hwfn->p_spq->lock); 404 + 400 405 return rc; 401 406 } 402 407 ··· 772 767 return 0; 773 768 } 774 769 775 - static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) 770 + int qed_spq_pend_post(struct qed_hwfn *p_hwfn) 776 771 { 777 772 struct qed_spq *p_spq = p_hwfn->p_spq; 778 773 struct qed_spq_entry *p_ent = NULL; ··· 910 905 struct qed_spq_entry *p_ent = NULL; 911 906 struct qed_spq_entry *tmp; 912 907 struct qed_spq_entry *found = NULL; 913 - int rc; 914 908 915 909 if (!p_hwfn) 916 910 return -EINVAL; ··· 967 963 */ 968 964 qed_spq_return_entry(p_hwfn, found); 969 965 970 - /* Attempt to post pending requests */ 971 - spin_lock_bh(&p_spq->lock); 972 - rc = qed_spq_pend_post(p_hwfn); 973 - spin_unlock_bh(&p_spq->lock); 974 - 975 - return rc; 966 + return 0; 976 967 } 977 968 978 969 int qed_consq_alloc(struct qed_hwfn *p_hwfn)
+8 -2
drivers/net/ethernet/qlogic/qed/qed_sriov.c
··· 1969 1969 params.vport_id = vf->vport_id; 1970 1970 params.max_buffers_per_cqe = start->max_buffers_per_cqe; 1971 1971 params.mtu = vf->mtu; 1972 - params.check_mac = true; 1972 + 1973 + /* Non trusted VFs should enable control frame filtering */ 1974 + params.check_mac = !vf->p_vf_info.is_trusted_configured; 1973 1975 1974 1976 rc = qed_sp_eth_vport_start(p_hwfn, &params); 1975 1977 if (rc) { ··· 5132 5130 params.opaque_fid = vf->opaque_fid; 5133 5131 params.vport_id = vf->vport_id; 5134 5132 5133 + params.update_ctl_frame_check = 1; 5134 + params.mac_chk_en = !vf_info->is_trusted_configured; 5135 + 5135 5136 if (vf_info->rx_accept_mode & mask) { 5136 5137 flags->update_rx_mode_config = 1; 5137 5138 flags->rx_accept_filter = vf_info->rx_accept_mode; ··· 5152 5147 } 5153 5148 5154 5149 if (flags->update_rx_mode_config || 5155 - flags->update_tx_mode_config) 5150 + flags->update_tx_mode_config || 5151 + params.update_ctl_frame_check) 5156 5152 qed_sp_vport_update(hwfn, &params, 5157 5153 QED_SPQ_MODE_EBLOCK, NULL); 5158 5154 }
+10
drivers/net/ethernet/qlogic/qed/qed_vf.c
··· 261 261 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; 262 262 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 263 263 struct vf_pf_resc_request *p_resc; 264 + u8 retry_cnt = VF_ACQUIRE_THRESH; 264 265 bool resources_acquired = false; 265 266 struct vfpf_acquire_tlv *req; 266 267 int rc = 0, attempts = 0; ··· 315 314 316 315 /* send acquire request */ 317 316 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 317 + 318 + /* Re-try acquire in case of vf-pf hw channel timeout */ 319 + if (retry_cnt && rc == -EBUSY) { 320 + DP_VERBOSE(p_hwfn, QED_MSG_IOV, 321 + "VF retrying to acquire due to VPC timeout\n"); 322 + retry_cnt--; 323 + continue; 324 + } 325 + 318 326 if (rc) 319 327 goto exit; 320 328
+4 -1
drivers/net/ethernet/qlogic/qede/qede.h
··· 56 56 #include <net/tc_act/tc_gact.h> 57 57 58 58 #define QEDE_MAJOR_VERSION 8 59 - #define QEDE_MINOR_VERSION 33 59 + #define QEDE_MINOR_VERSION 37 60 60 #define QEDE_REVISION_VERSION 0 61 61 #define QEDE_ENGINEERING_VERSION 20 62 62 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ ··· 494 494 495 495 /* Datapath functions definition */ 496 496 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); 497 + u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, 498 + struct net_device *sb_dev, 499 + select_queue_fallback_t fallback); 497 500 netdev_features_t qede_features_check(struct sk_buff *skb, 498 501 struct net_device *dev, 499 502 netdev_features_t features);
+13
drivers/net/ethernet/qlogic/qede/qede_fp.c
··· 1695 1695 return NETDEV_TX_OK; 1696 1696 } 1697 1697 1698 + u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, 1699 + struct net_device *sb_dev, 1700 + select_queue_fallback_t fallback) 1701 + { 1702 + struct qede_dev *edev = netdev_priv(dev); 1703 + int total_txq; 1704 + 1705 + total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; 1706 + 1707 + return QEDE_TSS_COUNT(edev) ? 1708 + fallback(dev, skb, NULL) % total_txq : 0; 1709 + } 1710 + 1698 1711 /* 8B udp header + 8B base tunnel header + 32B option length */ 1699 1712 #define QEDE_MAX_TUN_HDR_LEN 48 1700 1713
+3
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 631 631 .ndo_open = qede_open, 632 632 .ndo_stop = qede_close, 633 633 .ndo_start_xmit = qede_start_xmit, 634 + .ndo_select_queue = qede_select_queue, 634 635 .ndo_set_rx_mode = qede_set_rx_mode, 635 636 .ndo_set_mac_address = qede_set_mac_addr, 636 637 .ndo_validate_addr = eth_validate_addr, ··· 667 666 .ndo_open = qede_open, 668 667 .ndo_stop = qede_close, 669 668 .ndo_start_xmit = qede_start_xmit, 669 + .ndo_select_queue = qede_select_queue, 670 670 .ndo_set_rx_mode = qede_set_rx_mode, 671 671 .ndo_set_mac_address = qede_set_mac_addr, 672 672 .ndo_validate_addr = eth_validate_addr, ··· 686 684 .ndo_open = qede_open, 687 685 .ndo_stop = qede_close, 688 686 .ndo_start_xmit = qede_start_xmit, 687 + .ndo_select_queue = qede_select_queue, 689 688 .ndo_set_rx_mode = qede_set_rx_mode, 690 689 .ndo_set_mac_address = qede_set_mac_addr, 691 690 .ndo_validate_addr = eth_validate_addr,
+1 -1
drivers/net/ethernet/realtek/8139cp.c
··· 691 691 } 692 692 bytes_compl += skb->len; 693 693 pkts_compl++; 694 - dev_kfree_skb_irq(skb); 694 + dev_consume_skb_irq(skb); 695 695 } 696 696 697 697 cp->tx_skb[tx_tail] = NULL;
+1 -1
drivers/net/ethernet/smsc/epic100.c
··· 1037 1037 skb = ep->tx_skbuff[entry]; 1038 1038 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, 1039 1039 skb->len, PCI_DMA_TODEVICE); 1040 - dev_kfree_skb_irq(skb); 1040 + dev_consume_skb_irq(skb); 1041 1041 ep->tx_skbuff[entry] = NULL; 1042 1042 } 1043 1043
+3 -1
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
··· 1342 1342 } 1343 1343 1344 1344 ret = phy_power_on(bsp_priv, true); 1345 - if (ret) 1345 + if (ret) { 1346 + gmac_clk_enable(bsp_priv, false); 1346 1347 return ret; 1348 + } 1347 1349 1348 1350 pm_runtime_enable(dev); 1349 1351 pm_runtime_get_sync(dev);
+10 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 721 721 { 722 722 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); 723 723 724 - if (!clk) 725 - return 0; 724 + if (!clk) { 725 + clk = priv->plat->clk_ref_rate; 726 + if (!clk) 727 + return 0; 728 + } 726 729 727 730 return (usec * (clk / 1000000)) / 256; 728 731 } ··· 734 731 { 735 732 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); 736 733 737 - if (!clk) 738 - return 0; 734 + if (!clk) { 735 + clk = priv->plat->clk_ref_rate; 736 + if (!clk) 737 + return 0; 738 + } 739 739 740 740 return (riwt * 256) / (clk / 1000000); 741 741 }
+13 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 3023 3023 3024 3024 tx_q = &priv->tx_queue[queue]; 3025 3025 3026 + if (priv->tx_path_in_lpi_mode) 3027 + stmmac_disable_eee_mode(priv); 3028 + 3026 3029 /* Manage oversized TCP frames for GMAC4 device */ 3027 3030 if (skb_is_gso(skb) && priv->tso) { 3028 - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 3031 + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3032 + /* 3033 + * There is no way to determine the number of TSO 3034 + * capable Queues. Let's use always the Queue 0 3035 + * because if TSO is supported then at least this 3036 + * one will be capable. 3037 + */ 3038 + skb_set_queue_mapping(skb, 0); 3039 + 3029 3040 return stmmac_tso_xmit(skb, dev); 3041 + } 3030 3042 } 3031 3043 3032 3044 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { ··· 3052 3040 } 3053 3041 return NETDEV_TX_BUSY; 3054 3042 } 3055 - 3056 - if (priv->tx_path_in_lpi_mode) 3057 - stmmac_disable_eee_mode(priv); 3058 3043 3059 3044 entry = tx_q->cur_tx; 3060 3045 first_entry = entry;
+1 -1
drivers/net/ethernet/sun/cassini.c
··· 1898 1898 cp->net_stats[ring].tx_packets++; 1899 1899 cp->net_stats[ring].tx_bytes += skb->len; 1900 1900 spin_unlock(&cp->stat_lock[ring]); 1901 - dev_kfree_skb_irq(skb); 1901 + dev_consume_skb_irq(skb); 1902 1902 } 1903 1903 cp->tx_old[ring] = entry; 1904 1904
+1 -1
drivers/net/ethernet/sun/sunbmac.c
··· 781 781 782 782 DTX(("skb(%p) ", skb)); 783 783 bp->tx_skbs[elem] = NULL; 784 - dev_kfree_skb_irq(skb); 784 + dev_consume_skb_irq(skb); 785 785 786 786 elem = NEXT_TX(elem); 787 787 }
+1 -1
drivers/net/ethernet/sun/sunhme.c
··· 1962 1962 this = &txbase[elem]; 1963 1963 } 1964 1964 1965 - dev_kfree_skb_irq(skb); 1965 + dev_consume_skb_irq(skb); 1966 1966 dev->stats.tx_packets++; 1967 1967 } 1968 1968 hp->tx_old = elem;
+1 -1
drivers/net/ethernet/tehuti/tehuti.c
··· 1739 1739 tx_level -= db->rptr->len; /* '-' koz len is negative */ 1740 1740 1741 1741 /* now should come skb pointer - free it */ 1742 - dev_kfree_skb_irq(db->rptr->addr.skb); 1742 + dev_consume_skb_irq(db->rptr->addr.skb); 1743 1743 bdx_tx_db_inc_rptr(db); 1744 1744 } 1745 1745
+1 -1
drivers/net/ethernet/ti/cpmac.c
··· 608 608 netdev_dbg(dev, "sent 0x%p, len=%d\n", 609 609 desc->skb, desc->skb->len); 610 610 611 - dev_kfree_skb_irq(desc->skb); 611 + dev_consume_skb_irq(desc->skb); 612 612 desc->skb = NULL; 613 613 if (__netif_subqueue_stopped(dev, queue)) 614 614 netif_wake_subqueue(dev, queue);
+1 -1
drivers/net/ethernet/via/via-velocity.c
··· 1740 1740 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], 1741 1741 le16_to_cpu(pktlen), DMA_TO_DEVICE); 1742 1742 } 1743 - dev_kfree_skb_irq(skb); 1743 + dev_consume_skb_irq(skb); 1744 1744 tdinfo->skb = NULL; 1745 1745 } 1746 1746
+1 -1
drivers/net/fddi/defxx.c
··· 3512 3512 bp->descr_block_virt->xmt_data[comp].long_1, 3513 3513 p_xmt_drv_descr->p_skb->len, 3514 3514 DMA_TO_DEVICE); 3515 - dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); 3515 + dev_consume_skb_irq(p_xmt_drv_descr->p_skb); 3516 3516 3517 3517 /* 3518 3518 * Move to start of next packet by updating completion index
+7 -3
drivers/net/geneve.c
··· 1512 1512 } 1513 1513 #if IS_ENABLED(CONFIG_IPV6) 1514 1514 case AF_INET6: { 1515 - struct rt6_info *rt = rt6_lookup(geneve->net, 1516 - &info->key.u.ipv6.dst, NULL, 0, 1517 - NULL, 0); 1515 + struct rt6_info *rt; 1516 + 1517 + if (!__in6_dev_get(dev)) 1518 + break; 1519 + 1520 + rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, 1521 + NULL, 0); 1518 1522 1519 1523 if (rt && rt->dst.dev) 1520 1524 ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
+3 -3
drivers/net/ieee802154/mcr20a.c
··· 905 905 } 906 906 break; 907 907 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): 908 - /* rx is starting */ 909 - dev_dbg(printdev(lp), "RX is starting\n"); 910 - mcr20a_handle_rx(lp); 908 + /* rx is starting */ 909 + dev_dbg(printdev(lp), "RX is starting\n"); 910 + mcr20a_handle_rx(lp); 911 911 break; 912 912 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): 913 913 if (lp->is_tx) {
+3 -3
drivers/net/ipvlan/ipvlan_main.c
··· 100 100 err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); 101 101 if (!err) { 102 102 mdev->l3mdev_ops = &ipvl_l3mdev_ops; 103 - mdev->priv_flags |= IFF_L3MDEV_MASTER; 103 + mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER; 104 104 } else 105 105 goto fail; 106 106 } else if (port->mode == IPVLAN_MODE_L3S) { 107 107 /* Old mode was L3S */ 108 - mdev->priv_flags &= ~IFF_L3MDEV_MASTER; 108 + mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; 109 109 ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); 110 110 mdev->l3mdev_ops = NULL; 111 111 } ··· 167 167 struct sk_buff *skb; 168 168 169 169 if (port->mode == IPVLAN_MODE_L3S) { 170 - dev->priv_flags &= ~IFF_L3MDEV_MASTER; 170 + dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; 171 171 ipvlan_unregister_nf_hook(dev_net(dev)); 172 172 dev->l3mdev_ops = NULL; 173 173 }
+10 -3
drivers/net/phy/dp83640.c
··· 898 898 struct phy_txts *phy_txts) 899 899 { 900 900 struct skb_shared_hwtstamps shhwtstamps; 901 + struct dp83640_skb_info *skb_info; 901 902 struct sk_buff *skb; 902 - u64 ns; 903 903 u8 overflow; 904 + u64 ns; 904 905 905 906 /* We must already have the skb that triggered this. */ 906 - 907 + again: 907 908 skb = skb_dequeue(&dp83640->tx_queue); 908 - 909 909 if (!skb) { 910 910 pr_debug("have timestamp but tx_queue empty\n"); 911 911 return; ··· 919 919 skb = skb_dequeue(&dp83640->tx_queue); 920 920 } 921 921 return; 922 + } 923 + skb_info = (struct dp83640_skb_info *)skb->cb; 924 + if (time_after(jiffies, skb_info->tmo)) { 925 + kfree_skb(skb); 926 + goto again; 922 927 } 923 928 924 929 ns = phy2txts(phy_txts); ··· 1477 1472 static void dp83640_txtstamp(struct phy_device *phydev, 1478 1473 struct sk_buff *skb, int type) 1479 1474 { 1475 + struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb; 1480 1476 struct dp83640_private *dp83640 = phydev->priv; 1481 1477 1482 1478 switch (dp83640->hwts_tx_en) { ··· 1490 1484 /* fall through */ 1491 1485 case HWTSTAMP_TX_ON: 1492 1486 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1487 + skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; 1493 1488 skb_queue_tail(&dp83640->tx_queue, skb); 1494 1489 break; 1495 1490
-16
drivers/net/phy/marvell.c
··· 847 847 848 848 /* SGMII-to-Copper mode initialization */ 849 849 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { 850 - 851 850 /* Select page 18 */ 852 851 err = marvell_set_page(phydev, 18); 853 852 if (err < 0) ··· 869 870 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); 870 871 if (err < 0) 871 872 return err; 872 - 873 - /* There appears to be a bug in the 88e1512 when used in 874 - * SGMII to copper mode, where the AN advertisement register 875 - * clears the pause bits each time a negotiation occurs. 876 - * This means we can never be truely sure what was advertised, 877 - * so disable Pause support. 878 - */ 879 - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 880 - phydev->supported); 881 - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, 882 - phydev->supported); 883 - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 884 - phydev->advertising); 885 - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, 886 - phydev->advertising); 887 873 } 888 874 889 875 return m88e1318_config_init(phydev);
+1 -2
drivers/net/tun.c
··· 866 866 if (rtnl_dereference(tun->xdp_prog)) 867 867 sock_set_flag(&tfile->sk, SOCK_XDP); 868 868 869 - tun_set_real_num_queues(tun); 870 - 871 869 /* device is allowed to go away first, so no need to hold extra 872 870 * refcnt. 873 871 */ ··· 877 879 rcu_assign_pointer(tfile->tun, tun); 878 880 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 879 881 tun->numqueues++; 882 + tun_set_real_num_queues(tun); 880 883 out: 881 884 return err; 882 885 }
+124 -47
drivers/net/virtio_net.c
··· 57 57 #define VIRTIO_XDP_TX BIT(0) 58 58 #define VIRTIO_XDP_REDIR BIT(1) 59 59 60 + #define VIRTIO_XDP_FLAG BIT(0) 61 + 60 62 /* RX packet size EWMA. The average packet size is used to determine the packet 61 63 * buffer size when refilling RX rings. As the entire RX ring may be refilled 62 64 * at once, the weight is chosen so that the EWMA will be insensitive to short- ··· 253 251 */ 254 252 char padding[4]; 255 253 }; 254 + 255 + static bool is_xdp_frame(void *ptr) 256 + { 257 + return (unsigned long)ptr & VIRTIO_XDP_FLAG; 258 + } 259 + 260 + static void *xdp_to_ptr(struct xdp_frame *ptr) 261 + { 262 + return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); 263 + } 264 + 265 + static struct xdp_frame *ptr_to_xdp(void *ptr) 266 + { 267 + return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); 268 + } 256 269 257 270 /* Converting between virtqueue no. and kernel tx/rx queue no. 258 271 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq ··· 479 462 480 463 sg_init_one(sq->sg, xdpf->data, xdpf->len); 481 464 482 - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); 465 + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), 466 + GFP_ATOMIC); 483 467 if (unlikely(err)) 484 468 return -ENOSPC; /* Caller handle free/refcnt */ 485 469 ··· 500 482 { 501 483 struct virtnet_info *vi = netdev_priv(dev); 502 484 struct receive_queue *rq = vi->rq; 503 - struct xdp_frame *xdpf_sent; 504 485 struct bpf_prog *xdp_prog; 505 486 struct send_queue *sq; 506 487 unsigned int len; 488 + int packets = 0; 489 + int bytes = 0; 507 490 int drops = 0; 508 491 int kicks = 0; 509 492 int ret, err; 493 + void *ptr; 510 494 int i; 495 + 496 + /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this 497 + * indicate XDP resources have been successfully allocated. 
498 + */ 499 + xdp_prog = rcu_dereference(rq->xdp_prog); 500 + if (!xdp_prog) 501 + return -ENXIO; 511 502 512 503 sq = virtnet_xdp_sq(vi); 513 504 ··· 526 499 goto out; 527 500 } 528 501 529 - /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this 530 - * indicate XDP resources have been successfully allocated. 531 - */ 532 - xdp_prog = rcu_dereference(rq->xdp_prog); 533 - if (!xdp_prog) { 534 - ret = -ENXIO; 535 - drops = n; 536 - goto out; 537 - } 538 - 539 502 /* Free up any pending old buffers before queueing new ones. */ 540 - while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) 541 - xdp_return_frame(xdpf_sent); 503 + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 504 + if (likely(is_xdp_frame(ptr))) { 505 + struct xdp_frame *frame = ptr_to_xdp(ptr); 506 + 507 + bytes += frame->len; 508 + xdp_return_frame(frame); 509 + } else { 510 + struct sk_buff *skb = ptr; 511 + 512 + bytes += skb->len; 513 + napi_consume_skb(skb, false); 514 + } 515 + packets++; 516 + } 542 517 543 518 for (i = 0; i < n; i++) { 544 519 struct xdp_frame *xdpf = frames[i]; ··· 559 530 } 560 531 out: 561 532 u64_stats_update_begin(&sq->stats.syncp); 533 + sq->stats.bytes += bytes; 534 + sq->stats.packets += packets; 562 535 sq->stats.xdp_tx += n; 563 536 sq->stats.xdp_tx_drops += drops; 564 537 sq->stats.kicks += kicks; ··· 1363 1332 1364 1333 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) 1365 1334 { 1366 - struct sk_buff *skb; 1367 1335 unsigned int len; 1368 1336 unsigned int packets = 0; 1369 1337 unsigned int bytes = 0; 1338 + void *ptr; 1370 1339 1371 - while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { 1372 - pr_debug("Sent skb %p\n", skb); 1340 + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 1341 + if (likely(!is_xdp_frame(ptr))) { 1342 + struct sk_buff *skb = ptr; 1373 1343 1374 - bytes += skb->len; 1344 + pr_debug("Sent skb %p\n", skb); 1345 + 1346 + bytes += skb->len; 1347 + napi_consume_skb(skb, in_napi); 1348 + } 
else { 1349 + struct xdp_frame *frame = ptr_to_xdp(ptr); 1350 + 1351 + bytes += frame->len; 1352 + xdp_return_frame(frame); 1353 + } 1375 1354 packets++; 1376 - 1377 - napi_consume_skb(skb, in_napi); 1378 1355 } 1379 1356 1380 1357 /* Avoid overhead when no packets have been processed ··· 1397 1358 u64_stats_update_end(&sq->stats.syncp); 1398 1359 } 1399 1360 1361 + static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 1362 + { 1363 + if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 1364 + return false; 1365 + else if (q < vi->curr_queue_pairs) 1366 + return true; 1367 + else 1368 + return false; 1369 + } 1370 + 1400 1371 static void virtnet_poll_cleantx(struct receive_queue *rq) 1401 1372 { 1402 1373 struct virtnet_info *vi = rq->vq->vdev->priv; ··· 1414 1365 struct send_queue *sq = &vi->sq[index]; 1415 1366 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 1416 1367 1417 - if (!sq->napi.weight) 1368 + if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) 1418 1369 return; 1419 1370 1420 1371 if (__netif_tx_trylock(txq)) { ··· 1491 1442 { 1492 1443 struct send_queue *sq = container_of(napi, struct send_queue, napi); 1493 1444 struct virtnet_info *vi = sq->vq->vdev->priv; 1494 - struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); 1445 + unsigned int index = vq2txq(sq->vq); 1446 + struct netdev_queue *txq; 1495 1447 1448 + if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { 1449 + /* We don't need to enable cb for XDP */ 1450 + napi_complete_done(napi, 0); 1451 + return 0; 1452 + } 1453 + 1454 + txq = netdev_get_tx_queue(vi->dev, index); 1496 1455 __netif_tx_lock(txq, raw_smp_processor_id()); 1497 1456 free_old_xmit_skbs(sq, true); 1498 1457 __netif_tx_unlock(txq); ··· 2452 2395 return -ENOMEM; 2453 2396 } 2454 2397 2398 + old_prog = rtnl_dereference(vi->rq[0].xdp_prog); 2399 + if (!prog && !old_prog) 2400 + return 0; 2401 + 2455 2402 if (prog) { 2456 2403 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 
2457 2404 if (IS_ERR(prog)) ··· 2463 2402 } 2464 2403 2465 2404 /* Make sure NAPI is not using any XDP TX queues for RX. */ 2466 - if (netif_running(dev)) 2467 - for (i = 0; i < vi->max_queue_pairs; i++) 2405 + if (netif_running(dev)) { 2406 + for (i = 0; i < vi->max_queue_pairs; i++) { 2468 2407 napi_disable(&vi->rq[i].napi); 2408 + virtnet_napi_tx_disable(&vi->sq[i].napi); 2409 + } 2410 + } 2469 2411 2470 - netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 2412 + if (!prog) { 2413 + for (i = 0; i < vi->max_queue_pairs; i++) { 2414 + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 2415 + if (i == 0) 2416 + virtnet_restore_guest_offloads(vi); 2417 + } 2418 + synchronize_net(); 2419 + } 2420 + 2471 2421 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 2472 2422 if (err) 2473 2423 goto err; 2424 + netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 2474 2425 vi->xdp_queue_pairs = xdp_qp; 2475 2426 2476 - for (i = 0; i < vi->max_queue_pairs; i++) { 2477 - old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2478 - rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 2479 - if (i == 0) { 2480 - if (!old_prog) 2427 + if (prog) { 2428 + for (i = 0; i < vi->max_queue_pairs; i++) { 2429 + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 2430 + if (i == 0 && !old_prog) 2481 2431 virtnet_clear_guest_offloads(vi); 2482 - if (!prog) 2483 - virtnet_restore_guest_offloads(vi); 2484 2432 } 2433 + } 2434 + 2435 + for (i = 0; i < vi->max_queue_pairs; i++) { 2485 2436 if (old_prog) 2486 2437 bpf_prog_put(old_prog); 2487 - if (netif_running(dev)) 2438 + if (netif_running(dev)) { 2488 2439 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2440 + virtnet_napi_tx_enable(vi, vi->sq[i].vq, 2441 + &vi->sq[i].napi); 2442 + } 2489 2443 } 2490 2444 2491 2445 return 0; 2492 2446 2493 2447 err: 2494 - for (i = 0; i < vi->max_queue_pairs; i++) 2495 - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2448 + if (!prog) { 2449 + virtnet_clear_guest_offloads(vi); 2450 + for (i = 0; i < vi->max_queue_pairs; 
i++) 2451 + rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); 2452 + } 2453 + 2454 + if (netif_running(dev)) { 2455 + for (i = 0; i < vi->max_queue_pairs; i++) { 2456 + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2457 + virtnet_napi_tx_enable(vi, vi->sq[i].vq, 2458 + &vi->sq[i].napi); 2459 + } 2460 + } 2496 2461 if (prog) 2497 2462 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 2498 2463 return err; ··· 2700 2613 put_page(vi->rq[i].alloc_frag.page); 2701 2614 } 2702 2615 2703 - static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 2704 - { 2705 - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 2706 - return false; 2707 - else if (q < vi->curr_queue_pairs) 2708 - return true; 2709 - else 2710 - return false; 2711 - } 2712 - 2713 2616 static void free_unused_bufs(struct virtnet_info *vi) 2714 2617 { 2715 2618 void *buf; ··· 2708 2631 for (i = 0; i < vi->max_queue_pairs; i++) { 2709 2632 struct virtqueue *vq = vi->sq[i].vq; 2710 2633 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 2711 - if (!is_xdp_raw_buffer_queue(vi, i)) 2634 + if (!is_xdp_frame(buf)) 2712 2635 dev_kfree_skb(buf); 2713 2636 else 2714 - put_page(virt_to_head_page(buf)); 2637 + xdp_return_frame(ptr_to_xdp(buf)); 2715 2638 } 2716 2639 } 2717 2640
+1 -1
drivers/net/wan/dscc4.c
··· 1575 1575 dev->stats.tx_packets++; 1576 1576 dev->stats.tx_bytes += skb->len; 1577 1577 } 1578 - dev_kfree_skb_irq(skb); 1578 + dev_consume_skb_irq(skb); 1579 1579 dpriv->tx_skbuff[cur] = NULL; 1580 1580 ++dpriv->tx_dirty; 1581 1581 } else {
+1 -1
drivers/net/wan/fsl_ucc_hdlc.c
··· 482 482 memset(priv->tx_buffer + 483 483 (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 484 484 0, skb->len); 485 - dev_kfree_skb_irq(skb); 485 + dev_consume_skb_irq(skb); 486 486 487 487 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 488 488 priv->skb_dirtytx =
+1 -1
drivers/net/wireless/ath/ath10k/core.c
··· 548 548 { 549 549 .id = WCN3990_HW_1_0_DEV_VERSION, 550 550 .dev_id = 0, 551 - .bus = ATH10K_BUS_PCI, 551 + .bus = ATH10K_BUS_SNOC, 552 552 .name = "wcn3990 hw1.0", 553 553 .continuous_frag_desc = true, 554 554 .tx_chain_mask = 0x7,
+2 -1
drivers/net/wireless/intel/iwlwifi/Kconfig
··· 1 1 config IWLWIFI 2 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " 3 - depends on PCI && HAS_IOMEM 3 + depends on PCI && HAS_IOMEM && CFG80211 4 4 select FW_LOADER 5 5 ---help--- 6 6 Select to build the driver supporting the: ··· 47 47 config IWLWIFI_LEDS 48 48 bool 49 49 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI 50 + depends on IWLMVM || IWLDVM 50 51 select LEDS_TRIGGERS 51 52 select MAC80211_LEDS 52 53 default y
+19 -21
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
··· 212 212 mt76x02_add_rate_power_offset(t, delta); 213 213 } 214 214 215 - void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) 215 + void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp) 216 216 { 217 217 struct mt76x0_chan_map { 218 218 u8 chan; 219 219 u8 offset; 220 220 } chan_map[] = { 221 - { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, 222 - { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, 223 - { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, 224 - { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, 225 - { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, 226 - { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, 227 - { 167, 17 }, { 171, 18 }, { 173, 19 }, 221 + { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 }, 222 + { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 }, 223 + { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 }, 224 + { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 }, 225 + { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 }, 226 + { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 }, 227 + { 167, 34 }, { 171, 36 }, { 175, 38 }, 228 228 }; 229 229 struct ieee80211_channel *chan = dev->mt76.chandef.chan; 230 230 u8 offset, addr; 231 + int i, idx = 0; 231 232 u16 data; 232 - int i; 233 233 234 234 if (mt76x0_tssi_enabled(dev)) { 235 235 s8 target_power; ··· 239 239 else 240 240 data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); 241 241 target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; 242 - info[0] = target_power + mt76x0_get_delta(dev); 243 - info[1] = 0; 242 + *tp = target_power + mt76x0_get_delta(dev); 244 243 245 244 return; 246 245 } 247 246 248 247 for (i = 0; i < ARRAY_SIZE(chan_map); i++) { 249 - if (chan_map[i].chan <= chan->hw_value) { 248 + if (chan->hw_value <= chan_map[i].chan) { 249 + idx = (chan->hw_value == chan_map[i].chan); 250 250 offset = chan_map[i].offset; 251 251 break; 252 252 } ··· 258 258 addr = MT_EE_TX_POWER_DELTA_BW80 + offset; 259 259 } else { 260 260 switch (chan->hw_value) { 261 + case 42: 262 + offset = 2; 263 + break; 261 264 
case 58: 262 265 offset = 8; 263 266 break; 264 267 case 106: 265 268 offset = 14; 266 269 break; 267 - case 112: 270 + case 122: 268 271 offset = 20; 269 272 break; 270 273 case 155: ··· 280 277 } 281 278 282 279 data = mt76x02_eeprom_get(dev, addr); 283 - 284 - info[0] = data; 285 - if (!info[0] || info[0] > 0x3f) 286 - info[0] = 5; 287 - 288 - info[1] = data >> 8; 289 - if (!info[1] || info[1] > 0x3f) 290 - info[1] = 5; 280 + *tp = data >> (8 * idx); 281 + if (*tp < 0 || *tp > 0x3f) 282 + *tp = 5; 291 283 } 292 284 293 285 static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
+1 -1
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
··· 26 26 int mt76x0_eeprom_init(struct mt76x02_dev *dev); 27 27 void mt76x0_read_rx_gain(struct mt76x02_dev *dev); 28 28 void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); 29 - void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); 29 + void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp); 30 30 31 31 static inline s8 s6_to_s8(u32 val) 32 32 {
+5 -5
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
··· 845 845 void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) 846 846 { 847 847 struct mt76_rate_power *t = &dev->mt76.rate_power; 848 - u8 info[2]; 848 + s8 info; 849 849 850 850 mt76x0_get_tx_power_per_rate(dev); 851 - mt76x0_get_power_info(dev, info); 851 + mt76x0_get_power_info(dev, &info); 852 852 853 - mt76x02_add_rate_power_offset(t, info[0]); 853 + mt76x02_add_rate_power_offset(t, info); 854 854 mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); 855 855 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); 856 - mt76x02_add_rate_power_offset(t, -info[0]); 856 + mt76x02_add_rate_power_offset(t, -info); 857 857 858 - mt76x02_phy_set_txpower(dev, info[0], info[1]); 858 + mt76x02_phy_set_txpower(dev, info, info); 859 859 } 860 860 861 861 void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+7 -8
drivers/net/wireless/ti/wlcore/sdio.c
··· 164 164 } 165 165 166 166 sdio_claim_host(func); 167 + /* 168 + * To guarantee that the SDIO card is power cycled, as required to make 169 + * the FW programming to succeed, let's do a brute force HW reset. 170 + */ 171 + mmc_hw_reset(card->host); 172 + 167 173 sdio_enable_func(func); 168 174 sdio_release_host(func); 169 175 ··· 180 174 { 181 175 struct sdio_func *func = dev_to_sdio_func(glue->dev); 182 176 struct mmc_card *card = func->card; 183 - int error; 184 177 185 178 sdio_claim_host(func); 186 179 sdio_disable_func(func); 187 180 sdio_release_host(func); 188 181 189 182 /* Let runtime PM know the card is powered off */ 190 - error = pm_runtime_put(&card->dev); 191 - if (error < 0 && error != -EBUSY) { 192 - dev_err(&card->dev, "%s failed: %i\n", __func__, error); 193 - 194 - return error; 195 - } 196 - 183 + pm_runtime_put(&card->dev); 197 184 return 0; 198 185 } 199 186
+7 -1
drivers/nvme/host/core.c
··· 1253 1253 * effects say only one namespace is affected. 1254 1254 */ 1255 1255 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1256 + mutex_lock(&ctrl->scan_lock); 1256 1257 nvme_start_freeze(ctrl); 1257 1258 nvme_wait_freeze(ctrl); 1258 1259 } ··· 1282 1281 */ 1283 1282 if (effects & NVME_CMD_EFFECTS_LBCC) 1284 1283 nvme_update_formats(ctrl); 1285 - if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) 1284 + if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1286 1285 nvme_unfreeze(ctrl); 1286 + mutex_unlock(&ctrl->scan_lock); 1287 + } 1287 1288 if (effects & NVME_CMD_EFFECTS_CCC) 1288 1289 nvme_init_identify(ctrl); 1289 1290 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) ··· 3404 3401 if (nvme_identify_ctrl(ctrl, &id)) 3405 3402 return; 3406 3403 3404 + mutex_lock(&ctrl->scan_lock); 3407 3405 nn = le32_to_cpu(id->nn); 3408 3406 if (ctrl->vs >= NVME_VS(1, 1, 0) && 3409 3407 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { ··· 3413 3409 } 3414 3410 nvme_scan_ns_sequential(ctrl, nn); 3415 3411 out_free_id: 3412 + mutex_unlock(&ctrl->scan_lock); 3416 3413 kfree(id); 3417 3414 down_write(&ctrl->namespaces_rwsem); 3418 3415 list_sort(NULL, &ctrl->namespaces, ns_cmp); ··· 3657 3652 3658 3653 ctrl->state = NVME_CTRL_NEW; 3659 3654 spin_lock_init(&ctrl->lock); 3655 + mutex_init(&ctrl->scan_lock); 3660 3656 INIT_LIST_HEAD(&ctrl->namespaces); 3661 3657 init_rwsem(&ctrl->namespaces_rwsem); 3662 3658 ctrl->dev = dev;
+1
drivers/nvme/host/nvme.h
··· 154 154 enum nvme_ctrl_state state; 155 155 bool identified; 156 156 spinlock_t lock; 157 + struct mutex scan_lock; 157 158 const struct nvme_ctrl_ops *ops; 158 159 struct request_queue *admin_q; 159 160 struct request_queue *connect_q;
+12 -10
drivers/nvme/host/pci.c
··· 2557 2557 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2558 2558 nvme_dev_disable(dev, false); 2559 2559 2560 - /* 2561 - * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 2562 - * initializing procedure here. 2563 - */ 2564 - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 2565 - dev_warn(dev->ctrl.device, 2566 - "failed to mark controller CONNECTING\n"); 2567 - goto out; 2568 - } 2569 - 2560 + mutex_lock(&dev->shutdown_lock); 2570 2561 result = nvme_pci_enable(dev); 2571 2562 if (result) 2572 2563 goto out; ··· 2576 2585 */ 2577 2586 dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; 2578 2587 dev->ctrl.max_segments = NVME_MAX_SEGS; 2588 + mutex_unlock(&dev->shutdown_lock); 2589 + 2590 + /* 2591 + * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 2592 + * initializing procedure here. 2593 + */ 2594 + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 2595 + dev_warn(dev->ctrl.device, 2596 + "failed to mark controller CONNECTING\n"); 2597 + goto out; 2598 + } 2579 2599 2580 2600 result = nvme_init_identify(&dev->ctrl); 2581 2601 if (result)
+7 -4
drivers/pci/controller/dwc/pci-imx6.c
··· 310 310 imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); 311 311 if (IS_ERR(imx6_pcie->pd_pcie)) 312 312 return PTR_ERR(imx6_pcie->pd_pcie); 313 + /* Do nothing when power domain missing */ 314 + if (!imx6_pcie->pd_pcie) 315 + return 0; 313 316 link = device_link_add(dev, imx6_pcie->pd_pcie, 314 317 DL_FLAG_STATELESS | 315 318 DL_FLAG_PM_RUNTIME | ··· 326 323 if (IS_ERR(imx6_pcie->pd_pcie_phy)) 327 324 return PTR_ERR(imx6_pcie->pd_pcie_phy); 328 325 329 - device_link_add(dev, imx6_pcie->pd_pcie_phy, 326 + link = device_link_add(dev, imx6_pcie->pd_pcie_phy, 330 327 DL_FLAG_STATELESS | 331 328 DL_FLAG_PM_RUNTIME | 332 329 DL_FLAG_RPM_ACTIVE); 333 - if (IS_ERR(link)) { 334 - dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); 335 - return PTR_ERR(link); 330 + if (!link) { 331 + dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); 332 + return -EINVAL; 336 333 } 337 334 338 335 return 0;
-16
drivers/pci/controller/dwc/pcie-armada8k.c
··· 22 22 #include <linux/resource.h> 23 23 #include <linux/of_pci.h> 24 24 #include <linux/of_irq.h> 25 - #include <linux/gpio/consumer.h> 26 25 27 26 #include "pcie-designware.h" 28 27 ··· 29 30 struct dw_pcie *pci; 30 31 struct clk *clk; 31 32 struct clk *clk_reg; 32 - struct gpio_desc *reset_gpio; 33 33 }; 34 34 35 35 #define PCIE_VENDOR_REGS_OFFSET 0x8000 ··· 137 139 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 138 140 struct armada8k_pcie *pcie = to_armada8k_pcie(pci); 139 141 140 - if (pcie->reset_gpio) { 141 - /* assert and then deassert the reset signal */ 142 - gpiod_set_value_cansleep(pcie->reset_gpio, 1); 143 - msleep(100); 144 - gpiod_set_value_cansleep(pcie->reset_gpio, 0); 145 - } 146 142 dw_pcie_setup_rc(pp); 147 143 armada8k_pcie_establish_link(pcie); 148 144 ··· 246 254 if (IS_ERR(pci->dbi_base)) { 247 255 dev_err(dev, "couldn't remap regs base %p\n", base); 248 256 ret = PTR_ERR(pci->dbi_base); 249 - goto fail_clkreg; 250 - } 251 - 252 - /* Get reset gpio signal and hold asserted (logically high) */ 253 - pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", 254 - GPIOD_OUT_HIGH); 255 - if (IS_ERR(pcie->reset_gpio)) { 256 - ret = PTR_ERR(pcie->reset_gpio); 257 257 goto fail_clkreg; 258 258 } 259 259
+3 -2
drivers/pci/quirks.c
··· 639 639 break; 640 640 } 641 641 } 642 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, 643 - quirk_synopsys_haps); 642 + DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, 643 + PCI_CLASS_SERIAL_USB_XHCI, 0, 644 + quirk_synopsys_haps); 644 645 645 646 /* 646 647 * Let's make the southbridge information explicit instead of having to
+4 -4
drivers/pinctrl/intel/pinctrl-cherryview.c
··· 1513 1513 .matches = { 1514 1514 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1515 1515 DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), 1516 - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1516 + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), 1517 1517 }, 1518 1518 }, 1519 1519 { ··· 1521 1521 .matches = { 1522 1522 DMI_MATCH(DMI_SYS_VENDOR, "HP"), 1523 1523 DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), 1524 - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1524 + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), 1525 1525 }, 1526 1526 }, 1527 1527 { ··· 1529 1529 .matches = { 1530 1530 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1531 1531 DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), 1532 - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1532 + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), 1533 1533 }, 1534 1534 }, 1535 1535 { ··· 1537 1537 .matches = { 1538 1538 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1539 1539 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), 1540 - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1540 + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), 1541 1541 }, 1542 1542 }, 1543 1543 {}
+3
drivers/pinctrl/mediatek/Kconfig
··· 45 45 config PINCTRL_MT7623 46 46 bool "Mediatek MT7623 pin control with generic binding" 47 47 depends on MACH_MT7623 || COMPILE_TEST 48 + depends on OF 48 49 default MACH_MT7623 49 50 select PINCTRL_MTK_MOORE 50 51 51 52 config PINCTRL_MT7629 52 53 bool "Mediatek MT7629 pin control" 53 54 depends on MACH_MT7629 || COMPILE_TEST 55 + depends on OF 54 56 default MACH_MT7629 55 57 select PINCTRL_MTK_MOORE 56 58 ··· 94 92 95 93 config PINCTRL_MT7622 96 94 bool "MediaTek MT7622 pin control" 95 + depends on OF 97 96 depends on ARM64 || COMPILE_TEST 98 97 default ARM64 && ARCH_MEDIATEK 99 98 select PINCTRL_MTK_MOORE
+6 -1
drivers/pinctrl/pinctrl-mcp23s08.c
··· 832 832 break; 833 833 834 834 case MCP_TYPE_S18: 835 + one_regmap_config = 836 + devm_kmemdup(dev, &mcp23x17_regmap, 837 + sizeof(struct regmap_config), GFP_KERNEL); 838 + if (!one_regmap_config) 839 + return -ENOMEM; 835 840 mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, 836 - &mcp23x17_regmap); 841 + one_regmap_config); 837 842 mcp->reg_shift = 1; 838 843 mcp->chip.ngpio = 16; 839 844 mcp->chip.label = "mcp23s18";
+1 -1
drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
··· 588 588 static const struct sunxi_pinctrl_desc h6_pinctrl_data = { 589 589 .pins = h6_pins, 590 590 .npins = ARRAY_SIZE(h6_pins), 591 - .irq_banks = 3, 591 + .irq_banks = 4, 592 592 .irq_bank_map = h6_irq_bank_map, 593 593 .irq_read_needs_mux = true, 594 594 };
+22 -22
drivers/pinctrl/sunxi/pinctrl-sunxi.c
··· 698 698 { 699 699 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 700 700 unsigned short bank = offset / PINS_PER_BANK; 701 - struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; 702 - struct regulator *reg; 701 + unsigned short bank_offset = bank - pctl->desc->pin_base / 702 + PINS_PER_BANK; 703 + struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; 704 + struct regulator *reg = s_reg->regulator; 705 + char supply[16]; 703 706 int ret; 704 707 705 - reg = s_reg->regulator; 706 - if (!reg) { 707 - char supply[16]; 708 - 709 - snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); 710 - reg = regulator_get(pctl->dev, supply); 711 - if (IS_ERR(reg)) { 712 - dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", 713 - 'A' + bank); 714 - return PTR_ERR(reg); 715 - } 716 - 717 - s_reg->regulator = reg; 718 - refcount_set(&s_reg->refcount, 1); 719 - } else { 708 + if (reg) { 720 709 refcount_inc(&s_reg->refcount); 710 + return 0; 711 + } 712 + 713 + snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); 714 + reg = regulator_get(pctl->dev, supply); 715 + if (IS_ERR(reg)) { 716 + dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", 717 + 'A' + bank); 718 + return PTR_ERR(reg); 721 719 } 722 720 723 721 ret = regulator_enable(reg); ··· 725 727 goto out; 726 728 } 727 729 730 + s_reg->regulator = reg; 731 + refcount_set(&s_reg->refcount, 1); 732 + 728 733 return 0; 729 734 730 735 out: 731 - if (refcount_dec_and_test(&s_reg->refcount)) { 732 - regulator_put(s_reg->regulator); 733 - s_reg->regulator = NULL; 734 - } 736 + regulator_put(s_reg->regulator); 735 737 736 738 return ret; 737 739 } ··· 740 742 { 741 743 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 742 744 unsigned short bank = offset / PINS_PER_BANK; 743 - struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; 745 + unsigned short bank_offset = bank - pctl->desc->pin_base / 746 + PINS_PER_BANK; 747 + struct sunxi_pinctrl_regulator 
*s_reg = &pctl->regulators[bank_offset]; 744 748 745 749 if (!refcount_dec_and_test(&s_reg->refcount)) 746 750 return 0;
+1 -1
drivers/pinctrl/sunxi/pinctrl-sunxi.h
··· 136 136 struct gpio_chip *chip; 137 137 const struct sunxi_pinctrl_desc *desc; 138 138 struct device *dev; 139 - struct sunxi_pinctrl_regulator regulators[12]; 139 + struct sunxi_pinctrl_regulator regulators[9]; 140 140 struct irq_domain *domain; 141 141 struct sunxi_pinctrl_function *functions; 142 142 unsigned nfunctions;
+2
drivers/platform/x86/Kconfig
··· 905 905 config ACPI_CMPC 906 906 tristate "CMPC Laptop Extras" 907 907 depends on ACPI && INPUT 908 + depends on BACKLIGHT_LCD_SUPPORT 908 909 depends on RFKILL || RFKILL=n 909 910 select BACKLIGHT_CLASS_DEVICE 910 911 help ··· 1129 1128 config SAMSUNG_Q10 1130 1129 tristate "Samsung Q10 Extras" 1131 1130 depends on ACPI 1131 + depends on BACKLIGHT_LCD_SUPPORT 1132 1132 select BACKLIGHT_CLASS_DEVICE 1133 1133 ---help--- 1134 1134 This driver provides support for backlight control on Samsung Q10
+2 -1
drivers/s390/net/qeth_core.h
··· 22 22 #include <linux/hashtable.h> 23 23 #include <linux/ip.h> 24 24 #include <linux/refcount.h> 25 + #include <linux/workqueue.h> 25 26 26 27 #include <net/ipv6.h> 27 28 #include <net/if_inet6.h> ··· 790 789 struct qeth_seqno seqno; 791 790 struct qeth_card_options options; 792 791 792 + struct workqueue_struct *event_wq; 793 793 wait_queue_head_t wait_q; 794 794 spinlock_t mclock; 795 795 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; ··· 964 962 extern const struct attribute_group qeth_device_attr_group; 965 963 extern const struct attribute_group qeth_device_blkt_group; 966 964 extern const struct device_type qeth_generic_devtype; 967 - extern struct workqueue_struct *qeth_wq; 968 965 969 966 int qeth_card_hw_is_reachable(struct qeth_card *); 970 967 const char *qeth_get_cardname_short(struct qeth_card *);
+20 -11
drivers/s390/net/qeth_core_main.c
··· 74 74 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); 75 75 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 76 76 77 - struct workqueue_struct *qeth_wq; 78 - EXPORT_SYMBOL_GPL(qeth_wq); 77 + static struct workqueue_struct *qeth_wq; 79 78 80 79 int qeth_card_hw_is_reachable(struct qeth_card *card) 81 80 { ··· 565 566 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", 566 567 rc, CARD_DEVID(card)); 567 568 atomic_set(&channel->irq_pending, 0); 569 + qeth_release_buffer(channel, iob); 568 570 card->read_or_write_problem = 1; 569 571 qeth_schedule_recovery(card); 570 572 wake_up(&card->wait_q); ··· 1127 1127 rc = qeth_get_problem(card, cdev, irb); 1128 1128 if (rc) { 1129 1129 card->read_or_write_problem = 1; 1130 + if (iob) 1131 + qeth_release_buffer(iob->channel, iob); 1130 1132 qeth_clear_ipacmd_list(card); 1131 1133 qeth_schedule_recovery(card); 1132 1134 goto out; ··· 1468 1466 CARD_RDEV(card) = gdev->cdev[0]; 1469 1467 CARD_WDEV(card) = gdev->cdev[1]; 1470 1468 CARD_DDEV(card) = gdev->cdev[2]; 1469 + 1470 + card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); 1471 + if (!card->event_wq) 1472 + goto out_wq; 1471 1473 if (qeth_setup_channel(&card->read, true)) 1472 1474 goto out_ip; 1473 1475 if (qeth_setup_channel(&card->write, true)) ··· 1487 1481 out_channel: 1488 1482 qeth_clean_channel(&card->read); 1489 1483 out_ip: 1484 + destroy_workqueue(card->event_wq); 1485 + out_wq: 1490 1486 dev_set_drvdata(&gdev->dev, NULL); 1491 1487 kfree(card); 1492 1488 out: ··· 1817 1809 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); 1818 1810 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 1819 1811 atomic_set(&channel->irq_pending, 0); 1812 + qeth_release_buffer(channel, iob); 1820 1813 wake_up(&card->wait_q); 1821 1814 return rc; 1822 1815 } ··· 1887 1878 rc); 1888 1879 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 1889 1880 atomic_set(&channel->irq_pending, 0); 1881 + 
qeth_release_buffer(channel, iob); 1890 1882 wake_up(&card->wait_q); 1891 1883 return rc; 1892 1884 } ··· 2068 2058 } 2069 2059 reply = qeth_alloc_reply(card); 2070 2060 if (!reply) { 2061 + qeth_release_buffer(channel, iob); 2071 2062 return -ENOMEM; 2072 2063 } 2073 2064 reply->callback = reply_cb; ··· 2400 2389 return 0; 2401 2390 } 2402 2391 2403 - static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) 2392 + static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2404 2393 { 2405 2394 if (!q) 2406 2395 return; 2407 2396 2397 + qeth_clear_outq_buffers(q, 1); 2408 2398 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2409 2399 kfree(q); 2410 2400 } ··· 2479 2467 card->qdio.out_qs[i]->bufs[j] = NULL; 2480 2468 } 2481 2469 out_freeoutq: 2482 - while (i > 0) { 2483 - qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); 2484 - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); 2485 - } 2470 + while (i > 0) 2471 + qeth_free_output_queue(card->qdio.out_qs[--i]); 2486 2472 kfree(card->qdio.out_qs); 2487 2473 card->qdio.out_qs = NULL; 2488 2474 out_freepool: ··· 2513 2503 qeth_free_buffer_pool(card); 2514 2504 /* free outbound qdio_qs */ 2515 2505 if (card->qdio.out_qs) { 2516 - for (i = 0; i < card->qdio.no_out_queues; ++i) { 2517 - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); 2518 - qeth_free_qdio_out_buf(card->qdio.out_qs[i]); 2519 - } 2506 + for (i = 0; i < card->qdio.no_out_queues; i++) 2507 + qeth_free_output_queue(card->qdio.out_qs[i]); 2520 2508 kfree(card->qdio.out_qs); 2521 2509 card->qdio.out_qs = NULL; 2522 2510 } ··· 5036 5028 qeth_clean_channel(&card->read); 5037 5029 qeth_clean_channel(&card->write); 5038 5030 qeth_clean_channel(&card->data); 5031 + destroy_workqueue(card->event_wq); 5039 5032 qeth_free_qdio_buffers(card); 5040 5033 unregister_service_level(&card->qeth_service_level); 5041 5034 dev_set_drvdata(&card->gdev->dev, NULL);
+6 -2
drivers/s390/net/qeth_l2_main.c
··· 369 369 qeth_clear_cmd_buffers(&card->read); 370 370 qeth_clear_cmd_buffers(&card->write); 371 371 } 372 + 373 + flush_workqueue(card->event_wq); 372 374 } 373 375 374 376 static int qeth_l2_process_inbound_buffer(struct qeth_card *card, ··· 803 801 804 802 if (cgdev->state == CCWGROUP_ONLINE) 805 803 qeth_l2_set_offline(cgdev); 804 + 805 + cancel_work_sync(&card->close_dev_work); 806 806 if (qeth_netdev_is_registered(card->dev)) 807 807 unregister_netdev(card->dev); 808 808 } ··· 1438 1434 data->card = card; 1439 1435 memcpy(&data->qports, qports, 1440 1436 sizeof(struct qeth_sbp_state_change) + extrasize); 1441 - queue_work(qeth_wq, &data->worker); 1437 + queue_work(card->event_wq, &data->worker); 1442 1438 } 1443 1439 1444 1440 struct qeth_bridge_host_data { ··· 1510 1506 data->card = card; 1511 1507 memcpy(&data->hostevs, hostevs, 1512 1508 sizeof(struct qeth_ipacmd_addr_change) + extrasize); 1513 - queue_work(qeth_wq, &data->worker); 1509 + queue_work(card->event_wq, &data->worker); 1514 1510 } 1515 1511 1516 1512 /* SETBRIDGEPORT support; sending commands */
+3
drivers/s390/net/qeth_l3_main.c
··· 1433 1433 qeth_clear_cmd_buffers(&card->read); 1434 1434 qeth_clear_cmd_buffers(&card->write); 1435 1435 } 1436 + 1437 + flush_workqueue(card->event_wq); 1436 1438 } 1437 1439 1438 1440 /* ··· 2340 2338 if (cgdev->state == CCWGROUP_ONLINE) 2341 2339 qeth_l3_set_offline(cgdev); 2342 2340 2341 + cancel_work_sync(&card->close_dev_work); 2343 2342 if (qeth_netdev_is_registered(card->dev)) 2344 2343 unregister_netdev(card->dev); 2345 2344 qeth_l3_clear_ip_htable(card, 0);
-1
drivers/s390/scsi/zfcp_aux.c
··· 403 403 goto failed; 404 404 405 405 /* report size limit per scatter-gather segment */ 406 - adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; 407 406 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; 408 407 409 408 adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
+2
drivers/s390/scsi/zfcp_scsi.c
··· 428 428 .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) 429 429 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, 430 430 /* GCD, adjusted later */ 431 + /* report size limit per scatter-gather segment */ 432 + .max_segment_size = ZFCP_QDIO_SBALE_LEN, 431 433 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 432 434 .shost_attrs = zfcp_sysfs_shost_attrs, 433 435 .sdev_attrs = zfcp_sysfs_sdev_attrs,
+1 -1
drivers/scsi/53c700.c
··· 295 295 if(tpnt->sdev_attrs == NULL) 296 296 tpnt->sdev_attrs = NCR_700_dev_attrs; 297 297 298 - memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, 298 + memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript, 299 299 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); 300 300 if(memory == NULL) { 301 301 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
+4 -4
drivers/scsi/aic94xx/aic94xx_init.c
··· 280 280 return snprintf(buf, PAGE_SIZE, "%s\n", 281 281 asd_dev_rev[asd_ha->revision_id]); 282 282 } 283 - static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); 283 + static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); 284 284 285 285 static ssize_t asd_show_dev_bios_build(struct device *dev, 286 286 struct device_attribute *attr,char *buf) ··· 477 477 { 478 478 int err; 479 479 480 - err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); 480 + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); 481 481 if (err) 482 482 return err; 483 483 ··· 499 499 err_biosb: 500 500 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); 501 501 err_rev: 502 - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); 502 + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); 503 503 return err; 504 504 } 505 505 506 506 static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) 507 507 { 508 - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); 508 + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); 509 509 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); 510 510 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); 511 511 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
+2 -2
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 240 240 return NULL; 241 241 } 242 242 243 + cmgr->hba = hba; 243 244 cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), 244 245 GFP_KERNEL); 245 246 if (!cmgr->free_list) { ··· 257 256 goto mem_err; 258 257 } 259 258 260 - cmgr->hba = hba; 261 259 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 262 260 263 261 for (i = 0; i < arr_sz; i++) { ··· 295 295 296 296 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ 297 297 mem_size = num_ios * sizeof(struct io_bdt *); 298 - cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); 298 + cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL); 299 299 if (!cmgr->io_bdt_pool) { 300 300 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); 301 301 goto mem_err;
+2
drivers/scsi/cxlflash/main.c
··· 3687 3687 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; 3688 3688 3689 3689 cfg = shost_priv(host); 3690 + cfg->state = STATE_PROBING; 3690 3691 cfg->host = host; 3691 3692 rc = alloc_mem(cfg); 3692 3693 if (rc) { ··· 3776 3775 return rc; 3777 3776 3778 3777 out_remove: 3778 + cfg->state = STATE_PROBED; 3779 3779 cxlflash_remove(pdev); 3780 3780 goto out; 3781 3781 }
+3 -3
drivers/scsi/libfc/fc_lport.c
··· 1726 1726 fc_frame_payload_op(fp) != ELS_LS_ACC) { 1727 1727 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); 1728 1728 fc_lport_error(lport, fp); 1729 - goto err; 1729 + goto out; 1730 1730 } 1731 1731 1732 1732 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1733 1733 if (!flp) { 1734 1734 FC_LPORT_DBG(lport, "FLOGI bad response\n"); 1735 1735 fc_lport_error(lport, fp); 1736 - goto err; 1736 + goto out; 1737 1737 } 1738 1738 1739 1739 mfs = ntohs(flp->fl_csp.sp_bb_data) & ··· 1743 1743 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " 1744 1744 "lport->mfs:%hu\n", mfs, lport->mfs); 1745 1745 fc_lport_error(lport, fp); 1746 - goto err; 1746 + goto out; 1747 1747 } 1748 1748 1749 1749 if (mfs <= lport->mfs) {
-1
drivers/scsi/libfc/fc_rport.c
··· 184 184 struct fc_rport_priv *rdata; 185 185 186 186 rdata = container_of(kref, struct fc_rport_priv, kref); 187 - WARN_ON(!list_empty(&rdata->peers)); 188 187 kfree_rcu(rdata, rcu); 189 188 } 190 189 EXPORT_SYMBOL(fc_rport_destroy);
+21 -20
drivers/scsi/scsi_debug.c
··· 62 62 63 63 /* make sure inq_product_rev string corresponds to this version */ 64 64 #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ 65 - static const char *sdebug_version_date = "20180128"; 65 + static const char *sdebug_version_date = "20190125"; 66 66 67 67 #define MY_NAME "scsi_debug" 68 68 ··· 735 735 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); 736 736 } 737 737 738 - static void *fake_store(unsigned long long lba) 738 + static void *lba2fake_store(unsigned long long lba) 739 739 { 740 740 lba = do_div(lba, sdebug_store_sectors); 741 741 ··· 2514 2514 return ret; 2515 2515 } 2516 2516 2517 - /* If fake_store(lba,num) compares equal to arr(num), then copy top half of 2518 - * arr into fake_store(lba,num) and return true. If comparison fails then 2517 + /* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of 2518 + * arr into lba2fake_store(lba,num) and return true. If comparison fails then 2519 2519 * return false. */ 2520 2520 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) 2521 2521 { ··· 2643 2643 if (sdt->app_tag == cpu_to_be16(0xffff)) 2644 2644 continue; 2645 2645 2646 - ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); 2646 + ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba); 2647 2647 if (ret) { 2648 2648 dif_errors++; 2649 2649 return ret; ··· 3261 3261 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, 3262 3262 u32 ei_lba, bool unmap, bool ndob) 3263 3263 { 3264 + int ret; 3264 3265 unsigned long iflags; 3265 3266 unsigned long long i; 3266 - int ret; 3267 - u64 lba_off; 3267 + u32 lb_size = sdebug_sector_size; 3268 + u64 block, lbaa; 3269 + u8 *fs1p; 3268 3270 3269 3271 ret = check_device_access_params(scp, lba, num); 3270 3272 if (ret) ··· 3278 3276 unmap_region(lba, num); 3279 3277 goto out; 3280 3278 } 3281 - 3282 - lba_off = lba * sdebug_sector_size; 3279 + lbaa = lba; 3280 + block = do_div(lbaa, sdebug_store_sectors); 3283 3281 /* if 
ndob then zero 1 logical block, else fetch 1 logical block */ 3282 + fs1p = fake_storep + (block * lb_size); 3284 3283 if (ndob) { 3285 - memset(fake_storep + lba_off, 0, sdebug_sector_size); 3284 + memset(fs1p, 0, lb_size); 3286 3285 ret = 0; 3287 3286 } else 3288 - ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, 3289 - sdebug_sector_size); 3287 + ret = fetch_to_dev_buffer(scp, fs1p, lb_size); 3290 3288 3291 3289 if (-1 == ret) { 3292 3290 write_unlock_irqrestore(&atomic_rw, iflags); 3293 3291 return DID_ERROR << 16; 3294 - } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) 3292 + } else if (sdebug_verbose && !ndob && (ret < lb_size)) 3295 3293 sdev_printk(KERN_INFO, scp->device, 3296 3294 "%s: %s: lb size=%u, IO sent=%d bytes\n", 3297 - my_name, "write same", 3298 - sdebug_sector_size, ret); 3295 + my_name, "write same", lb_size, ret); 3299 3296 3300 3297 /* Copy first sector to remaining blocks */ 3301 - for (i = 1 ; i < num ; i++) 3302 - memcpy(fake_storep + ((lba + i) * sdebug_sector_size), 3303 - fake_storep + lba_off, 3304 - sdebug_sector_size); 3305 - 3298 + for (i = 1 ; i < num ; i++) { 3299 + lbaa = lba + i; 3300 + block = do_div(lbaa, sdebug_store_sectors); 3301 + memmove(fake_storep + (block * lb_size), fs1p, lb_size); 3302 + } 3306 3303 if (scsi_debug_lbp()) 3307 3304 map_region(lba, num); 3308 3305 out:
+8 -4
drivers/scsi/sd_zbc.c
··· 462 462 sdkp->device->use_10_for_rw = 0; 463 463 464 464 /* 465 - * If something changed, revalidate the disk zone bitmaps once we have 466 - * the capacity, that is on the second revalidate execution during disk 467 - * scan and always during normal revalidate. 465 + * Revalidate the disk zone bitmaps once the block device capacity is 466 + * set on the second revalidate execution during disk scan and if 467 + * something changed when executing a normal revalidate. 468 468 */ 469 - if (sdkp->first_scan) 469 + if (sdkp->first_scan) { 470 + sdkp->zone_blocks = zone_blocks; 471 + sdkp->nr_zones = nr_zones; 470 472 return 0; 473 + } 474 + 471 475 if (sdkp->zone_blocks != zone_blocks || 472 476 sdkp->nr_zones != nr_zones || 473 477 disk->queue->nr_zones != nr_zones) {
+5 -4
drivers/soc/fsl/qbman/qman.c
··· 1143 1143 static irqreturn_t portal_isr(int irq, void *ptr) 1144 1144 { 1145 1145 struct qman_portal *p = ptr; 1146 - 1147 - u32 clear = QM_DQAVAIL_MASK | p->irq_sources; 1148 1146 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; 1147 + u32 clear = 0; 1149 1148 1150 1149 if (unlikely(!is)) 1151 1150 return IRQ_NONE; 1152 1151 1153 1152 /* DQRR-handling if it's interrupt-driven */ 1154 - if (is & QM_PIRQ_DQRI) 1153 + if (is & QM_PIRQ_DQRI) { 1155 1154 __poll_portal_fast(p, QMAN_POLL_LIMIT); 1155 + clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; 1156 + } 1156 1157 /* Handling of anything else that's interrupt-driven */ 1157 - clear |= __poll_portal_slow(p, is); 1158 + clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; 1158 1159 qm_out(&p->p, QM_REG_ISR, clear); 1159 1160 return IRQ_HANDLED; 1160 1161 }
+1 -1
drivers/staging/octeon/ethernet-mdio.c
··· 170 170 return -ENODEV; 171 171 172 172 priv->last_link = 0; 173 - phy_start_aneg(phydev); 173 + phy_start(phydev); 174 174 175 175 return 0; 176 176 no_phy:
+4 -2
drivers/staging/speakup/spk_ttyio.c
··· 265 265 return; 266 266 } 267 267 268 - speakup_tty->ops->send_xchar(speakup_tty, ch); 268 + if (speakup_tty->ops->send_xchar) 269 + speakup_tty->ops->send_xchar(speakup_tty, ch); 269 270 mutex_unlock(&speakup_tty_mutex); 270 271 } 271 272 ··· 278 277 return; 279 278 } 280 279 281 - speakup_tty->ops->tiocmset(speakup_tty, set, clear); 280 + if (speakup_tty->ops->tiocmset) 281 + speakup_tty->ops->tiocmset(speakup_tty, set, clear); 282 282 mutex_unlock(&speakup_tty_mutex); 283 283 } 284 284
+7 -1
drivers/target/target_core_configfs.c
··· 852 852 return count; 853 853 } 854 854 855 + /* always zero, but attr needs to remain RW to avoid userspace breakage */ 856 + static ssize_t pi_prot_format_show(struct config_item *item, char *page) 857 + { 858 + return snprintf(page, PAGE_SIZE, "0\n"); 859 + } 860 + 855 861 static ssize_t pi_prot_format_store(struct config_item *item, 856 862 const char *page, size_t count) 857 863 { ··· 1138 1132 CONFIGFS_ATTR(, emulate_pr); 1139 1133 CONFIGFS_ATTR(, pi_prot_type); 1140 1134 CONFIGFS_ATTR_RO(, hw_pi_prot_type); 1141 - CONFIGFS_ATTR_WO(, pi_prot_format); 1135 + CONFIGFS_ATTR(, pi_prot_format); 1142 1136 CONFIGFS_ATTR(, pi_prot_verify); 1143 1137 CONFIGFS_ATTR(, enforce_pr_isids); 1144 1138 CONFIGFS_ATTR(, is_nonrot);
+3
drivers/tty/serial/8250/8250_mtk.c
··· 357 357 if (dmacnt == 2) { 358 358 data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), 359 359 GFP_KERNEL); 360 + if (!data->dma) 361 + return -ENOMEM; 362 + 360 363 data->dma->fn = mtk8250_dma_filter; 361 364 data->dma->rx_size = MTK_UART_RX_SIZE; 362 365 data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER;
+5 -4
drivers/tty/serial/8250/8250_pci.c
··· 3420 3420 serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) 3421 3421 { 3422 3422 int num_iomem, num_port, first_port = -1, i; 3423 + int rc; 3424 + 3425 + rc = serial_pci_is_class_communication(dev); 3426 + if (rc) 3427 + return rc; 3423 3428 3424 3429 /* 3425 3430 * Should we try to make guesses for multiport serial devices later? ··· 3651 3646 } 3652 3647 3653 3648 board = &pci_boards[ent->driver_data]; 3654 - 3655 - rc = serial_pci_is_class_communication(dev); 3656 - if (rc) 3657 - return rc; 3658 3649 3659 3650 rc = serial_pci_is_blacklisted(dev); 3660 3651 if (rc)
+8 -5
drivers/tty/serial/earlycon-riscv-sbi.c
··· 10 10 #include <linux/serial_core.h> 11 11 #include <asm/sbi.h> 12 12 13 - static void sbi_console_write(struct console *con, 14 - const char *s, unsigned int n) 13 + static void sbi_putc(struct uart_port *port, int c) 15 14 { 16 - int i; 15 + sbi_console_putchar(c); 16 + } 17 17 18 - for (i = 0; i < n; ++i) 19 - sbi_console_putchar(s[i]); 18 + static void sbi_console_write(struct console *con, 19 + const char *s, unsigned n) 20 + { 21 + struct earlycon_device *dev = con->data; 22 + uart_console_write(&dev->port, s, n, sbi_putc); 20 23 } 21 24 22 25 static int __init early_sbi_setup(struct earlycon_device *device,
+6
drivers/tty/serial/serial_core.c
··· 130 130 struct uart_port *port; 131 131 unsigned long flags; 132 132 133 + if (!state) 134 + return; 135 + 133 136 port = uart_port_lock(state, flags); 134 137 __uart_start(tty); 135 138 uart_port_unlock(port, flags); ··· 729 726 struct uart_state *state = tty->driver_data; 730 727 upstat_t mask = UPSTAT_SYNC_FIFO; 731 728 struct uart_port *port; 729 + 730 + if (!state) 731 + return; 732 732 733 733 port = uart_port_ref(state); 734 734 if (!port)
+8 -1
drivers/tty/serial/sh-sci.c
··· 1921 1921 1922 1922 static void sci_free_irq(struct sci_port *port) 1923 1923 { 1924 - int i; 1924 + int i, j; 1925 1925 1926 1926 /* 1927 1927 * Intentionally in reverse order so we iterate over the muxed ··· 1935 1935 * interrupt sources. 1936 1936 */ 1937 1937 if (unlikely(irq < 0)) 1938 + continue; 1939 + 1940 + /* Check if already freed (irq was muxed) */ 1941 + for (j = 0; j < i; j++) 1942 + if (port->irqs[j] == irq) 1943 + j = i + 1; 1944 + if (j > i) 1938 1945 continue; 1939 1946 1940 1947 free_irq(port->irqs[i], port);
+2 -2
drivers/usb/dwc3/dwc3-exynos.c
··· 78 78 for (i = 0; i < exynos->num_clks; i++) { 79 79 ret = clk_prepare_enable(exynos->clks[i]); 80 80 if (ret) { 81 - while (--i > 0) 81 + while (i-- > 0) 82 82 clk_disable_unprepare(exynos->clks[i]); 83 83 return ret; 84 84 } ··· 223 223 for (i = 0; i < exynos->num_clks; i++) { 224 224 ret = clk_prepare_enable(exynos->clks[i]); 225 225 if (ret) { 226 - while (--i > 0) 226 + while (i-- > 0) 227 227 clk_disable_unprepare(exynos->clks[i]); 228 228 return ret; 229 229 }
+1 -1
drivers/usb/dwc3/gadget.c
··· 1119 1119 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1120 1120 unsigned int rem = length % maxp; 1121 1121 1122 - if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { 1122 + if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { 1123 1123 struct dwc3 *dwc = dep->dwc; 1124 1124 struct dwc3_trb *trb; 1125 1125
+1 -1
drivers/usb/gadget/udc/net2272.c
··· 2083 2083 #if defined(PLX_PCI_RDK2) 2084 2084 /* see if PCI int for us by checking irqstat */ 2085 2085 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); 2086 - if (!intcsr & (1 << NET2272_PCI_IRQ)) { 2086 + if (!(intcsr & (1 << NET2272_PCI_IRQ))) { 2087 2087 spin_unlock(&dev->lock); 2088 2088 return IRQ_NONE; 2089 2089 }
+1 -12
drivers/usb/musb/musb_gadget.c
··· 452 452 } 453 453 454 454 if (request) { 455 - u8 is_dma = 0; 456 - bool short_packet = false; 457 455 458 456 trace_musb_req_tx(req); 459 457 460 458 if (dma && (csr & MUSB_TXCSR_DMAENAB)) { 461 - is_dma = 1; 462 459 csr |= MUSB_TXCSR_P_WZC_BITS; 463 460 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 464 461 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); ··· 473 476 */ 474 477 if ((request->zero && request->length) 475 478 && (request->length % musb_ep->packet_sz == 0) 476 - && (request->actual == request->length)) 477 - short_packet = true; 479 + && (request->actual == request->length)) { 478 480 479 - if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) && 480 - (is_dma && (!dma->desired_mode || 481 - (request->actual & 482 - (musb_ep->packet_sz - 1))))) 483 - short_packet = true; 484 - 485 - if (short_packet) { 486 481 /* 487 482 * On DMA completion, FIFO may not be 488 483 * available yet...
+11 -10
drivers/usb/musb/musbhsdma.c
··· 346 346 channel->status = MUSB_DMA_STATUS_FREE; 347 347 348 348 /* completed */ 349 - if ((devctl & MUSB_DEVCTL_HM) 350 - && (musb_channel->transmit) 351 - && ((channel->desired_mode == 0) 352 - || (channel->actual_len & 353 - (musb_channel->max_packet_sz - 1))) 354 - ) { 349 + if (musb_channel->transmit && 350 + (!channel->desired_mode || 351 + (channel->actual_len % 352 + musb_channel->max_packet_sz))) { 355 353 u8 epnum = musb_channel->epnum; 356 354 int offset = musb->io.ep_offset(epnum, 357 355 MUSB_TXCSR); ··· 361 363 */ 362 364 musb_ep_select(mbase, epnum); 363 365 txcsr = musb_readw(mbase, offset); 364 - txcsr &= ~(MUSB_TXCSR_DMAENAB 366 + if (channel->desired_mode == 1) { 367 + txcsr &= ~(MUSB_TXCSR_DMAENAB 365 368 | MUSB_TXCSR_AUTOSET); 366 - musb_writew(mbase, offset, txcsr); 367 - /* Send out the packet */ 368 - txcsr &= ~MUSB_TXCSR_DMAMODE; 369 + musb_writew(mbase, offset, txcsr); 370 + /* Send out the packet */ 371 + txcsr &= ~MUSB_TXCSR_DMAMODE; 372 + txcsr |= MUSB_TXCSR_DMAENAB; 373 + } 369 374 txcsr |= MUSB_TXCSR_TXPKTRDY; 370 375 musb_writew(mbase, offset, txcsr); 371 376 }
+1 -1
drivers/usb/phy/Kconfig
··· 21 21 22 22 config FSL_USB2_OTG 23 23 bool "Freescale USB OTG Transceiver Driver" 24 - depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM 24 + depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM 25 25 depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' 26 26 select USB_PHY 27 27 help
+1 -4
drivers/usb/phy/phy-am335x.c
··· 61 61 if (ret) 62 62 return ret; 63 63 64 - ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy); 65 - if (ret) 66 - return ret; 67 64 am_phy->usb_phy_gen.phy.init = am335x_init; 68 65 am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; 69 66 ··· 79 82 device_set_wakeup_enable(dev, false); 80 83 phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); 81 84 82 - return 0; 85 + return usb_add_phy_dev(&am_phy->usb_phy_gen.phy); 83 86 } 84 87 85 88 static int am335x_phy_remove(struct platform_device *pdev)
+2 -1
drivers/usb/typec/tcpm/tcpm.c
··· 2297 2297 pdo_pps_apdo_max_voltage(snk)); 2298 2298 port->pps_data.max_curr = min_pps_apdo_current(src, snk); 2299 2299 port->pps_data.out_volt = min(port->pps_data.max_volt, 2300 - port->pps_data.out_volt); 2300 + max(port->pps_data.min_volt, 2301 + port->pps_data.out_volt)); 2301 2302 port->pps_data.op_curr = min(port->pps_data.max_curr, 2302 2303 port->pps_data.op_curr); 2303 2304 }
+2 -1
drivers/vhost/net.c
··· 1337 1337 n->vqs[i].rx_ring = NULL; 1338 1338 vhost_net_buf_init(&n->vqs[i].rxq); 1339 1339 } 1340 - vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); 1340 + vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, 1341 + UIO_MAXIOV + VHOST_NET_BATCH); 1341 1342 1342 1343 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); 1343 1344 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
+1 -1
drivers/vhost/scsi.c
··· 1627 1627 vqs[i] = &vs->vqs[i].vq; 1628 1628 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; 1629 1629 } 1630 - vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); 1630 + vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV); 1631 1631 1632 1632 vhost_scsi_init_inflight(vs, NULL); 1633 1633
+4 -3
drivers/vhost/vhost.c
··· 390 390 vq->indirect = kmalloc_array(UIO_MAXIOV, 391 391 sizeof(*vq->indirect), 392 392 GFP_KERNEL); 393 - vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log), 393 + vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), 394 394 GFP_KERNEL); 395 - vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads), 395 + vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), 396 396 GFP_KERNEL); 397 397 if (!vq->indirect || !vq->log || !vq->heads) 398 398 goto err_nomem; ··· 414 414 } 415 415 416 416 void vhost_dev_init(struct vhost_dev *dev, 417 - struct vhost_virtqueue **vqs, int nvqs) 417 + struct vhost_virtqueue **vqs, int nvqs, int iov_limit) 418 418 { 419 419 struct vhost_virtqueue *vq; 420 420 int i; ··· 427 427 dev->iotlb = NULL; 428 428 dev->mm = NULL; 429 429 dev->worker = NULL; 430 + dev->iov_limit = iov_limit; 430 431 init_llist_head(&dev->work_list); 431 432 init_waitqueue_head(&dev->wait); 432 433 INIT_LIST_HEAD(&dev->read_list);
+3 -1
drivers/vhost/vhost.h
··· 170 170 struct list_head read_list; 171 171 struct list_head pending_list; 172 172 wait_queue_head_t wait; 173 + int iov_limit; 173 174 }; 174 175 175 - void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); 176 + void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, 177 + int nvqs, int iov_limit); 176 178 long vhost_dev_set_owner(struct vhost_dev *dev); 177 179 bool vhost_dev_has_owner(struct vhost_dev *dev); 178 180 long vhost_dev_check_owner(struct vhost_dev *);
+1 -1
drivers/vhost/vsock.c
··· 531 531 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; 532 532 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; 533 533 534 - vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs)); 534 + vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV); 535 535 536 536 file->private_data = vsock; 537 537 spin_lock_init(&vsock->send_pkt_list_lock);
+14 -1
drivers/virtio/virtio_ring.c
··· 152 152 /* Available for packed ring */ 153 153 struct { 154 154 /* Actual memory layout for this queue. */ 155 - struct vring_packed vring; 155 + struct { 156 + unsigned int num; 157 + struct vring_packed_desc *desc; 158 + struct vring_packed_desc_event *driver; 159 + struct vring_packed_desc_event *device; 160 + } vring; 156 161 157 162 /* Driver ring wrap counter. */ 158 163 bool avail_wrap_counter; ··· 1614 1609 !context; 1615 1610 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 1616 1611 1612 + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) 1613 + vq->weak_barriers = false; 1614 + 1617 1615 vq->packed.ring_dma_addr = ring_dma_addr; 1618 1616 vq->packed.driver_event_dma_addr = driver_event_dma_addr; 1619 1617 vq->packed.device_event_dma_addr = device_event_dma_addr; ··· 2087 2079 !context; 2088 2080 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 2089 2081 2082 + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) 2083 + vq->weak_barriers = false; 2084 + 2090 2085 vq->split.queue_dma_addr = 0; 2091 2086 vq->split.queue_size_in_bytes = 0; 2092 2087 ··· 2223 2212 case VIRTIO_F_IOMMU_PLATFORM: 2224 2213 break; 2225 2214 case VIRTIO_F_RING_PACKED: 2215 + break; 2216 + case VIRTIO_F_ORDER_PLATFORM: 2226 2217 break; 2227 2218 default: 2228 2219 /* We don't understand this bit. */
+1
fs/aio.c
··· 1436 1436 if (unlikely(!req->ki_filp)) 1437 1437 return -EBADF; 1438 1438 req->ki_complete = aio_complete_rw; 1439 + req->private = NULL; 1439 1440 req->ki_pos = iocb->aio_offset; 1440 1441 req->ki_flags = iocb_flags(req->ki_filp); 1441 1442 if (iocb->aio_flags & IOCB_FLAG_RESFD)
+2 -1
fs/autofs/expire.c
··· 596 596 pkt.len = dentry->d_name.len; 597 597 memcpy(pkt.name, dentry->d_name.name, pkt.len); 598 598 pkt.name[pkt.len] = '\0'; 599 - dput(dentry); 600 599 601 600 if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire))) 602 601 ret = -EFAULT; ··· 607 608 ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE); 608 609 complete_all(&ino->expire_complete); 609 610 spin_unlock(&sbi->fs_lock); 611 + 612 + dput(dentry); 610 613 611 614 return ret; 612 615 }
+3 -1
fs/autofs/inode.c
··· 266 266 } 267 267 root_inode = autofs_get_inode(s, S_IFDIR | 0755); 268 268 root = d_make_root(root_inode); 269 - if (!root) 269 + if (!root) { 270 + ret = -ENOMEM; 270 271 goto fail_ino; 272 + } 271 273 pipe = NULL; 272 274 273 275 root->d_fsdata = ino;
+50 -28
fs/btrfs/ctree.c
··· 968 968 return 0; 969 969 } 970 970 971 + static struct extent_buffer *alloc_tree_block_no_bg_flush( 972 + struct btrfs_trans_handle *trans, 973 + struct btrfs_root *root, 974 + u64 parent_start, 975 + const struct btrfs_disk_key *disk_key, 976 + int level, 977 + u64 hint, 978 + u64 empty_size) 979 + { 980 + struct btrfs_fs_info *fs_info = root->fs_info; 981 + struct extent_buffer *ret; 982 + 983 + /* 984 + * If we are COWing a node/leaf from the extent, chunk, device or free 985 + * space trees, make sure that we do not finish block group creation of 986 + * pending block groups. We do this to avoid a deadlock. 987 + * COWing can result in allocation of a new chunk, and flushing pending 988 + * block groups (btrfs_create_pending_block_groups()) can be triggered 989 + * when finishing allocation of a new chunk. Creation of a pending block 990 + * group modifies the extent, chunk, device and free space trees, 991 + * therefore we could deadlock with ourselves since we are holding a 992 + * lock on an extent buffer that btrfs_create_pending_block_groups() may 993 + * try to COW later. 994 + * For similar reasons, we also need to delay flushing pending block 995 + * groups when splitting a leaf or node, from one of those trees, since 996 + * we are holding a write lock on it and its parent or when inserting a 997 + * new root node for one of those trees. 998 + */ 999 + if (root == fs_info->extent_root || 1000 + root == fs_info->chunk_root || 1001 + root == fs_info->dev_root || 1002 + root == fs_info->free_space_root) 1003 + trans->can_flush_pending_bgs = false; 1004 + 1005 + ret = btrfs_alloc_tree_block(trans, root, parent_start, 1006 + root->root_key.objectid, disk_key, level, 1007 + hint, empty_size); 1008 + trans->can_flush_pending_bgs = true; 1009 + 1010 + return ret; 1011 + } 1012 + 971 1013 /* 972 1014 * does the dirty work in cow of a single block. The parent block (if 973 1015 * supplied) is updated to point to the new cow copy. 
The new buffer is marked ··· 1057 1015 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) 1058 1016 parent_start = parent->start; 1059 1017 1060 - /* 1061 - * If we are COWing a node/leaf from the extent, chunk, device or free 1062 - * space trees, make sure that we do not finish block group creation of 1063 - * pending block groups. We do this to avoid a deadlock. 1064 - * COWing can result in allocation of a new chunk, and flushing pending 1065 - * block groups (btrfs_create_pending_block_groups()) can be triggered 1066 - * when finishing allocation of a new chunk. Creation of a pending block 1067 - * group modifies the extent, chunk, device and free space trees, 1068 - * therefore we could deadlock with ourselves since we are holding a 1069 - * lock on an extent buffer that btrfs_create_pending_block_groups() may 1070 - * try to COW later. 1071 - */ 1072 - if (root == fs_info->extent_root || 1073 - root == fs_info->chunk_root || 1074 - root == fs_info->dev_root || 1075 - root == fs_info->free_space_root) 1076 - trans->can_flush_pending_bgs = false; 1077 - 1078 - cow = btrfs_alloc_tree_block(trans, root, parent_start, 1079 - root->root_key.objectid, &disk_key, level, 1080 - search_start, empty_size); 1081 - trans->can_flush_pending_bgs = true; 1018 + cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key, 1019 + level, search_start, empty_size); 1082 1020 if (IS_ERR(cow)) 1083 1021 return PTR_ERR(cow); 1084 1022 ··· 3367 3345 else 3368 3346 btrfs_node_key(lower, &lower_key, 0); 3369 3347 3370 - c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3371 - &lower_key, level, root->node->start, 0); 3348 + c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level, 3349 + root->node->start, 0); 3372 3350 if (IS_ERR(c)) 3373 3351 return PTR_ERR(c); 3374 3352 ··· 3497 3475 mid = (c_nritems + 1) / 2; 3498 3476 btrfs_node_key(c, &disk_key, mid); 3499 3477 3500 - split = btrfs_alloc_tree_block(trans, root, 0, 
root->root_key.objectid, 3501 - &disk_key, level, c->start, 0); 3478 + split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level, 3479 + c->start, 0); 3502 3480 if (IS_ERR(split)) 3503 3481 return PTR_ERR(split); 3504 3482 ··· 4282 4260 else 4283 4261 btrfs_item_key(l, &disk_key, mid); 4284 4262 4285 - right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 4286 - &disk_key, 0, l->start, 0); 4263 + right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0, 4264 + l->start, 0); 4287 4265 if (IS_ERR(right)) 4288 4266 return PTR_ERR(right); 4289 4267
+3
fs/btrfs/super.c
··· 1621 1621 flags | SB_RDONLY, device_name, data); 1622 1622 if (IS_ERR(mnt_root)) { 1623 1623 root = ERR_CAST(mnt_root); 1624 + kfree(subvol_name); 1624 1625 goto out; 1625 1626 } 1626 1627 ··· 1631 1630 if (error < 0) { 1632 1631 root = ERR_PTR(error); 1633 1632 mntput(mnt_root); 1633 + kfree(subvol_name); 1634 1634 goto out; 1635 1635 } 1636 1636 } 1637 1637 } 1638 1638 if (IS_ERR(mnt_root)) { 1639 1639 root = ERR_CAST(mnt_root); 1640 + kfree(subvol_name); 1640 1641 goto out; 1641 1642 } 1642 1643
+16 -8
fs/btrfs/transaction.c
··· 850 850 851 851 btrfs_trans_release_chunk_metadata(trans); 852 852 853 - if (lock && should_end_transaction(trans) && 854 - READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) { 855 - spin_lock(&info->trans_lock); 856 - if (cur_trans->state == TRANS_STATE_RUNNING) 857 - cur_trans->state = TRANS_STATE_BLOCKED; 858 - spin_unlock(&info->trans_lock); 859 - } 860 - 861 853 if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { 862 854 if (throttle) 863 855 return btrfs_commit_transaction(trans); ··· 1871 1879 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1872 1880 } 1873 1881 1882 + /* 1883 + * Release reserved delayed ref space of all pending block groups of the 1884 + * transaction and remove them from the list 1885 + */ 1886 + static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) 1887 + { 1888 + struct btrfs_fs_info *fs_info = trans->fs_info; 1889 + struct btrfs_block_group_cache *block_group, *tmp; 1890 + 1891 + list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { 1892 + btrfs_delayed_refs_rsv_release(fs_info, 1); 1893 + list_del_init(&block_group->bg_list); 1894 + } 1895 + } 1896 + 1874 1897 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) 1875 1898 { 1876 1899 /* ··· 2277 2270 btrfs_scrub_continue(fs_info); 2278 2271 cleanup_transaction: 2279 2272 btrfs_trans_release_metadata(trans); 2273 + btrfs_cleanup_pending_block_groups(trans); 2280 2274 btrfs_trans_release_chunk_metadata(trans); 2281 2275 trans->block_rsv = NULL; 2282 2276 btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
+2 -2
fs/btrfs/volumes.c
··· 957 957 else 958 958 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 959 959 960 - fs_devices->fsid_change = fsid_change_in_progress; 961 - 962 960 if (IS_ERR(fs_devices)) 963 961 return ERR_CAST(fs_devices); 962 + 963 + fs_devices->fsid_change = fsid_change_in_progress; 964 964 965 965 mutex_lock(&fs_devices->device_list_mutex); 966 966 list_add(&fs_devices->fs_list, &fs_uuids);
+10 -9
fs/buffer.c
··· 200 200 struct buffer_head *head; 201 201 struct page *page; 202 202 int all_mapped = 1; 203 + static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1); 203 204 204 205 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); 205 206 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); ··· 228 227 * file io on the block device and getblk. It gets dealt with 229 228 * elsewhere, don't buffer_error if we had some unmapped buffers 230 229 */ 231 - if (all_mapped) { 232 - printk("__find_get_block_slow() failed. " 233 - "block=%llu, b_blocknr=%llu\n", 234 - (unsigned long long)block, 235 - (unsigned long long)bh->b_blocknr); 236 - printk("b_state=0x%08lx, b_size=%zu\n", 237 - bh->b_state, bh->b_size); 238 - printk("device %pg blocksize: %d\n", bdev, 239 - 1 << bd_inode->i_blkbits); 230 + ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE); 231 + if (all_mapped && __ratelimit(&last_warned)) { 232 + printk("__find_get_block_slow() failed. block=%llu, " 233 + "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, " 234 + "device %pg blocksize: %d\n", 235 + (unsigned long long)block, 236 + (unsigned long long)bh->b_blocknr, 237 + bh->b_state, bh->b_size, bdev, 238 + 1 << bd_inode->i_blkbits); 240 239 } 241 240 out_unlock: 242 241 spin_unlock(&bd_mapping->private_lock);
+1 -1
fs/cifs/cifsfs.h
··· 150 150 extern const struct export_operations cifs_export_ops; 151 151 #endif /* CONFIG_CIFS_NFSD_EXPORT */ 152 152 153 - #define CIFS_VERSION "2.16" 153 + #define CIFS_VERSION "2.17" 154 154 #endif /* _CIFSFS_H */
+8 -3
fs/cifs/file.c
··· 2696 2696 2697 2697 rc = cifs_write_allocate_pages(wdata->pages, nr_pages); 2698 2698 if (rc) { 2699 + kvfree(wdata->pages); 2699 2700 kfree(wdata); 2700 2701 add_credits_and_wake_if(server, credits, 0); 2701 2702 break; ··· 2708 2707 if (rc) { 2709 2708 for (i = 0; i < nr_pages; i++) 2710 2709 put_page(wdata->pages[i]); 2710 + kvfree(wdata->pages); 2711 2711 kfree(wdata); 2712 2712 add_credits_and_wake_if(server, credits, 0); 2713 2713 break; ··· 3388 3386 } 3389 3387 3390 3388 rc = cifs_read_allocate_pages(rdata, npages); 3391 - if (rc) 3392 - goto error; 3389 + if (rc) { 3390 + kvfree(rdata->pages); 3391 + kfree(rdata); 3392 + add_credits_and_wake_if(server, credits, 0); 3393 + break; 3394 + } 3393 3395 3394 3396 rdata->tailsz = PAGE_SIZE; 3395 3397 } ··· 3413 3407 if (!rdata->cfile->invalidHandle || 3414 3408 !(rc = cifs_reopen_file(rdata->cfile, true))) 3415 3409 rc = server->ops->async_readv(rdata); 3416 - error: 3417 3410 if (rc) { 3418 3411 add_credits_and_wake_if(server, rdata->credits, 0); 3419 3412 kref_put(&rdata->refcount,
+3 -1
fs/cifs/smb2ops.c
··· 866 866 FILE_READ_EA, 867 867 FILE_FULL_EA_INFORMATION, 868 868 SMB2_O_INFO_FILE, 869 - SMB2_MAX_EA_BUF, 869 + CIFSMaxBufSize - 870 + MAX_SMB2_CREATE_RESPONSE_SIZE - 871 + MAX_SMB2_CLOSE_RESPONSE_SIZE, 870 872 &rsp_iov, &buftype, cifs_sb); 871 873 if (rc) { 872 874 /*
+34 -20
fs/cifs/smb2pdu.c
··· 3241 3241 rdata->mr = NULL; 3242 3242 } 3243 3243 #endif 3244 - if (rdata->result) 3244 + if (rdata->result && rdata->result != -ENODATA) { 3245 3245 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 3246 + trace_smb3_read_err(0 /* xid */, 3247 + rdata->cfile->fid.persistent_fid, 3248 + tcon->tid, tcon->ses->Suid, rdata->offset, 3249 + rdata->bytes, rdata->result); 3250 + } else 3251 + trace_smb3_read_done(0 /* xid */, 3252 + rdata->cfile->fid.persistent_fid, 3253 + tcon->tid, tcon->ses->Suid, 3254 + rdata->offset, rdata->got_bytes); 3246 3255 3247 3256 queue_work(cifsiod_wq, &rdata->work); 3248 3257 DeleteMidQEntry(mid); ··· 3326 3317 if (rc) { 3327 3318 kref_put(&rdata->refcount, cifs_readdata_release); 3328 3319 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); 3329 - trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid, 3330 - io_parms.tcon->tid, io_parms.tcon->ses->Suid, 3331 - io_parms.offset, io_parms.length); 3332 - } else 3333 - trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid, 3334 - io_parms.tcon->tid, io_parms.tcon->ses->Suid, 3335 - io_parms.offset, io_parms.length); 3320 + trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid, 3321 + io_parms.tcon->tid, 3322 + io_parms.tcon->ses->Suid, 3323 + io_parms.offset, io_parms.length, rc); 3324 + } 3336 3325 3337 3326 cifs_small_buf_release(buf); 3338 3327 return rc; ··· 3374 3367 if (rc != -ENODATA) { 3375 3368 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 3376 3369 cifs_dbg(VFS, "Send error in read = %d\n", rc); 3370 + trace_smb3_read_err(xid, req->PersistentFileId, 3371 + io_parms->tcon->tid, ses->Suid, 3372 + io_parms->offset, io_parms->length, 3373 + rc); 3377 3374 } 3378 - trace_smb3_read_err(rc, xid, req->PersistentFileId, 3379 - io_parms->tcon->tid, ses->Suid, 3380 - io_parms->offset, io_parms->length); 3381 3375 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 3382 3376 return rc == -ENODATA ? 
0 : rc; 3383 3377 } else ··· 3467 3459 wdata->mr = NULL; 3468 3460 } 3469 3461 #endif 3470 - if (wdata->result) 3462 + if (wdata->result) { 3471 3463 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 3464 + trace_smb3_write_err(0 /* no xid */, 3465 + wdata->cfile->fid.persistent_fid, 3466 + tcon->tid, tcon->ses->Suid, wdata->offset, 3467 + wdata->bytes, wdata->result); 3468 + } else 3469 + trace_smb3_write_done(0 /* no xid */, 3470 + wdata->cfile->fid.persistent_fid, 3471 + tcon->tid, tcon->ses->Suid, 3472 + wdata->offset, wdata->bytes); 3472 3473 3473 3474 queue_work(cifsiod_wq, &wdata->work); 3474 3475 DeleteMidQEntry(mid); ··· 3619 3602 wdata->bytes, rc); 3620 3603 kref_put(&wdata->refcount, release); 3621 3604 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 3622 - } else 3623 - trace_smb3_write_done(0 /* no xid */, req->PersistentFileId, 3624 - tcon->tid, tcon->ses->Suid, wdata->offset, 3625 - wdata->bytes); 3605 + } 3626 3606 3627 3607 async_writev_out: 3628 3608 cifs_small_buf_release(req); ··· 3845 3831 rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) { 3846 3832 srch_inf->endOfSearch = true; 3847 3833 rc = 0; 3848 - } 3849 - cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); 3834 + } else 3835 + cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); 3850 3836 goto qdir_exit; 3851 3837 } 3852 3838 ··· 4441 4427 rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); 4442 4428 cifs_small_buf_release(req); 4443 4429 4444 - please_key_low = (__u64 *)req->LeaseKey; 4445 - please_key_high = (__u64 *)(req->LeaseKey+8); 4430 + please_key_low = (__u64 *)lease_key; 4431 + please_key_high = (__u64 *)(lease_key+8); 4446 4432 if (rc) { 4447 4433 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); 4448 4434 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
+15 -4
fs/cifs/smb2pdu.h
··· 84 84 85 85 #define NUMBER_OF_SMB2_COMMANDS 0x0013 86 86 87 - /* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ 88 - #define MAX_SMB2_HDR_SIZE 0x00b0 87 + /* 52 transform hdr + 64 hdr + 88 create rsp */ 88 + #define SMB2_TRANSFORM_HEADER_SIZE 52 89 + #define MAX_SMB2_HDR_SIZE 204 89 90 90 91 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) 91 92 #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd) ··· 649 648 __u8 Buffer[0]; 650 649 } __packed; 651 650 651 + /* 652 + * Maximum size of a SMB2_CREATE response is 64 (smb2 header) + 653 + * 88 (fixed part of create response) + 520 (path) + 150 (contexts) + 654 + * 2 bytes of padding. 655 + */ 656 + #define MAX_SMB2_CREATE_RESPONSE_SIZE 824 657 + 652 658 struct smb2_create_rsp { 653 659 struct smb2_sync_hdr sync_hdr; 654 660 __le16 StructureSize; /* Must be 89 */ ··· 1003 995 __u64 PersistentFileId; /* opaque endianness */ 1004 996 __u64 VolatileFileId; /* opaque endianness */ 1005 997 } __packed; 998 + 999 + /* 1000 + * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data) 1001 + */ 1002 + #define MAX_SMB2_CLOSE_RESPONSE_SIZE 124 1006 1003 1007 1004 struct smb2_close_rsp { 1008 1005 struct smb2_sync_hdr sync_hdr; ··· 1410 1397 __le32 FileNameLength; 1411 1398 char FileName[0]; /* Name to be assigned to new link */ 1412 1399 } __packed; /* level 11 Set */ 1413 - 1414 - #define SMB2_MAX_EA_BUF 65536 1415 1400 1416 1401 struct smb2_file_full_ea_info { /* encoding of response for level 15 */ 1417 1402 __le32 next_entry_offset;
+33 -5
fs/dcache.c
··· 119 119 120 120 static DEFINE_PER_CPU(long, nr_dentry); 121 121 static DEFINE_PER_CPU(long, nr_dentry_unused); 122 + static DEFINE_PER_CPU(long, nr_dentry_negative); 122 123 123 124 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 124 125 ··· 153 152 return sum < 0 ? 0 : sum; 154 153 } 155 154 155 + static long get_nr_dentry_negative(void) 156 + { 157 + int i; 158 + long sum = 0; 159 + 160 + for_each_possible_cpu(i) 161 + sum += per_cpu(nr_dentry_negative, i); 162 + return sum < 0 ? 0 : sum; 163 + } 164 + 156 165 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, 157 166 size_t *lenp, loff_t *ppos) 158 167 { 159 168 dentry_stat.nr_dentry = get_nr_dentry(); 160 169 dentry_stat.nr_unused = get_nr_dentry_unused(); 170 + dentry_stat.nr_negative = get_nr_dentry_negative(); 161 171 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 162 172 } 163 173 #endif ··· 329 317 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); 330 318 WRITE_ONCE(dentry->d_flags, flags); 331 319 dentry->d_inode = NULL; 320 + if (dentry->d_flags & DCACHE_LRU_LIST) 321 + this_cpu_inc(nr_dentry_negative); 332 322 } 333 323 334 324 static void dentry_free(struct dentry *dentry) ··· 385 371 * The per-cpu "nr_dentry_unused" counters are updated with 386 372 * the DCACHE_LRU_LIST bit. 387 373 * 374 + * The per-cpu "nr_dentry_negative" counters are only updated 375 + * when deleted from or added to the per-superblock LRU list, not 376 + * from/to the shrink list. That is to avoid an unneeded dec/inc 377 + * pair when moving from LRU to shrink list in select_collect(). 378 + * 388 379 * These helper functions make sure we always follow the 389 380 * rules. d_lock must be held by the caller. 
390 381 */ ··· 399 380 D_FLAG_VERIFY(dentry, 0); 400 381 dentry->d_flags |= DCACHE_LRU_LIST; 401 382 this_cpu_inc(nr_dentry_unused); 383 + if (d_is_negative(dentry)) 384 + this_cpu_inc(nr_dentry_negative); 402 385 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 403 386 } 404 387 ··· 409 388 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 410 389 dentry->d_flags &= ~DCACHE_LRU_LIST; 411 390 this_cpu_dec(nr_dentry_unused); 391 + if (d_is_negative(dentry)) 392 + this_cpu_dec(nr_dentry_negative); 412 393 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 413 394 } 414 395 ··· 441 418 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 442 419 dentry->d_flags &= ~DCACHE_LRU_LIST; 443 420 this_cpu_dec(nr_dentry_unused); 421 + if (d_is_negative(dentry)) 422 + this_cpu_dec(nr_dentry_negative); 444 423 list_lru_isolate(lru, &dentry->d_lru); 445 424 } 446 425 ··· 451 426 { 452 427 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 453 428 dentry->d_flags |= DCACHE_SHRINK_LIST; 429 + if (d_is_negative(dentry)) 430 + this_cpu_dec(nr_dentry_negative); 454 431 list_lru_isolate_move(lru, &dentry->d_lru, list); 455 432 } 456 433 ··· 1215 1188 */ 1216 1189 void shrink_dcache_sb(struct super_block *sb) 1217 1190 { 1218 - long freed; 1219 - 1220 1191 do { 1221 1192 LIST_HEAD(dispose); 1222 1193 1223 - freed = list_lru_walk(&sb->s_dentry_lru, 1194 + list_lru_walk(&sb->s_dentry_lru, 1224 1195 dentry_lru_isolate_shrink, &dispose, 1024); 1225 - 1226 - this_cpu_sub(nr_dentry_unused, freed); 1227 1196 shrink_dentry_list(&dispose); 1228 1197 } while (list_lru_count(&sb->s_dentry_lru) > 0); 1229 1198 } ··· 1843 1820 WARN_ON(d_in_lookup(dentry)); 1844 1821 1845 1822 spin_lock(&dentry->d_lock); 1823 + /* 1824 + * Decrement negative dentry count if it was in the LRU list. 
1825 + */ 1826 + if (dentry->d_flags & DCACHE_LRU_LIST) 1827 + this_cpu_dec(nr_dentry_negative); 1846 1828 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); 1847 1829 raw_write_seqcount_begin(&dentry->d_seq); 1848 1830 __d_set_inode_and_type(dentry, inode, add_flags);
+24 -12
fs/debugfs/inode.c
··· 324 324 inode_unlock(d_inode(dentry->d_parent)); 325 325 dput(dentry); 326 326 simple_release_fs(&debugfs_mount, &debugfs_mount_count); 327 - return NULL; 327 + return ERR_PTR(-ENOMEM); 328 328 } 329 329 330 330 static struct dentry *end_creating(struct dentry *dentry) ··· 347 347 dentry = start_creating(name, parent); 348 348 349 349 if (IS_ERR(dentry)) 350 - return NULL; 350 + return dentry; 351 351 352 352 inode = debugfs_get_inode(dentry->d_sb); 353 353 if (unlikely(!inode)) ··· 386 386 * This function will return a pointer to a dentry if it succeeds. This 387 387 * pointer must be passed to the debugfs_remove() function when the file is 388 388 * to be removed (no automatic cleanup happens if your module is unloaded, 389 - * you are responsible here.) If an error occurs, %NULL will be returned. 389 + * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be 390 + * returned. 390 391 * 391 392 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 392 393 * returned. ··· 465 464 * This function will return a pointer to a dentry if it succeeds. This 466 465 * pointer must be passed to the debugfs_remove() function when the file is 467 466 * to be removed (no automatic cleanup happens if your module is unloaded, 468 - * you are responsible here.) If an error occurs, %NULL will be returned. 467 + * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be 468 + * returned. 469 469 * 470 470 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 471 471 * returned. ··· 497 495 * This function will return a pointer to a dentry if it succeeds. This 498 496 * pointer must be passed to the debugfs_remove() function when the file is 499 497 * to be removed (no automatic cleanup happens if your module is unloaded, 500 - * you are responsible here.) If an error occurs, %NULL will be returned. 498 + * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be 499 + * returned. 
501 500 * 502 501 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 503 502 * returned. ··· 509 506 struct inode *inode; 510 507 511 508 if (IS_ERR(dentry)) 512 - return NULL; 509 + return dentry; 513 510 514 511 inode = debugfs_get_inode(dentry->d_sb); 515 512 if (unlikely(!inode)) ··· 548 545 struct inode *inode; 549 546 550 547 if (IS_ERR(dentry)) 551 - return NULL; 548 + return dentry; 552 549 553 550 inode = debugfs_get_inode(dentry->d_sb); 554 551 if (unlikely(!inode)) ··· 584 581 * This function will return a pointer to a dentry if it succeeds. This 585 582 * pointer must be passed to the debugfs_remove() function when the symbolic 586 583 * link is to be removed (no automatic cleanup happens if your module is 587 - * unloaded, you are responsible here.) If an error occurs, %NULL will be 588 - * returned. 584 + * unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) 585 + * will be returned. 589 586 * 590 587 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 591 588 * returned. ··· 597 594 struct inode *inode; 598 595 char *link = kstrdup(target, GFP_KERNEL); 599 596 if (!link) 600 - return NULL; 597 + return ERR_PTR(-ENOMEM); 601 598 602 599 dentry = start_creating(name, parent); 603 600 if (IS_ERR(dentry)) { 604 601 kfree(link); 605 - return NULL; 602 + return dentry; 606 603 } 607 604 608 605 inode = debugfs_get_inode(dentry->d_sb); ··· 790 787 struct dentry *dentry = NULL, *trap; 791 788 struct name_snapshot old_name; 792 789 790 + if (IS_ERR(old_dir)) 791 + return old_dir; 792 + if (IS_ERR(new_dir)) 793 + return new_dir; 794 + if (IS_ERR_OR_NULL(old_dentry)) 795 + return old_dentry; 796 + 793 797 trap = lock_rename(new_dir, old_dir); 794 798 /* Source or destination directories don't exist? 
*/ 795 799 if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) ··· 830 820 if (dentry && !IS_ERR(dentry)) 831 821 dput(dentry); 832 822 unlock_rename(new_dir, old_dir); 833 - return NULL; 823 + if (IS_ERR(dentry)) 824 + return dentry; 825 + return ERR_PTR(-EINVAL); 834 826 } 835 827 EXPORT_SYMBOL_GPL(debugfs_rename); 836 828
+7 -1
fs/drop_caches.c
··· 21 21 spin_lock(&sb->s_inode_list_lock); 22 22 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 23 23 spin_lock(&inode->i_lock); 24 + /* 25 + * We must skip inodes in unusual state. We may also skip 26 + * inodes without pages but we deliberately won't in case 27 + * we need to reschedule to avoid softlockups. 28 + */ 24 29 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || 25 - (inode->i_mapping->nrpages == 0)) { 30 + (inode->i_mapping->nrpages == 0 && !need_resched())) { 26 31 spin_unlock(&inode->i_lock); 27 32 continue; 28 33 } ··· 35 30 spin_unlock(&inode->i_lock); 36 31 spin_unlock(&sb->s_inode_list_lock); 37 32 33 + cond_resched(); 38 34 invalidate_mapping_pages(inode->i_mapping, 0, -1); 39 35 iput(toput_inode); 40 36 toput_inode = inode;
+3 -1
fs/fuse/dev.c
··· 1742 1742 req->in.h.nodeid = outarg->nodeid; 1743 1743 req->in.numargs = 2; 1744 1744 req->in.argpages = 1; 1745 - req->page_descs[0].offset = offset; 1746 1745 req->end = fuse_retrieve_end; 1747 1746 1748 1747 index = outarg->offset >> PAGE_SHIFT; ··· 1756 1757 1757 1758 this_num = min_t(unsigned, num, PAGE_SIZE - offset); 1758 1759 req->pages[req->num_pages] = page; 1760 + req->page_descs[req->num_pages].offset = offset; 1759 1761 req->page_descs[req->num_pages].length = this_num; 1760 1762 req->num_pages++; 1761 1763 ··· 2077 2077 2078 2078 ret = fuse_dev_do_write(fud, &cs, len); 2079 2079 2080 + pipe_lock(pipe); 2080 2081 for (idx = 0; idx < nbuf; idx++) 2081 2082 pipe_buf_release(pipe, &bufs[idx]); 2083 + pipe_unlock(pipe); 2082 2084 2083 2085 out: 2084 2086 kvfree(bufs);
+1 -1
fs/fuse/file.c
··· 1782 1782 spin_unlock(&fc->lock); 1783 1783 1784 1784 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 1785 - dec_node_page_state(page, NR_WRITEBACK_TEMP); 1785 + dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP); 1786 1786 wb_writeout_inc(&bdi->wb); 1787 1787 fuse_writepage_free(fc, new_req); 1788 1788 fuse_request_free(new_req);
+1 -1
fs/fuse/inode.c
··· 628 628 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); 629 629 fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); 630 630 fc->user_ns = get_user_ns(user_ns); 631 + fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; 631 632 } 632 633 EXPORT_SYMBOL_GPL(fuse_conn_init); 633 634 ··· 1163 1162 fc->user_id = d.user_id; 1164 1163 fc->group_id = d.group_id; 1165 1164 fc->max_read = max_t(unsigned, 4096, d.max_read); 1166 - fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; 1167 1165 1168 1166 /* Used by get_root_inode() */ 1169 1167 sb->s_fs_info = fc;
+1 -1
fs/gfs2/rgrp.c
··· 1780 1780 goto next_iter; 1781 1781 } 1782 1782 if (ret == -E2BIG) { 1783 - n += rbm->bii - initial_bii; 1784 1783 rbm->bii = 0; 1785 1784 rbm->offset = 0; 1785 + n += (rbm->bii - initial_bii); 1786 1786 goto res_covered_end_of_rgrp; 1787 1787 } 1788 1788 return ret;
+30 -7
fs/iomap.c
··· 116 116 atomic_set(&iop->read_count, 0); 117 117 atomic_set(&iop->write_count, 0); 118 118 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); 119 + 120 + /* 121 + * migrate_page_move_mapping() assumes that pages with private data have 122 + * their count elevated by 1. 123 + */ 124 + get_page(page); 119 125 set_page_private(page, (unsigned long)iop); 120 126 SetPagePrivate(page); 121 127 return iop; ··· 138 132 WARN_ON_ONCE(atomic_read(&iop->write_count)); 139 133 ClearPagePrivate(page); 140 134 set_page_private(page, 0); 135 + put_page(page); 141 136 kfree(iop); 142 137 } 143 138 ··· 576 569 577 570 if (page_has_private(page)) { 578 571 ClearPagePrivate(page); 572 + get_page(newpage); 579 573 set_page_private(newpage, page_private(page)); 580 574 set_page_private(page, 0); 575 + put_page(page); 581 576 SetPagePrivate(newpage); 582 577 } 583 578 ··· 1813 1804 loff_t pos = iocb->ki_pos, start = pos; 1814 1805 loff_t end = iocb->ki_pos + count - 1, ret = 0; 1815 1806 unsigned int flags = IOMAP_DIRECT; 1807 + bool wait_for_completion = is_sync_kiocb(iocb); 1816 1808 struct blk_plug plug; 1817 1809 struct iomap_dio *dio; 1818 1810 ··· 1833 1823 dio->end_io = end_io; 1834 1824 dio->error = 0; 1835 1825 dio->flags = 0; 1836 - dio->wait_for_completion = is_sync_kiocb(iocb); 1837 1826 1838 1827 dio->submit.iter = iter; 1839 1828 dio->submit.waiter = current; ··· 1887 1878 dio_warn_stale_pagecache(iocb->ki_filp); 1888 1879 ret = 0; 1889 1880 1890 - if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion && 1881 + if (iov_iter_rw(iter) == WRITE && !wait_for_completion && 1891 1882 !inode->i_sb->s_dio_done_wq) { 1892 1883 ret = sb_init_dio_done_wq(inode->i_sb); 1893 1884 if (ret < 0) ··· 1903 1894 if (ret <= 0) { 1904 1895 /* magic error code to fall back to buffered I/O */ 1905 1896 if (ret == -ENOTBLK) { 1906 - dio->wait_for_completion = true; 1897 + wait_for_completion = true; 1907 1898 ret = 0; 1908 1899 } 1909 1900 break; ··· 1925 1916 if (dio->flags & 
IOMAP_DIO_WRITE_FUA) 1926 1917 dio->flags &= ~IOMAP_DIO_NEED_SYNC; 1927 1918 1919 + /* 1920 + * We are about to drop our additional submission reference, which 1921 + * might be the last reference to the dio. There are three three 1922 + * different ways we can progress here: 1923 + * 1924 + * (a) If this is the last reference we will always complete and free 1925 + * the dio ourselves. 1926 + * (b) If this is not the last reference, and we serve an asynchronous 1927 + * iocb, we must never touch the dio after the decrement, the 1928 + * I/O completion handler will complete and free it. 1929 + * (c) If this is not the last reference, but we serve a synchronous 1930 + * iocb, the I/O completion handler will wake us up on the drop 1931 + * of the final reference, and we will complete and free it here 1932 + * after we got woken by the I/O completion handler. 1933 + */ 1934 + dio->wait_for_completion = wait_for_completion; 1928 1935 if (!atomic_dec_and_test(&dio->ref)) { 1929 - if (!dio->wait_for_completion) 1936 + if (!wait_for_completion) 1930 1937 return -EIOCBQUEUED; 1931 1938 1932 1939 for (;;) { ··· 1959 1934 __set_current_state(TASK_RUNNING); 1960 1935 } 1961 1936 1962 - ret = iomap_dio_complete(dio); 1963 - 1964 - return ret; 1937 + return iomap_dio_complete(dio); 1965 1938 1966 1939 out_free_dio: 1967 1940 kfree(dio);
+5
fs/nfs/super.c
··· 1895 1895 size_t len; 1896 1896 char *end; 1897 1897 1898 + if (unlikely(!dev_name || !*dev_name)) { 1899 + dfprintk(MOUNT, "NFS: device name not specified\n"); 1900 + return -EINVAL; 1901 + } 1902 + 1898 1903 /* Is the host name protected with square brakcets? */ 1899 1904 if (*dev_name == '[') { 1900 1905 end = strchr(++dev_name, ']');
+5 -4
fs/nfs/write.c
··· 621 621 nfs_set_page_writeback(page); 622 622 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 623 623 624 - ret = 0; 624 + ret = req->wb_context->error; 625 625 /* If there is a fatal error that covers this write, just exit */ 626 - if (nfs_error_is_fatal_on_server(req->wb_context->error)) 626 + if (nfs_error_is_fatal_on_server(ret)) 627 627 goto out_launder; 628 628 629 + ret = 0; 629 630 if (!nfs_pageio_add_request(pgio, req)) { 630 631 ret = pgio->pg_error; 631 632 /* ··· 636 635 nfs_context_set_write_error(req->wb_context, ret); 637 636 if (nfs_error_is_fatal_on_server(ret)) 638 637 goto out_launder; 639 - } 638 + } else 639 + ret = -EAGAIN; 640 640 nfs_redirty_request(req); 641 - ret = -EAGAIN; 642 641 } else 643 642 nfs_add_stats(page_file_mapping(page)->host, 644 643 NFSIOS_WRITEPAGES, 1);
+4 -2
fs/nfsd/vfs.c
··· 557 557 loff_t cloned; 558 558 559 559 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); 560 + if (cloned < 0) 561 + return nfserrno(cloned); 560 562 if (count && cloned != count) 561 - cloned = -EINVAL; 562 - return nfserrno(cloned < 0 ? cloned : 0); 563 + return nfserrno(-EINVAL); 564 + return 0; 563 565 } 564 566 565 567 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
+3 -1
fs/proc/generic.c
··· 256 256 inode = proc_get_inode(dir->i_sb, de); 257 257 if (!inode) 258 258 return ERR_PTR(-ENOMEM); 259 - d_set_d_op(dentry, &proc_misc_dentry_ops); 259 + d_set_d_op(dentry, de->proc_dops); 260 260 return d_splice_alias(inode, dentry); 261 261 } 262 262 read_unlock(&proc_subdir_lock); ··· 428 428 spin_lock_init(&ent->pde_unload_lock); 429 429 INIT_LIST_HEAD(&ent->pde_openers); 430 430 proc_set_user(ent, (*parent)->uid, (*parent)->gid); 431 + 432 + ent->proc_dops = &proc_misc_dentry_ops; 431 433 432 434 out: 433 435 return ent;
+1
fs/proc/internal.h
··· 44 44 struct completion *pde_unload_completion; 45 45 const struct inode_operations *proc_iops; 46 46 const struct file_operations *proc_fops; 47 + const struct dentry_operations *proc_dops; 47 48 union { 48 49 const struct seq_operations *seq_ops; 49 50 int (*single_show)(struct seq_file *, void *);
+20
fs/proc/proc_net.c
··· 38 38 return maybe_get_net(PDE_NET(PDE(inode))); 39 39 } 40 40 41 + static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags) 42 + { 43 + return 0; 44 + } 45 + 46 + static const struct dentry_operations proc_net_dentry_ops = { 47 + .d_revalidate = proc_net_d_revalidate, 48 + .d_delete = always_delete_dentry, 49 + }; 50 + 51 + static void pde_force_lookup(struct proc_dir_entry *pde) 52 + { 53 + /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ 54 + pde->proc_dops = &proc_net_dentry_ops; 55 + } 56 + 41 57 static int seq_open_net(struct inode *inode, struct file *file) 42 58 { 43 59 unsigned int state_size = PDE(inode)->state_size; ··· 106 90 p = proc_create_reg(name, mode, &parent, data); 107 91 if (!p) 108 92 return NULL; 93 + pde_force_lookup(p); 109 94 p->proc_fops = &proc_net_seq_fops; 110 95 p->seq_ops = ops; 111 96 p->state_size = state_size; ··· 150 133 p = proc_create_reg(name, mode, &parent, data); 151 134 if (!p) 152 135 return NULL; 136 + pde_force_lookup(p); 153 137 p->proc_fops = &proc_net_seq_fops; 154 138 p->seq_ops = ops; 155 139 p->state_size = state_size; ··· 199 181 p = proc_create_reg(name, mode, &parent, data); 200 182 if (!p) 201 183 return NULL; 184 + pde_force_lookup(p); 202 185 p->proc_fops = &proc_net_single_fops; 203 186 p->single_show = show; 204 187 return proc_register(parent, p); ··· 242 223 p = proc_create_reg(name, mode, &parent, data); 243 224 if (!p) 244 225 return NULL; 226 + pde_force_lookup(p); 245 227 p->proc_fops = &proc_net_single_fops; 246 228 p->single_show = show; 247 229 p->write = write;
+8 -3
fs/xfs/scrub/repair.c
··· 768 768 if (!uuid_equal(&btblock->bb_u.s.bb_uuid, 769 769 &mp->m_sb.sb_meta_uuid)) 770 770 goto out; 771 + /* 772 + * Read verifiers can reference b_ops, so we set the pointer 773 + * here. If the verifier fails we'll reset the buffer state 774 + * to what it was before we touched the buffer. 775 + */ 776 + bp->b_ops = fab->buf_ops; 771 777 fab->buf_ops->verify_read(bp); 772 778 if (bp->b_error) { 779 + bp->b_ops = NULL; 773 780 bp->b_error = 0; 774 781 goto out; 775 782 } 776 783 777 784 /* 778 785 * Some read verifiers will (re)set b_ops, so we must be 779 - * careful not to blow away any such assignment. 786 + * careful not to change b_ops after running the verifier. 780 787 */ 781 - if (!bp->b_ops) 782 - bp->b_ops = fab->buf_ops; 783 788 } 784 789 785 790 /*
+2
fs/xfs/xfs_aops.c
··· 449 449 } 450 450 451 451 wpc->imap = imap; 452 + xfs_trim_extent_eof(&wpc->imap, ip); 452 453 trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap); 453 454 return 0; 454 455 allocate_blocks: ··· 460 459 ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF || 461 460 imap.br_startoff + imap.br_blockcount <= cow_fsb); 462 461 wpc->imap = imap; 462 + xfs_trim_extent_eof(&wpc->imap, ip); 463 463 trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap); 464 464 return 0; 465 465 }
+17 -2
fs/xfs/xfs_buf.c
··· 776 776 } 777 777 778 778 /* 779 + * Set buffer ops on an unchecked buffer and validate it, if possible. 780 + * 779 781 * If the caller passed in an ops structure and the buffer doesn't have ops 780 782 * assigned, set the ops and use them to verify the contents. If the contents 781 783 * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no 782 784 * recorded errors and is already in XBF_DONE state. 785 + * 786 + * Under normal operations, every in-core buffer must have buffer ops assigned 787 + * to them when the buffer is read in from disk so that we can validate the 788 + * metadata. 789 + * 790 + * However, there are two scenarios where one can encounter in-core buffers 791 + * that don't have buffer ops. The first is during log recovery of buffers on 792 + * a V4 filesystem, though these buffers are purged at the end of recovery. 793 + * 794 + * The other is online repair, which tries to match arbitrary metadata blocks 795 + * with btree types in order to find the root. If online repair doesn't match 796 + * the buffer with /any/ btree type, the buffer remains in memory in DONE state 797 + * with no ops, and a subsequent read_buf call from elsewhere will not set the 798 + * ops. This function helps us fix this situation. 783 799 */ 784 800 int 785 801 xfs_buf_ensure_ops( ··· 1552 1536 xfs_buf_ioerror(bp, -EIO); 1553 1537 bp->b_flags &= ~XBF_DONE; 1554 1538 xfs_buf_stale(bp); 1555 - if (bp->b_flags & XBF_ASYNC) 1556 - xfs_buf_ioend(bp); 1539 + xfs_buf_ioend(bp); 1557 1540 return -EIO; 1558 1541 } 1559 1542
+13 -13
include/dt-bindings/clock/imx8mq-clock.h
··· 350 350 #define IMX8MQ_CLK_VPU_G2_ROOT 241 351 351 352 352 /* SCCG PLL GATE */ 353 - #define IMX8MQ_SYS1_PLL_OUT 232 353 + #define IMX8MQ_SYS1_PLL_OUT 242 354 354 #define IMX8MQ_SYS2_PLL_OUT 243 355 355 #define IMX8MQ_SYS3_PLL_OUT 244 356 356 #define IMX8MQ_DRAM_PLL_OUT 245 ··· 372 372 /* txesc clock */ 373 373 #define IMX8MQ_CLK_DSI_IPG_DIV 256 374 374 375 - #define IMX8MQ_CLK_TMU_ROOT 265 375 + #define IMX8MQ_CLK_TMU_ROOT 257 376 376 377 377 /* Display root clocks */ 378 - #define IMX8MQ_CLK_DISP_AXI_ROOT 266 379 - #define IMX8MQ_CLK_DISP_APB_ROOT 267 380 - #define IMX8MQ_CLK_DISP_RTRM_ROOT 268 378 + #define IMX8MQ_CLK_DISP_AXI_ROOT 258 379 + #define IMX8MQ_CLK_DISP_APB_ROOT 259 380 + #define IMX8MQ_CLK_DISP_RTRM_ROOT 260 381 381 382 - #define IMX8MQ_CLK_OCOTP_ROOT 269 382 + #define IMX8MQ_CLK_OCOTP_ROOT 261 383 383 384 - #define IMX8MQ_CLK_DRAM_ALT_ROOT 270 385 - #define IMX8MQ_CLK_DRAM_CORE 271 384 + #define IMX8MQ_CLK_DRAM_ALT_ROOT 262 385 + #define IMX8MQ_CLK_DRAM_CORE 263 386 386 387 - #define IMX8MQ_CLK_MU_ROOT 272 388 - #define IMX8MQ_VIDEO2_PLL_OUT 273 387 + #define IMX8MQ_CLK_MU_ROOT 264 388 + #define IMX8MQ_VIDEO2_PLL_OUT 265 389 389 390 - #define IMX8MQ_CLK_CLKO2 274 390 + #define IMX8MQ_CLK_CLKO2 266 391 391 392 - #define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275 392 + #define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 267 393 393 394 - #define IMX8MQ_CLK_END 276 394 + #define IMX8MQ_CLK_END 268 395 395 #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
-1
include/dt-bindings/clock/marvell,mmp2.h
··· 71 71 #define MMP2_CLK_CCIC1_MIX 117 72 72 #define MMP2_CLK_CCIC1_PHY 118 73 73 #define MMP2_CLK_CCIC1_SPHY 119 74 - #define MMP2_CLK_SP 120 75 74 76 75 #define MMP2_NR_CLKS 200 77 76 #endif
+7 -1
include/linux/blktrace_api.h
··· 116 116 117 117 static inline sector_t blk_rq_trace_sector(struct request *rq) 118 118 { 119 - return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq); 119 + /* 120 + * Tracing should ignore starting sector for passthrough requests and 121 + * requests where starting sector didn't get set. 122 + */ 123 + if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) 124 + return 0; 125 + return blk_rq_pos(rq); 120 126 } 121 127 122 128 static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
-2
include/linux/cpu.h
··· 180 180 #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) 181 181 extern enum cpuhp_smt_control cpu_smt_control; 182 182 extern void cpu_smt_disable(bool force); 183 - extern void cpu_smt_check_topology_early(void); 184 183 extern void cpu_smt_check_topology(void); 185 184 #else 186 185 # define cpu_smt_control (CPU_SMT_ENABLED) 187 186 static inline void cpu_smt_disable(bool force) { } 188 - static inline void cpu_smt_check_topology_early(void) { } 189 187 static inline void cpu_smt_check_topology(void) { } 190 188 #endif 191 189
+4 -3
include/linux/dcache.h
··· 62 62 struct dentry_stat_t { 63 63 long nr_dentry; 64 64 long nr_unused; 65 - long age_limit; /* age in seconds */ 66 - long want_pages; /* pages requested by system */ 67 - long dummy[2]; 65 + long age_limit; /* age in seconds */ 66 + long want_pages; /* pages requested by system */ 67 + long nr_negative; /* # of unused negative dentries */ 68 + long dummy; /* Reserved for future use */ 68 69 }; 69 70 extern struct dentry_stat_t dentry_stat; 70 71
+18 -3
include/linux/filter.h
··· 591 591 return qdisc_skb_cb(skb)->data; 592 592 } 593 593 594 - static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, 595 - struct sk_buff *skb) 594 + static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, 595 + struct sk_buff *skb) 596 596 { 597 597 u8 *cb_data = bpf_skb_cb(skb); 598 598 u8 cb_saved[BPF_SKB_CB_LEN]; ··· 611 611 return res; 612 612 } 613 613 614 + static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, 615 + struct sk_buff *skb) 616 + { 617 + u32 res; 618 + 619 + preempt_disable(); 620 + res = __bpf_prog_run_save_cb(prog, skb); 621 + preempt_enable(); 622 + return res; 623 + } 624 + 614 625 static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, 615 626 struct sk_buff *skb) 616 627 { 617 628 u8 *cb_data = bpf_skb_cb(skb); 629 + u32 res; 618 630 619 631 if (unlikely(prog->cb_access)) 620 632 memset(cb_data, 0, BPF_SKB_CB_LEN); 621 633 622 - return BPF_PROG_RUN(prog, skb); 634 + preempt_disable(); 635 + res = BPF_PROG_RUN(prog, skb); 636 + preempt_enable(); 637 + return res; 623 638 } 624 639 625 640 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+5 -4
include/linux/fs.h
··· 1479 1479 struct user_namespace *s_user_ns; 1480 1480 1481 1481 /* 1482 - * Keep the lru lists last in the structure so they always sit on their 1483 - * own individual cachelines. 1482 + * The list_lru structure is essentially just a pointer to a table 1483 + * of per-node lru lists, each of which has its own spinlock. 1484 + * There is no need to put them into separate cachelines. 1484 1485 */ 1485 - struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1486 - struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1486 + struct list_lru s_dentry_lru; 1487 + struct list_lru s_inode_lru; 1487 1488 struct rcu_head rcu; 1488 1489 struct work_struct destroy_work; 1489 1490
+4 -5
include/linux/hid-debug.h
··· 24 24 25 25 #ifdef CONFIG_DEBUG_FS 26 26 27 + #include <linux/kfifo.h> 28 + 27 29 #define HID_DEBUG_BUFSIZE 512 30 + #define HID_DEBUG_FIFOSIZE 512 28 31 29 32 void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); 30 33 void hid_dump_report(struct hid_device *, int , u8 *, int); ··· 40 37 void hid_debug_exit(void); 41 38 void hid_debug_event(struct hid_device *, char *); 42 39 43 - 44 40 struct hid_debug_list { 45 - char *hid_debug_buf; 46 - int head; 47 - int tail; 41 + DECLARE_KFIFO_PTR(hid_debug_fifo, char); 48 42 struct fasync_struct *fasync; 49 43 struct hid_device *hdev; 50 44 struct list_head node; ··· 64 64 #endif 65 65 66 66 #endif 67 -
+2
include/linux/ide.h
··· 615 615 616 616 /* current sense rq and buffer */ 617 617 bool sense_rq_armed; 618 + bool sense_rq_active; 618 619 struct request *sense_rq; 619 620 struct request_sense sense_data; 620 621 ··· 1220 1219 extern void ide_timer_expiry(struct timer_list *t); 1221 1220 extern irqreturn_t ide_intr(int irq, void *dev_id); 1222 1221 extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); 1222 + extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool); 1223 1223 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); 1224 1224 1225 1225 void ide_init_disk(struct gendisk *, ide_drive_t *);
+1 -1
include/linux/irqchip/arm-gic-v3.h
··· 319 319 #define GITS_TYPER_PLPIS (1UL << 0) 320 320 #define GITS_TYPER_VLPIS (1UL << 1) 321 321 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 322 - #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) 322 + #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1) 323 323 #define GITS_TYPER_IDBITS_SHIFT 8 324 324 #define GITS_TYPER_DEVBITS_SHIFT 13 325 325 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+10 -8
include/linux/memory_hotplug.h
··· 21 21 * walkers which rely on the fully initialized page->flags and others 22 22 * should use this rather than pfn_valid && pfn_to_page 23 23 */ 24 - #define pfn_to_online_page(pfn) \ 25 - ({ \ 26 - struct page *___page = NULL; \ 27 - unsigned long ___nr = pfn_to_section_nr(pfn); \ 28 - \ 29 - if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\ 30 - ___page = pfn_to_page(pfn); \ 31 - ___page; \ 24 + #define pfn_to_online_page(pfn) \ 25 + ({ \ 26 + struct page *___page = NULL; \ 27 + unsigned long ___pfn = pfn; \ 28 + unsigned long ___nr = pfn_to_section_nr(___pfn); \ 29 + \ 30 + if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \ 31 + pfn_valid_within(___pfn)) \ 32 + ___page = pfn_to_page(___pfn); \ 33 + ___page; \ 32 34 }) 33 35 34 36 /*
+8
include/linux/netdevice.h
··· 1483 1483 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1484 1484 * @IFF_FAILOVER: device is a failover master device 1485 1485 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1486 + * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device 1486 1487 */ 1487 1488 enum netdev_priv_flags { 1488 1489 IFF_802_1Q_VLAN = 1<<0, ··· 1515 1514 IFF_NO_RX_HANDLER = 1<<26, 1516 1515 IFF_FAILOVER = 1<<27, 1517 1516 IFF_FAILOVER_SLAVE = 1<<28, 1517 + IFF_L3MDEV_RX_HANDLER = 1<<29, 1518 1518 }; 1519 1519 1520 1520 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN ··· 1546 1544 #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER 1547 1545 #define IFF_FAILOVER IFF_FAILOVER 1548 1546 #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE 1547 + #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER 1549 1548 1550 1549 /** 1551 1550 * struct net_device - The DEVICE structure. ··· 4550 4547 static inline bool netif_supports_nofcs(struct net_device *dev) 4551 4548 { 4552 4549 return dev->priv_flags & IFF_SUPP_NOFCS; 4550 + } 4551 + 4552 + static inline bool netif_has_l3_rx_handler(const struct net_device *dev) 4553 + { 4554 + return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; 4553 4555 } 4554 4556 4555 4557 static inline bool netif_is_l3_master(const struct net_device *dev)
+1 -1
include/linux/pm_runtime.h
··· 105 105 106 106 static inline void pm_runtime_mark_last_busy(struct device *dev) 107 107 { 108 - WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get())); 108 + WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); 109 109 } 110 110 111 111 static inline bool pm_runtime_is_irq_safe(struct device *dev)
+1 -1
include/linux/sched.h
··· 995 995 /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 996 996 struct list_head cg_list; 997 997 #endif 998 - #ifdef CONFIG_X86_RESCTRL 998 + #ifdef CONFIG_X86_CPU_RESCTRL 999 999 u32 closid; 1000 1000 u32 rmid; 1001 1001 #endif
+1
include/linux/sched/coredump.h
··· 71 71 #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ 72 72 #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ 73 73 #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ 74 + #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ 74 75 #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) 75 76 76 77 #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
+1 -1
include/linux/signal.h
··· 392 392 #endif 393 393 394 394 #define siginmask(sig, mask) \ 395 - ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) 395 + ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) 396 396 397 397 #define SIG_KERNEL_ONLY_MASK (\ 398 398 rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
+1
include/linux/stmmac.h
··· 184 184 struct clk *pclk; 185 185 struct clk *clk_ptp_ref; 186 186 unsigned int clk_ptp_rate; 187 + unsigned int clk_ref_rate; 187 188 struct reset_control *stmmac_rst; 188 189 struct stmmac_axi *axi; 189 190 int has_gmac4;
+2 -1
include/net/l3mdev.h
··· 153 153 154 154 if (netif_is_l3_slave(skb->dev)) 155 155 master = netdev_master_upper_dev_get_rcu(skb->dev); 156 - else if (netif_is_l3_master(skb->dev)) 156 + else if (netif_is_l3_master(skb->dev) || 157 + netif_has_l3_rx_handler(skb->dev)) 157 158 master = skb->dev; 158 159 159 160 if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+13 -4
include/net/netfilter/nf_tables.h
··· 469 469 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, 470 470 struct nft_set_binding *binding); 471 471 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 472 - struct nft_set_binding *binding); 473 - void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, 474 - struct nft_set_binding *binding); 472 + struct nft_set_binding *binding, bool commit); 475 473 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); 476 474 477 475 /** ··· 719 721 #define NFT_EXPR_STATEFUL 0x1 720 722 #define NFT_EXPR_GC 0x2 721 723 724 + enum nft_trans_phase { 725 + NFT_TRANS_PREPARE, 726 + NFT_TRANS_ABORT, 727 + NFT_TRANS_COMMIT, 728 + NFT_TRANS_RELEASE 729 + }; 730 + 722 731 /** 723 732 * struct nft_expr_ops - nf_tables expression operations 724 733 * ··· 755 750 void (*activate)(const struct nft_ctx *ctx, 756 751 const struct nft_expr *expr); 757 752 void (*deactivate)(const struct nft_ctx *ctx, 758 - const struct nft_expr *expr); 753 + const struct nft_expr *expr, 754 + enum nft_trans_phase phase); 759 755 void (*destroy)(const struct nft_ctx *ctx, 760 756 const struct nft_expr *expr); 761 757 void (*destroy_clone)(const struct nft_ctx *ctx, ··· 1329 1323 struct nft_trans_set { 1330 1324 struct nft_set *set; 1331 1325 u32 set_id; 1326 + bool bound; 1332 1327 }; 1333 1328 1334 1329 #define nft_trans_set(trans) \ 1335 1330 (((struct nft_trans_set *)trans->data)->set) 1336 1331 #define nft_trans_set_id(trans) \ 1337 1332 (((struct nft_trans_set *)trans->data)->set_id) 1333 + #define nft_trans_set_bound(trans) \ 1334 + (((struct nft_trans_set *)trans->data)->bound) 1338 1335 1339 1336 struct nft_trans_chain { 1340 1337 bool update;
+2
include/net/tls.h
··· 120 120 struct scatterlist sg_aead_out[2]; 121 121 122 122 char aad_space[TLS_AAD_SPACE_SIZE]; 123 + u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE + 124 + TLS_CIPHER_AES_GCM_128_SALT_SIZE]; 123 125 struct aead_request aead_req; 124 126 u8 aead_req_ctx[]; 125 127 };
+22 -2
include/rdma/ib_verbs.h
··· 2579 2579 2580 2580 const struct uapi_definition *driver_def; 2581 2581 enum rdma_driver_id driver_id; 2582 + 2582 2583 /* 2583 - * Provides synchronization between device unregistration and netlink 2584 - * commands on a device. To be used only by core. 2584 + * Positive refcount indicates that the device is currently 2585 + * registered and cannot be unregistered. 2585 2586 */ 2586 2587 refcount_t refcount; 2587 2588 struct completion unreg_completion; ··· 3927 3926 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3928 3927 struct ib_mr_status *mr_status); 3929 3928 3929 + /** 3930 + * ib_device_try_get: Hold a registration lock 3931 + * device: The device to lock 3932 + * 3933 + * A device under an active registration lock cannot become unregistered. It 3934 + * is only possible to obtain a registration lock on a device that is fully 3935 + * registered, otherwise this function returns false. 3936 + * 3937 + * The registration lock is only necessary for actions which require the 3938 + * device to still be registered. Uses that only require the device pointer to 3939 + * be valid should use get_device(&ibdev->dev) to hold the memory. 3940 + * 3941 + */ 3942 + static inline bool ib_device_try_get(struct ib_device *dev) 3943 + { 3944 + return refcount_inc_not_zero(&dev->refcount); 3945 + } 3946 + 3947 + void ib_device_put(struct ib_device *device); 3930 3948 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3931 3949 u16 pkey, const union ib_gid *gid, 3932 3950 const struct sockaddr *addr);
+5 -1
include/sound/compress_driver.h
··· 173 173 if (snd_BUG_ON(!stream)) 174 174 return; 175 175 176 - stream->runtime->state = SNDRV_PCM_STATE_SETUP; 176 + if (stream->direction == SND_COMPRESS_PLAYBACK) 177 + stream->runtime->state = SNDRV_PCM_STATE_SETUP; 178 + else 179 + stream->runtime->state = SNDRV_PCM_STATE_PREPARED; 180 + 177 181 wake_up(&stream->runtime->sleep); 178 182 } 179 183
+1
include/sound/hda_codec.h
··· 68 68 unsigned int response_reset:1; /* controller was reset */ 69 69 unsigned int in_reset:1; /* during reset operation */ 70 70 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ 71 + unsigned int bus_probing :1; /* during probing process */ 71 72 72 73 int primary_dig_out_type; /* primary digital out PCM type */ 73 74 unsigned int mixer_assigned; /* codec addr for mixer name */
+6
include/uapi/linux/virtio_config.h
··· 79 79 #define VIRTIO_F_RING_PACKED 34 80 80 81 81 /* 82 + * This feature indicates that memory accesses by the driver and the 83 + * device are ordered in a way described by the platform. 84 + */ 85 + #define VIRTIO_F_ORDER_PLATFORM 36 86 + 87 + /* 82 88 * Does the device support Single Root I/O Virtualization? 83 89 */ 84 90 #define VIRTIO_F_SR_IOV 37
-10
include/uapi/linux/virtio_ring.h
··· 213 213 __le16 flags; 214 214 }; 215 215 216 - struct vring_packed { 217 - unsigned int num; 218 - 219 - struct vring_packed_desc *desc; 220 - 221 - struct vring_packed_desc_event *driver; 222 - 223 - struct vring_packed_desc_event *device; 224 - }; 225 - 226 216 #endif /* _UAPI_LINUX_VIRTIO_RING_H */
+5
include/uapi/rdma/hns-abi.h
··· 52 52 __aligned_u64 que_addr; 53 53 }; 54 54 55 + struct hns_roce_ib_create_srq_resp { 56 + __u32 srqn; 57 + __u32 reserved; 58 + }; 59 + 55 60 struct hns_roce_ib_create_qp { 56 61 __aligned_u64 buf_addr; 57 62 __aligned_u64 db_addr;
+12 -1
init/Kconfig
··· 512 512 per default but can be enabled through passing psi=1 on the 513 513 kernel commandline during boot. 514 514 515 + This feature adds some code to the task wakeup and sleep 516 + paths of the scheduler. The overhead is too low to affect 517 + common scheduling-intense workloads in practice (such as 518 + webservers, memcache), but it does show up in artificial 519 + scheduler stress tests, such as hackbench. 520 + 521 + If you are paranoid and not sure what the kernel will be 522 + used for, say Y. 523 + 524 + Say N if unsure. 525 + 515 526 endmenu # "CPU/Task time and stats accounting" 516 527 517 528 config CPU_ISOLATION ··· 836 825 PIDs controller is designed to stop this from happening. 837 826 838 827 It should be noted that organisational operations (such as attaching 839 - to a cgroup hierarchy will *not* be blocked by the PIDs controller), 828 + to a cgroup hierarchy) will *not* be blocked by the PIDs controller, 840 829 since the PIDs limit only affects a process's ability to fork, not to 841 830 attach to a cgroup. 842 831
+2 -1
kernel/bpf/btf.c
··· 1459 1459 1460 1460 /* "typedef void new_void", "const void"...etc */ 1461 1461 if (!btf_type_is_void(next_type) && 1462 - !btf_type_is_fwd(next_type)) { 1462 + !btf_type_is_fwd(next_type) && 1463 + !btf_type_is_func_proto(next_type)) { 1463 1464 btf_verifier_log_type(env, v->t, "Invalid type_id"); 1464 1465 return -EINVAL; 1465 1466 }
+1 -1
kernel/bpf/cgroup.c
··· 572 572 bpf_compute_and_save_data_end(skb, &saved_data_end); 573 573 574 574 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, 575 - bpf_prog_run_save_cb); 575 + __bpf_prog_run_save_cb); 576 576 bpf_restore_data_end(skb, saved_data_end); 577 577 __skb_pull(skb, offset); 578 578 skb->sk = save_sk;
+2 -2
kernel/bpf/hashtab.c
··· 686 686 } 687 687 688 688 if (htab_is_prealloc(htab)) { 689 - pcpu_freelist_push(&htab->freelist, &l->fnode); 689 + __pcpu_freelist_push(&htab->freelist, &l->fnode); 690 690 } else { 691 691 atomic_dec(&htab->count); 692 692 l->htab = htab; ··· 748 748 } else { 749 749 struct pcpu_freelist_node *l; 750 750 751 - l = pcpu_freelist_pop(&htab->freelist); 751 + l = __pcpu_freelist_pop(&htab->freelist); 752 752 if (!l) 753 753 return ERR_PTR(-E2BIG); 754 754 l_new = container_of(l, struct htab_elem, fnode);
+29 -12
kernel/bpf/percpu_freelist.c
··· 28 28 free_percpu(s->freelist); 29 29 } 30 30 31 - static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, 32 - struct pcpu_freelist_node *node) 31 + static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, 32 + struct pcpu_freelist_node *node) 33 33 { 34 34 raw_spin_lock(&head->lock); 35 35 node->next = head->first; ··· 37 37 raw_spin_unlock(&head->lock); 38 38 } 39 39 40 - void pcpu_freelist_push(struct pcpu_freelist *s, 40 + void __pcpu_freelist_push(struct pcpu_freelist *s, 41 41 struct pcpu_freelist_node *node) 42 42 { 43 43 struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); 44 44 45 - __pcpu_freelist_push(head, node); 45 + ___pcpu_freelist_push(head, node); 46 + } 47 + 48 + void pcpu_freelist_push(struct pcpu_freelist *s, 49 + struct pcpu_freelist_node *node) 50 + { 51 + unsigned long flags; 52 + 53 + local_irq_save(flags); 54 + __pcpu_freelist_push(s, node); 55 + local_irq_restore(flags); 46 56 } 47 57 48 58 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, ··· 73 63 for_each_possible_cpu(cpu) { 74 64 again: 75 65 head = per_cpu_ptr(s->freelist, cpu); 76 - __pcpu_freelist_push(head, buf); 66 + ___pcpu_freelist_push(head, buf); 77 67 i++; 78 68 buf += elem_size; 79 69 if (i == nr_elems) ··· 84 74 local_irq_restore(flags); 85 75 } 86 76 87 - struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) 77 + struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) 88 78 { 89 79 struct pcpu_freelist_head *head; 90 80 struct pcpu_freelist_node *node; 91 - unsigned long flags; 92 81 int orig_cpu, cpu; 93 82 94 - local_irq_save(flags); 95 83 orig_cpu = cpu = raw_smp_processor_id(); 96 84 while (1) { 97 85 head = per_cpu_ptr(s->freelist, cpu); ··· 97 89 node = head->first; 98 90 if (node) { 99 91 head->first = node->next; 100 - raw_spin_unlock_irqrestore(&head->lock, flags); 92 + raw_spin_unlock(&head->lock); 101 93 return node; 102 94 } 103 95 
raw_spin_unlock(&head->lock); 104 96 cpu = cpumask_next(cpu, cpu_possible_mask); 105 97 if (cpu >= nr_cpu_ids) 106 98 cpu = 0; 107 - if (cpu == orig_cpu) { 108 - local_irq_restore(flags); 99 + if (cpu == orig_cpu) 109 100 return NULL; 110 - } 111 101 } 102 + } 103 + 104 + struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) 105 + { 106 + struct pcpu_freelist_node *ret; 107 + unsigned long flags; 108 + 109 + local_irq_save(flags); 110 + ret = __pcpu_freelist_pop(s); 111 + local_irq_restore(flags); 112 + return ret; 112 113 }
+4
kernel/bpf/percpu_freelist.h
··· 22 22 struct pcpu_freelist_node *next; 23 23 }; 24 24 25 + /* pcpu_freelist_* do spin_lock_irqsave. */ 25 26 void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); 26 27 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); 28 + /* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */ 29 + void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); 30 + struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *); 27 31 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, 28 32 u32 nr_elems); 29 33 int pcpu_freelist_init(struct pcpu_freelist *);
+10 -2
kernel/bpf/syscall.c
··· 713 713 714 714 if (bpf_map_is_dev_bound(map)) { 715 715 err = bpf_map_offload_lookup_elem(map, key, value); 716 - } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 717 - map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 716 + goto done; 717 + } 718 + 719 + preempt_disable(); 720 + this_cpu_inc(bpf_prog_active); 721 + if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 722 + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 718 723 err = bpf_percpu_hash_copy(map, key, value); 719 724 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 720 725 err = bpf_percpu_array_copy(map, key, value); ··· 749 744 } 750 745 rcu_read_unlock(); 751 746 } 747 + this_cpu_dec(bpf_prog_active); 748 + preempt_enable(); 752 749 750 + done: 753 751 if (err) 754 752 goto free_value; 755 753
+5 -33
kernel/cpu.c
··· 376 376 377 377 #ifdef CONFIG_HOTPLUG_SMT 378 378 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; 379 - EXPORT_SYMBOL_GPL(cpu_smt_control); 380 - 381 - static bool cpu_smt_available __read_mostly; 382 379 383 380 void __init cpu_smt_disable(bool force) 384 381 { ··· 394 397 395 398 /* 396 399 * The decision whether SMT is supported can only be done after the full 397 - * CPU identification. Called from architecture code before non boot CPUs 398 - * are brought up. 399 - */ 400 - void __init cpu_smt_check_topology_early(void) 401 - { 402 - if (!topology_smt_supported()) 403 - cpu_smt_control = CPU_SMT_NOT_SUPPORTED; 404 - } 405 - 406 - /* 407 - * If SMT was disabled by BIOS, detect it here, after the CPUs have been 408 - * brought online. This ensures the smt/l1tf sysfs entries are consistent 409 - * with reality. cpu_smt_available is set to true during the bringup of non 410 - * boot CPUs when a SMT sibling is detected. Note, this may overwrite 411 - * cpu_smt_control's previous setting. 400 + * CPU identification. Called from architecture code. 412 401 */ 413 402 void __init cpu_smt_check_topology(void) 414 403 { 415 - if (!cpu_smt_available) 404 + if (!topology_smt_supported()) 416 405 cpu_smt_control = CPU_SMT_NOT_SUPPORTED; 417 406 } 418 407 ··· 411 428 412 429 static inline bool cpu_smt_allowed(unsigned int cpu) 413 430 { 414 - if (topology_is_primary_thread(cpu)) 431 + if (cpu_smt_control == CPU_SMT_ENABLED) 415 432 return true; 416 433 417 - /* 418 - * If the CPU is not a 'primary' thread and the booted_once bit is 419 - * set then the processor has SMT support. Store this information 420 - * for the late check of SMT support in cpu_smt_check_topology(). 
421 - */ 422 - if (per_cpu(cpuhp_state, cpu).booted_once) 423 - cpu_smt_available = true; 424 - 425 - if (cpu_smt_control == CPU_SMT_ENABLED) 434 + if (topology_is_primary_thread(cpu)) 426 435 return true; 427 436 428 437 /* ··· 2065 2090 */ 2066 2091 cpuhp_offline_cpu_device(cpu); 2067 2092 } 2068 - if (!ret) { 2093 + if (!ret) 2069 2094 cpu_smt_control = ctrlval; 2070 - arch_smt_update(); 2071 - } 2072 2095 cpu_maps_update_done(); 2073 2096 return ret; 2074 2097 } ··· 2077 2104 2078 2105 cpu_maps_update_begin(); 2079 2106 cpu_smt_control = CPU_SMT_ENABLED; 2080 - arch_smt_update(); 2081 2107 for_each_present_cpu(cpu) { 2082 2108 /* Skip online CPUs and CPUs on offline nodes */ 2083 2109 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+7 -7
kernel/events/core.c
··· 436 436 void __user *buffer, size_t *lenp, 437 437 loff_t *ppos) 438 438 { 439 - int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 440 - 441 - if (ret || !write) 442 - return ret; 443 - 439 + int ret; 440 + int perf_cpu = sysctl_perf_cpu_time_max_percent; 444 441 /* 445 442 * If throttling is disabled don't allow the write: 446 443 */ 447 - if (sysctl_perf_cpu_time_max_percent == 100 || 448 - sysctl_perf_cpu_time_max_percent == 0) 444 + if (write && (perf_cpu == 100 || perf_cpu == 0)) 449 445 return -EINVAL; 446 + 447 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 448 + if (ret || !write) 449 + return ret; 450 450 451 451 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); 452 452 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
+3
kernel/events/ring_buffer.c
··· 734 734 size = sizeof(struct ring_buffer); 735 735 size += nr_pages * sizeof(void *); 736 736 737 + if (order_base_2(size) >= MAX_ORDER) 738 + goto fail; 739 + 737 740 rb = kzalloc(size, GFP_KERNEL); 738 741 if (!rb) 739 742 goto fail;
+10 -2
kernel/exit.c
··· 558 558 return NULL; 559 559 } 560 560 561 - static struct task_struct *find_child_reaper(struct task_struct *father) 561 + static struct task_struct *find_child_reaper(struct task_struct *father, 562 + struct list_head *dead) 562 563 __releases(&tasklist_lock) 563 564 __acquires(&tasklist_lock) 564 565 { 565 566 struct pid_namespace *pid_ns = task_active_pid_ns(father); 566 567 struct task_struct *reaper = pid_ns->child_reaper; 568 + struct task_struct *p, *n; 567 569 568 570 if (likely(reaper != father)) 569 571 return reaper; ··· 581 579 panic("Attempted to kill init! exitcode=0x%08x\n", 582 580 father->signal->group_exit_code ?: father->exit_code); 583 581 } 582 + 583 + list_for_each_entry_safe(p, n, dead, ptrace_entry) { 584 + list_del_init(&p->ptrace_entry); 585 + release_task(p); 586 + } 587 + 584 588 zap_pid_ns_processes(pid_ns); 585 589 write_lock_irq(&tasklist_lock); 586 590 ··· 676 668 exit_ptrace(father, dead); 677 669 678 670 /* Can drop and reacquire tasklist_lock */ 679 - reaper = find_child_reaper(father); 671 + reaper = find_child_reaper(father, dead); 680 672 if (list_empty(&father->children)) 681 673 return; 682 674
+20 -12
kernel/futex.c
··· 2221 2221 * decrement the counter at queue_unlock() when some error has 2222 2222 * occurred and we don't end up adding the task to the list. 2223 2223 */ 2224 - hb_waiters_inc(hb); 2224 + hb_waiters_inc(hb); /* implies smp_mb(); (A) */ 2225 2225 2226 2226 q->lock_ptr = &hb->lock; 2227 2227 2228 - spin_lock(&hb->lock); /* implies smp_mb(); (A) */ 2228 + spin_lock(&hb->lock); 2229 2229 return hb; 2230 2230 } 2231 2231 ··· 2861 2861 * and BUG when futex_unlock_pi() interleaves with this. 2862 2862 * 2863 2863 * Therefore acquire wait_lock while holding hb->lock, but drop the 2864 - * latter before calling rt_mutex_start_proxy_lock(). This still fully 2865 - * serializes against futex_unlock_pi() as that does the exact same 2866 - * lock handoff sequence. 2864 + * latter before calling __rt_mutex_start_proxy_lock(). This 2865 + * interleaves with futex_unlock_pi() -- which does a similar lock 2866 + * handoff -- such that the latter can observe the futex_q::pi_state 2867 + * before __rt_mutex_start_proxy_lock() is done. 2867 2868 */ 2868 2869 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); 2869 2870 spin_unlock(q.lock_ptr); 2871 + /* 2872 + * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter 2873 + * such that futex_unlock_pi() is guaranteed to observe the waiter when 2874 + * it sees the futex_q::pi_state. 
2875 + */ 2870 2876 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); 2871 2877 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); 2872 2878 2873 2879 if (ret) { 2874 2880 if (ret == 1) 2875 2881 ret = 0; 2876 - 2877 - spin_lock(q.lock_ptr); 2878 - goto no_block; 2882 + goto cleanup; 2879 2883 } 2880 - 2881 2884 2882 2885 if (unlikely(to)) 2883 2886 hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); 2884 2887 2885 2888 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); 2886 2889 2890 + cleanup: 2887 2891 spin_lock(q.lock_ptr); 2888 2892 /* 2889 - * If we failed to acquire the lock (signal/timeout), we must 2893 + * If we failed to acquire the lock (deadlock/signal/timeout), we must 2890 2894 * first acquire the hb->lock before removing the lock from the 2891 - * rt_mutex waitqueue, such that we can keep the hb and rt_mutex 2892 - * wait lists consistent. 2895 + * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait 2896 + * lists consistent. 2893 2897 * 2894 2898 * In particular; it is important that futex_unlock_pi() can not 2895 2899 * observe this inconsistency. ··· 3017 3013 * there is no point where we hold neither; and therefore 3018 3014 * wake_futex_pi() must observe a state consistent with what we 3019 3015 * observed. 3016 + * 3017 + * In particular; this forces __rt_mutex_start_proxy() to 3018 + * complete such that we're guaranteed to observe the 3019 + * rt_waiter. Also see the WARN in wake_futex_pi(). 3020 3020 */ 3021 3021 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); 3022 3022 spin_unlock(&hb->lock);
+32 -5
kernel/locking/rtmutex.c
··· 1726 1726 rt_mutex_set_owner(lock, NULL); 1727 1727 } 1728 1728 1729 + /** 1730 + * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task 1731 + * @lock: the rt_mutex to take 1732 + * @waiter: the pre-initialized rt_mutex_waiter 1733 + * @task: the task to prepare 1734 + * 1735 + * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock 1736 + * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. 1737 + * 1738 + * NOTE: does _NOT_ remove the @waiter on failure; must either call 1739 + * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this. 1740 + * 1741 + * Returns: 1742 + * 0 - task blocked on lock 1743 + * 1 - acquired the lock for task, caller should wake it up 1744 + * <0 - error 1745 + * 1746 + * Special API call for PI-futex support. 1747 + */ 1729 1748 int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, 1730 1749 struct rt_mutex_waiter *waiter, 1731 1750 struct task_struct *task) 1732 1751 { 1733 1752 int ret; 1753 + 1754 + lockdep_assert_held(&lock->wait_lock); 1734 1755 1735 1756 if (try_to_take_rt_mutex(lock, task, NULL)) 1736 1757 return 1; ··· 1770 1749 ret = 0; 1771 1750 } 1772 1751 1773 - if (unlikely(ret)) 1774 - remove_waiter(lock, waiter); 1775 - 1776 1752 debug_rt_mutex_print_deadlock(waiter); 1777 1753 1778 1754 return ret; ··· 1781 1763 * @waiter: the pre-initialized rt_mutex_waiter 1782 1764 * @task: the task to prepare 1783 1765 * 1766 + * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock 1767 + * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. 1768 + * 1769 + * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter 1770 + * on failure. 1771 + * 1784 1772 * Returns: 1785 1773 * 0 - task blocked on lock 1786 1774 * 1 - acquired the lock for task, caller should wake it up 1787 1775 * <0 - error 1788 1776 * 1789 - * Special API call for FUTEX_REQUEUE_PI support. 1777 + * Special API call for PI-futex support. 
1790 1778 */ 1791 1779 int rt_mutex_start_proxy_lock(struct rt_mutex *lock, 1792 1780 struct rt_mutex_waiter *waiter, ··· 1802 1778 1803 1779 raw_spin_lock_irq(&lock->wait_lock); 1804 1780 ret = __rt_mutex_start_proxy_lock(lock, waiter, task); 1781 + if (unlikely(ret)) 1782 + remove_waiter(lock, waiter); 1805 1783 raw_spin_unlock_irq(&lock->wait_lock); 1806 1784 1807 1785 return ret; ··· 1871 1845 * @lock: the rt_mutex we were woken on 1872 1846 * @waiter: the pre-initialized rt_mutex_waiter 1873 1847 * 1874 - * Attempt to clean up after a failed rt_mutex_wait_proxy_lock(). 1848 + * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or 1849 + * rt_mutex_wait_proxy_lock(). 1875 1850 * 1876 1851 * Unless we acquired the lock; we're still enqueued on the wait-list and can 1877 1852 * in fact still be granted ownership until we're removed. Therefore we can
+3 -1
kernel/relay.c
··· 428 428 dentry = chan->cb->create_buf_file(tmpname, chan->parent, 429 429 S_IRUSR, buf, 430 430 &chan->is_global); 431 + if (IS_ERR(dentry)) 432 + dentry = NULL; 431 433 432 434 kfree(tmpname); 433 435 ··· 463 461 dentry = chan->cb->create_buf_file(NULL, NULL, 464 462 S_IRUSR, buf, 465 463 &chan->is_global); 466 - if (WARN_ON(dentry)) 464 + if (IS_ERR_OR_NULL(dentry)) 467 465 goto free_buf; 468 466 } 469 467
+1
kernel/sched/fair.c
··· 5980 5980 5981 5981 #ifdef CONFIG_SCHED_SMT 5982 5982 DEFINE_STATIC_KEY_FALSE(sched_smt_present); 5983 + EXPORT_SYMBOL_GPL(sched_smt_present); 5983 5984 5984 5985 static inline void set_idle_cores(int cpu, int val) 5985 5986 {
+17 -4
kernel/sched/psi.c
··· 124 124 * sampling of the aggregate task states would be. 125 125 */ 126 126 127 + #include "../workqueue_internal.h" 127 128 #include <linux/sched/loadavg.h> 128 129 #include <linux/seq_file.h> 129 130 #include <linux/proc_fs.h> ··· 481 480 groupc->tasks[t]++; 482 481 483 482 write_seqcount_end(&groupc->seq); 484 - 485 - if (!delayed_work_pending(&group->clock_work)) 486 - schedule_delayed_work(&group->clock_work, PSI_FREQ); 487 483 } 488 484 489 485 static struct psi_group *iterate_groups(struct task_struct *task, void **iter) ··· 511 513 { 512 514 int cpu = task_cpu(task); 513 515 struct psi_group *group; 516 + bool wake_clock = true; 514 517 void *iter = NULL; 515 518 516 519 if (!task->pid) ··· 529 530 task->psi_flags &= ~clear; 530 531 task->psi_flags |= set; 531 532 532 - while ((group = iterate_groups(task, &iter))) 533 + /* 534 + * Periodic aggregation shuts off if there is a period of no 535 + * task changes, so we wake it back up if necessary. However, 536 + * don't do this if the task change is the aggregation worker 537 + * itself going to sleep, or we'll ping-pong forever. 538 + */ 539 + if (unlikely((clear & TSK_RUNNING) && 540 + (task->flags & PF_WQ_WORKER) && 541 + wq_worker_last_func(task) == psi_update_work)) 542 + wake_clock = false; 543 + 544 + while ((group = iterate_groups(task, &iter))) { 533 545 psi_group_change(group, cpu, clear, set); 546 + if (wake_clock && !delayed_work_pending(&group->clock_work)) 547 + schedule_delayed_work(&group->clock_work, PSI_FREQ); 548 + } 534 549 } 535 550 536 551 void psi_memstall_tick(struct task_struct *task, int cpu)
+59 -4
kernel/signal.c
··· 688 688 } 689 689 EXPORT_SYMBOL_GPL(dequeue_signal); 690 690 691 + static int dequeue_synchronous_signal(kernel_siginfo_t *info) 692 + { 693 + struct task_struct *tsk = current; 694 + struct sigpending *pending = &tsk->pending; 695 + struct sigqueue *q, *sync = NULL; 696 + 697 + /* 698 + * Might a synchronous signal be in the queue? 699 + */ 700 + if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) 701 + return 0; 702 + 703 + /* 704 + * Return the first synchronous signal in the queue. 705 + */ 706 + list_for_each_entry(q, &pending->list, list) { 707 + /* Synchronous signals have a postive si_code */ 708 + if ((q->info.si_code > SI_USER) && 709 + (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { 710 + sync = q; 711 + goto next; 712 + } 713 + } 714 + return 0; 715 + next: 716 + /* 717 + * Check if there is another siginfo for the same signal. 718 + */ 719 + list_for_each_entry_continue(q, &pending->list, list) { 720 + if (q->info.si_signo == sync->info.si_signo) 721 + goto still_pending; 722 + } 723 + 724 + sigdelset(&pending->signal, sync->info.si_signo); 725 + recalc_sigpending(); 726 + still_pending: 727 + list_del_init(&sync->list); 728 + copy_siginfo(info, &sync->info); 729 + __sigqueue_free(sync); 730 + return info->si_signo; 731 + } 732 + 691 733 /* 692 734 * Tell a process that it has a new active signal.. 693 735 * ··· 1099 1057 1100 1058 result = TRACE_SIGNAL_DELIVERED; 1101 1059 /* 1102 - * Skip useless siginfo allocation for SIGKILL SIGSTOP, 1103 - * and kernel threads. 1060 + * Skip useless siginfo allocation for SIGKILL and kernel threads. 1104 1061 */ 1105 - if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) 1062 + if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) 1106 1063 goto out_set; 1107 1064 1108 1065 /* ··· 2435 2394 goto relock; 2436 2395 } 2437 2396 2397 + /* Has this task already been marked for death? 
*/ 2398 + ksig->info.si_signo = signr = SIGKILL; 2399 + if (signal_group_exit(signal)) 2400 + goto fatal; 2401 + 2438 2402 for (;;) { 2439 2403 struct k_sigaction *ka; 2440 2404 ··· 2453 2407 goto relock; 2454 2408 } 2455 2409 2456 - signr = dequeue_signal(current, &current->blocked, &ksig->info); 2410 + /* 2411 + * Signals generated by the execution of an instruction 2412 + * need to be delivered before any other pending signals 2413 + * so that the instruction pointer in the signal stack 2414 + * frame points to the faulting instruction. 2415 + */ 2416 + signr = dequeue_synchronous_signal(&ksig->info); 2417 + if (!signr) 2418 + signr = dequeue_signal(current, &current->blocked, &ksig->info); 2457 2419 2458 2420 if (!signr) 2459 2421 break; /* will return 0 */ ··· 2543 2489 continue; 2544 2490 } 2545 2491 2492 + fatal: 2546 2493 spin_unlock_irq(&sighand->siglock); 2547 2494 2548 2495 /*
-2
kernel/smp.c
··· 584 584 num_nodes, (num_nodes > 1 ? "s" : ""), 585 585 num_cpus, (num_cpus > 1 ? "s" : "")); 586 586 587 - /* Final decision about SMT support */ 588 - cpu_smt_check_topology(); 589 587 /* Any cleanup work */ 590 588 smp_cpus_done(setup_max_cpus); 591 589 }
+2 -12
kernel/trace/bpf_trace.c
··· 1204 1204 1205 1205 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 1206 1206 { 1207 - int err; 1208 - 1209 - mutex_lock(&bpf_event_mutex); 1210 - err = __bpf_probe_register(btp, prog); 1211 - mutex_unlock(&bpf_event_mutex); 1212 - return err; 1207 + return __bpf_probe_register(btp, prog); 1213 1208 } 1214 1209 1215 1210 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 1216 1211 { 1217 - int err; 1218 - 1219 - mutex_lock(&bpf_event_mutex); 1220 - err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); 1221 - mutex_unlock(&bpf_event_mutex); 1222 - return err; 1212 + return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); 1223 1213 } 1224 1214 1225 1215 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
+8 -1
kernel/trace/trace_uprobe.c
··· 5 5 * Copyright (C) IBM Corporation, 2010-2012 6 6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> 7 7 */ 8 - #define pr_fmt(fmt) "trace_kprobe: " fmt 8 + #define pr_fmt(fmt) "trace_uprobe: " fmt 9 9 10 10 #include <linux/ctype.h> 11 11 #include <linux/module.h> ··· 160 160 if (ret >= 0) { 161 161 if (ret == maxlen) 162 162 dst[ret - 1] = '\0'; 163 + else 164 + /* 165 + * Include the terminating null byte. In this case it 166 + * was copied by strncpy_from_user but not accounted 167 + * for in ret. 168 + */ 169 + ret++; 163 170 *(u32 *)dest = make_data_loc(ret, (void *)dst - base); 164 171 } 165 172
+23
kernel/workqueue.c
··· 910 910 } 911 911 912 912 /** 913 + * wq_worker_last_func - retrieve worker's last work function 914 + * 915 + * Determine the last function a worker executed. This is called from 916 + * the scheduler to get a worker's last known identity. 917 + * 918 + * CONTEXT: 919 + * spin_lock_irq(rq->lock) 920 + * 921 + * Return: 922 + * The last work function %current executed as a worker, NULL if it 923 + * hasn't executed any work yet. 924 + */ 925 + work_func_t wq_worker_last_func(struct task_struct *task) 926 + { 927 + struct worker *worker = kthread_data(task); 928 + 929 + return worker->last_func; 930 + } 931 + 932 + /** 913 933 * worker_set_flags - set worker flags and adjust nr_running accordingly 914 934 * @worker: self 915 935 * @flags: flags to set ··· 2203 2183 /* clear cpu intensive status */ 2204 2184 if (unlikely(cpu_intensive)) 2205 2185 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2186 + 2187 + /* tag the worker for identification in schedule() */ 2188 + worker->last_func = worker->current_func; 2206 2189 2207 2190 /* we're done with it, release */ 2208 2191 hash_del(&worker->hentry);
+5 -1
kernel/workqueue_internal.h
··· 53 53 54 54 /* used only by rescuers to point to the target workqueue */ 55 55 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ 56 + 57 + /* used by the scheduler to determine a worker's last known identity */ 58 + work_func_t last_func; 56 59 }; 57 60 58 61 /** ··· 70 67 71 68 /* 72 69 * Scheduler hooks for concurrency managed workqueue. Only to be used from 73 - * sched/core.c and workqueue.c. 70 + * sched/ and workqueue.c. 74 71 */ 75 72 void wq_worker_waking_up(struct task_struct *task, int cpu); 76 73 struct task_struct *wq_worker_sleeping(struct task_struct *task); 74 + work_func_t wq_worker_last_func(struct task_struct *task); 77 75 78 76 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
+1 -1
lib/test_kmod.c
··· 632 632 config->test_driver = NULL; 633 633 634 634 kfree_const(config->test_fs); 635 - config->test_driver = NULL; 635 + config->test_fs = NULL; 636 636 } 637 637 638 638 static void kmod_config_free(struct kmod_test_device *test_dev)
+15 -8
lib/test_rhashtable.c
··· 541 541 static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, 542 542 int cnt, bool slow) 543 543 { 544 - struct rhltable rhlt; 544 + struct rhltable *rhlt; 545 545 unsigned int i, ret; 546 546 const char *key; 547 547 int err = 0; 548 548 549 - err = rhltable_init(&rhlt, &test_rht_params_dup); 550 - if (WARN_ON(err)) 549 + rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL); 550 + if (WARN_ON(!rhlt)) 551 + return -EINVAL; 552 + 553 + err = rhltable_init(rhlt, &test_rht_params_dup); 554 + if (WARN_ON(err)) { 555 + kfree(rhlt); 551 556 return err; 557 + } 552 558 553 559 for (i = 0; i < cnt; i++) { 554 560 rhl_test_objects[i].value.tid = i; 555 - key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead); 561 + key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead); 556 562 key += test_rht_params_dup.key_offset; 557 563 558 564 if (slow) { 559 - err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key, 565 + err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key, 560 566 &rhl_test_objects[i].list_node.rhead)); 561 567 if (err == -EAGAIN) 562 568 err = 0; 563 569 } else 564 - err = rhltable_insert(&rhlt, 570 + err = rhltable_insert(rhlt, 565 571 &rhl_test_objects[i].list_node, 566 572 test_rht_params_dup); 567 573 if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) 568 574 goto skip_print; 569 575 } 570 576 571 - ret = print_ht(&rhlt); 577 + ret = print_ht(rhlt); 572 578 WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); 573 579 574 580 skip_print: 575 - rhltable_destroy(&rhlt); 581 + rhltable_destroy(rhlt); 582 + kfree(rhlt); 576 583 577 584 return 0; 578 585 }
+2 -1
mm/hugetlb.c
··· 4268 4268 break; 4269 4269 } 4270 4270 if (ret & VM_FAULT_RETRY) { 4271 - if (nonblocking) 4271 + if (nonblocking && 4272 + !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) 4272 4273 *nonblocking = 0; 4273 4274 *nr_pages = 0; 4274 4275 /*
+1
mm/kasan/Makefile
··· 5 5 UBSAN_SANITIZE_tags.o := n 6 6 KCOV_INSTRUMENT := n 7 7 8 + CFLAGS_REMOVE_common.o = -pg 8 9 CFLAGS_REMOVE_generic.o = -pg 9 10 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 10 11 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
+2 -1
mm/memory-failure.c
··· 372 372 if (fail || tk->addr_valid == 0) { 373 373 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", 374 374 pfn, tk->tsk->comm, tk->tsk->pid); 375 - force_sig(SIGKILL, tk->tsk); 375 + do_send_sig_info(SIGKILL, SEND_SIG_PRIV, 376 + tk->tsk, PIDTYPE_PID); 376 377 } 377 378 378 379 /*
+27 -35
mm/memory_hotplug.c
··· 1233 1233 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 1234 1234 { 1235 1235 struct page *page = pfn_to_page(start_pfn); 1236 - struct page *end_page = page + nr_pages; 1236 + unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page))); 1237 + struct page *end_page = pfn_to_page(end_pfn); 1237 1238 1238 1239 /* Check the starting page of each pageblock within the range */ 1239 1240 for (; page < end_page; page = next_active_pageblock(page)) { ··· 1274 1273 i++; 1275 1274 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) 1276 1275 continue; 1276 + /* Check if we got outside of the zone */ 1277 + if (zone && !zone_spans_pfn(zone, pfn + i)) 1278 + return 0; 1277 1279 page = pfn_to_page(pfn + i); 1278 1280 if (zone && page_zone(page) != zone) 1279 1281 return 0; ··· 1305 1301 static unsigned long scan_movable_pages(unsigned long start, unsigned long end) 1306 1302 { 1307 1303 unsigned long pfn; 1308 - struct page *page; 1304 + 1309 1305 for (pfn = start; pfn < end; pfn++) { 1310 - if (pfn_valid(pfn)) { 1311 - page = pfn_to_page(pfn); 1312 - if (PageLRU(page)) 1313 - return pfn; 1314 - if (__PageMovable(page)) 1315 - return pfn; 1316 - if (PageHuge(page)) { 1317 - if (hugepage_migration_supported(page_hstate(page)) && 1318 - page_huge_active(page)) 1319 - return pfn; 1320 - else 1321 - pfn = round_up(pfn + 1, 1322 - 1 << compound_order(page)) - 1; 1323 - } 1324 - } 1306 + struct page *page, *head; 1307 + unsigned long skip; 1308 + 1309 + if (!pfn_valid(pfn)) 1310 + continue; 1311 + page = pfn_to_page(pfn); 1312 + if (PageLRU(page)) 1313 + return pfn; 1314 + if (__PageMovable(page)) 1315 + return pfn; 1316 + 1317 + if (!PageHuge(page)) 1318 + continue; 1319 + head = compound_head(page); 1320 + if (hugepage_migration_supported(page_hstate(head)) && 1321 + page_huge_active(head)) 1322 + return pfn; 1323 + skip = (1 << compound_order(head)) - (page - head); 1324 + pfn += skip - 1; 1325 1325 } 1326 1326 return 0; 
1327 1327 } ··· 1352 1344 { 1353 1345 unsigned long pfn; 1354 1346 struct page *page; 1355 - int not_managed = 0; 1356 1347 int ret = 0; 1357 1348 LIST_HEAD(source); 1358 1349 ··· 1399 1392 else 1400 1393 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); 1401 1394 if (!ret) { /* Success */ 1402 - put_page(page); 1403 1395 list_add_tail(&page->lru, &source); 1404 1396 if (!__PageMovable(page)) 1405 1397 inc_node_page_state(page, NR_ISOLATED_ANON + ··· 1407 1401 } else { 1408 1402 pr_warn("failed to isolate pfn %lx\n", pfn); 1409 1403 dump_page(page, "isolation failed"); 1410 - put_page(page); 1411 - /* Because we don't have big zone->lock. we should 1412 - check this again here. */ 1413 - if (page_count(page)) { 1414 - not_managed++; 1415 - ret = -EBUSY; 1416 - break; 1417 - } 1418 1404 } 1405 + put_page(page); 1419 1406 } 1420 1407 if (!list_empty(&source)) { 1421 - if (not_managed) { 1422 - putback_movable_pages(&source); 1423 - goto out; 1424 - } 1425 - 1426 1408 /* Allocate a new page from the nearest neighbor node */ 1427 1409 ret = migrate_pages(&source, new_node_page, NULL, 0, 1428 1410 MIGRATE_SYNC, MR_MEMORY_HOTPLUG); ··· 1423 1429 putback_movable_pages(&source); 1424 1430 } 1425 1431 } 1426 - out: 1432 + 1427 1433 return ret; 1428 1434 } 1429 1435 ··· 1570 1576 we assume this for now. .*/ 1571 1577 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, 1572 1578 &valid_end)) { 1573 - mem_hotplug_done(); 1574 1579 ret = -EINVAL; 1575 1580 reason = "multizone range"; 1576 1581 goto failed_removal; ··· 1584 1591 MIGRATE_MOVABLE, 1585 1592 SKIP_HWPOISON | REPORT_FAILURE); 1586 1593 if (ret) { 1587 - mem_hotplug_done(); 1588 1594 reason = "failure to isolate range"; 1589 1595 goto failed_removal; 1590 1596 }
+5 -7
mm/migrate.c
··· 709 709 /* Simple case, sync compaction */ 710 710 if (mode != MIGRATE_ASYNC) { 711 711 do { 712 - get_bh(bh); 713 712 lock_buffer(bh); 714 713 bh = bh->b_this_page; 715 714 ··· 719 720 720 721 /* async case, we cannot block on lock_buffer so use trylock_buffer */ 721 722 do { 722 - get_bh(bh); 723 723 if (!trylock_buffer(bh)) { 724 724 /* 725 725 * We failed to lock the buffer and cannot stall in 726 726 * async migration. Release the taken locks 727 727 */ 728 728 struct buffer_head *failed_bh = bh; 729 - put_bh(failed_bh); 730 729 bh = head; 731 730 while (bh != failed_bh) { 732 731 unlock_buffer(bh); 733 - put_bh(bh); 734 732 bh = bh->b_this_page; 735 733 } 736 734 return false; ··· 814 818 bh = head; 815 819 do { 816 820 unlock_buffer(bh); 817 - put_bh(bh); 818 821 bh = bh->b_this_page; 819 822 820 823 } while (bh != head); ··· 1130 1135 * If migration is successful, decrease refcount of the newpage 1131 1136 * which will not free the page because new page owner increased 1132 1137 * refcounter. As well, if it is LRU page, add the page to LRU 1133 - * list in here. 1138 + * list in here. Use the old state of the isolated source page to 1139 + * determine if we migrated a LRU page. newpage was already unlocked 1140 + * and possibly modified by its owner - don't rely on the page 1141 + * state. 1134 1142 */ 1135 1143 if (rc == MIGRATEPAGE_SUCCESS) { 1136 - if (unlikely(__PageMovable(newpage))) 1144 + if (unlikely(!is_lru)) 1137 1145 put_page(newpage); 1138 1146 else 1139 1147 putback_lru_page(newpage);
+10 -2
mm/oom_kill.c
··· 647 647 648 648 static void wake_oom_reaper(struct task_struct *tsk) 649 649 { 650 - /* tsk is already queued? */ 651 - if (tsk == oom_reaper_list || tsk->oom_reaper_list) 650 + /* mm is already queued? */ 651 + if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) 652 652 return; 653 653 654 654 get_task_struct(tsk); ··· 975 975 * still freeing memory. 976 976 */ 977 977 read_lock(&tasklist_lock); 978 + 979 + /* 980 + * The task 'p' might have already exited before reaching here. The 981 + * put_task_struct() will free task_struct 'p' while the loop still try 982 + * to access the field of 'p', so, get an extra reference. 983 + */ 984 + get_task_struct(p); 978 985 for_each_thread(p, t) { 979 986 list_for_each_entry(child, &t->children, sibling) { 980 987 unsigned int child_points; ··· 1001 994 } 1002 995 } 1003 996 } 997 + put_task_struct(p); 1004 998 read_unlock(&tasklist_lock); 1005 999 1006 1000 /*
-12
mm/page_alloc.c
··· 5701 5701 cond_resched(); 5702 5702 } 5703 5703 } 5704 - #ifdef CONFIG_SPARSEMEM 5705 - /* 5706 - * If the zone does not span the rest of the section then 5707 - * we should at least initialize those pages. Otherwise we 5708 - * could blow up on a poisoned page in some paths which depend 5709 - * on full sections being initialized (e.g. memory hotplug). 5710 - */ 5711 - while (end_pfn % PAGES_PER_SECTION) { 5712 - __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid); 5713 - end_pfn++; 5714 - } 5715 - #endif 5716 5704 } 5717 5705 5718 5706 #ifdef CONFIG_ZONE_DEVICE
+3
net/batman-adv/bat_v_elp.c
··· 104 104 105 105 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); 106 106 107 + /* free the TID stats immediately */ 108 + cfg80211_sinfo_release_content(&sinfo); 109 + 107 110 dev_put(real_netdev); 108 111 if (ret == -ENOENT) { 109 112 /* Node is not associated anymore! It would be
+3 -2
net/batman-adv/hard-interface.c
··· 20 20 #include "main.h" 21 21 22 22 #include <linux/atomic.h> 23 - #include <linux/bug.h> 24 23 #include <linux/byteorder/generic.h> 25 24 #include <linux/errno.h> 26 25 #include <linux/gfp.h> ··· 178 179 parent_dev = __dev_get_by_index((struct net *)parent_net, 179 180 dev_get_iflink(net_dev)); 180 181 /* if we got a NULL parent_dev there is something broken.. */ 181 - if (WARN(!parent_dev, "Cannot find parent device")) 182 + if (!parent_dev) { 183 + pr_err("Cannot find parent device\n"); 182 184 return false; 185 + } 183 186 184 187 if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) 185 188 return false;
+2
net/batman-adv/soft-interface.c
··· 221 221 222 222 netif_trans_update(soft_iface); 223 223 vid = batadv_get_vid(skb, 0); 224 + 225 + skb_reset_mac_header(skb); 224 226 ethhdr = eth_hdr(skb); 225 227 226 228 switch (ntohs(ethhdr->h_proto)) {
+6 -3
net/bridge/netfilter/ebtables.c
··· 2293 2293 2294 2294 xt_compat_lock(NFPROTO_BRIDGE); 2295 2295 2296 - ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); 2297 - if (ret < 0) 2298 - goto out_unlock; 2296 + if (tmp.nentries) { 2297 + ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); 2298 + if (ret < 0) 2299 + goto out_unlock; 2300 + } 2301 + 2299 2302 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2300 2303 if (ret < 0) 2301 2304 goto out_unlock;
+3
net/core/dev.c
··· 8712 8712 set_bit(__LINK_STATE_PRESENT, &dev->state); 8713 8713 set_bit(__LINK_STATE_START, &dev->state); 8714 8714 8715 + /* napi_busy_loop stats accounting wants this */ 8716 + dev_net_set(dev, &init_net); 8717 + 8715 8718 /* Note : We dont allocate pcpu_refcnt for dummy devices, 8716 8719 * because users of this 'device' dont need to change 8717 8720 * its refcount.
+2
net/core/filter.c
··· 4112 4112 /* Only some socketops are supported */ 4113 4113 switch (optname) { 4114 4114 case SO_RCVBUF: 4115 + val = min_t(u32, val, sysctl_rmem_max); 4115 4116 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 4116 4117 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); 4117 4118 break; 4118 4119 case SO_SNDBUF: 4120 + val = min_t(u32, val, sysctl_wmem_max); 4119 4121 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 4120 4122 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); 4121 4123 break;
+1 -2
net/core/skmsg.c
··· 545 545 struct sk_psock *psock = container_of(gc, struct sk_psock, gc); 546 546 547 547 /* No sk_callback_lock since already detached. */ 548 - if (psock->parser.enabled) 549 - strp_done(&psock->parser.strp); 548 + strp_done(&psock->parser.strp); 550 549 551 550 cancel_work_sync(&psock->work); 552 551
+2 -2
net/dccp/ccid.h
··· 202 202 static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, 203 203 u8 pkt, u8 opt, u8 *val, u8 len) 204 204 { 205 - if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL) 205 + if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options) 206 206 return 0; 207 207 return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); 208 208 } ··· 214 214 static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, 215 215 u8 pkt, u8 opt, u8 *val, u8 len) 216 216 { 217 - if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL) 217 + if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options) 218 218 return 0; 219 219 return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); 220 220 }
+1 -1
net/decnet/dn_dev.c
··· 56 56 #include <net/dn_neigh.h> 57 57 #include <net/dn_fib.h> 58 58 59 - #define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) 59 + #define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn)) 60 60 61 61 static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; 62 62 static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
+4
net/dsa/master.c
··· 205 205 rtnl_unlock(); 206 206 } 207 207 208 + static struct lock_class_key dsa_master_addr_list_lock_key; 209 + 208 210 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) 209 211 { 210 212 int ret; ··· 220 218 wmb(); 221 219 222 220 dev->dsa_ptr = cpu_dp; 221 + lockdep_set_class(&dev->addr_list_lock, 222 + &dsa_master_addr_list_lock_key); 223 223 224 224 ret = dsa_master_ethtool_setup(dev); 225 225 if (ret)
+10 -7
net/dsa/slave.c
··· 140 140 static void dsa_slave_change_rx_flags(struct net_device *dev, int change) 141 141 { 142 142 struct net_device *master = dsa_slave_to_master(dev); 143 - 144 - if (change & IFF_ALLMULTI) 145 - dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1); 146 - if (change & IFF_PROMISC) 147 - dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1); 143 + if (dev->flags & IFF_UP) { 144 + if (change & IFF_ALLMULTI) 145 + dev_set_allmulti(master, 146 + dev->flags & IFF_ALLMULTI ? 1 : -1); 147 + if (change & IFF_PROMISC) 148 + dev_set_promiscuity(master, 149 + dev->flags & IFF_PROMISC ? 1 : -1); 150 + } 148 151 } 149 152 150 153 static void dsa_slave_set_rx_mode(struct net_device *dev) ··· 642 639 int ret; 643 640 644 641 /* Port's PHY and MAC both need to be EEE capable */ 645 - if (!dev->phydev && !dp->pl) 642 + if (!dev->phydev || !dp->pl) 646 643 return -ENODEV; 647 644 648 645 if (!ds->ops->set_mac_eee) ··· 662 659 int ret; 663 660 664 661 /* Port's PHY and MAC both need to be EEE capable */ 665 - if (!dev->phydev && !dp->pl) 662 + if (!dev->phydev || !dp->pl) 666 663 return -ENODEV; 667 664 668 665 if (!ds->ops->get_mac_eee)
+6 -1
net/ipv4/ip_gre.c
··· 1455 1455 { 1456 1456 struct ip_tunnel *t = netdev_priv(dev); 1457 1457 struct ip_tunnel_parm *p = &t->parms; 1458 + __be16 o_flags = p->o_flags; 1459 + 1460 + if ((t->erspan_ver == 1 || t->erspan_ver == 2) && 1461 + !t->collect_md) 1462 + o_flags |= TUNNEL_KEY; 1458 1463 1459 1464 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 1460 1465 nla_put_be16(skb, IFLA_GRE_IFLAGS, 1461 1466 gre_tnl_flags_to_gre_flags(p->i_flags)) || 1462 1467 nla_put_be16(skb, IFLA_GRE_OFLAGS, 1463 - gre_tnl_flags_to_gre_flags(p->o_flags)) || 1468 + gre_tnl_flags_to_gre_flags(o_flags)) || 1464 1469 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 1465 1470 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 1466 1471 nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
+50
net/ipv4/ip_vti.c
··· 74 74 return 0; 75 75 } 76 76 77 + static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi, 78 + int encap_type) 79 + { 80 + struct ip_tunnel *tunnel; 81 + const struct iphdr *iph = ip_hdr(skb); 82 + struct net *net = dev_net(skb->dev); 83 + struct ip_tunnel_net *itn = net_generic(net, vti_net_id); 84 + 85 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 86 + iph->saddr, iph->daddr, 0); 87 + if (tunnel) { 88 + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 89 + goto drop; 90 + 91 + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; 92 + 93 + skb->dev = tunnel->dev; 94 + 95 + return xfrm_input(skb, nexthdr, spi, encap_type); 96 + } 97 + 98 + return -EINVAL; 99 + drop: 100 + kfree_skb(skb); 101 + return 0; 102 + } 103 + 77 104 static int vti_rcv(struct sk_buff *skb) 78 105 { 79 106 XFRM_SPI_SKB_CB(skb)->family = AF_INET; 80 107 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); 81 108 82 109 return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); 110 + } 111 + 112 + static int vti_rcv_ipip(struct sk_buff *skb) 113 + { 114 + XFRM_SPI_SKB_CB(skb)->family = AF_INET; 115 + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); 116 + 117 + return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0); 83 118 } 84 119 85 120 static int vti_rcv_cb(struct sk_buff *skb, int err) ··· 470 435 .priority = 100, 471 436 }; 472 437 438 + static struct xfrm_tunnel ipip_handler __read_mostly = { 439 + .handler = vti_rcv_ipip, 440 + .err_handler = vti4_err, 441 + .priority = 0, 442 + }; 443 + 473 444 static int __net_init vti_init_net(struct net *net) 474 445 { 475 446 int err; ··· 644 603 if (err < 0) 645 604 goto xfrm_proto_comp_failed; 646 605 606 + msg = "ipip tunnel"; 607 + err = xfrm4_tunnel_register(&ipip_handler, AF_INET); 608 + if (err < 0) { 609 + pr_info("%s: cant't register tunnel\n",__func__); 610 + goto xfrm_tunnel_failed; 611 + } 612 + 647 613 msg = "netlink interface"; 648 614 err = 
rtnl_link_register(&vti_link_ops); 649 615 if (err < 0) ··· 660 612 661 613 rtnl_link_failed: 662 614 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); 615 + xfrm_tunnel_failed: 616 + xfrm4_tunnel_deregister(&ipip_handler, AF_INET); 663 617 xfrm_proto_comp_failed: 664 618 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); 665 619 xfrm_proto_ah_failed:
+1 -1
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 846 846 847 847 static void clusterip_net_exit(struct net *net) 848 848 { 849 + #ifdef CONFIG_PROC_FS 849 850 struct clusterip_net *cn = clusterip_pernet(net); 850 851 851 - #ifdef CONFIG_PROC_FS 852 852 mutex_lock(&cn->mutex); 853 853 proc_remove(cn->procdir); 854 854 cn->procdir = NULL;
+6 -1
net/ipv6/ip6_gre.c
··· 2098 2098 { 2099 2099 struct ip6_tnl *t = netdev_priv(dev); 2100 2100 struct __ip6_tnl_parm *p = &t->parms; 2101 + __be16 o_flags = p->o_flags; 2102 + 2103 + if ((p->erspan_ver == 1 || p->erspan_ver == 2) && 2104 + !p->collect_md) 2105 + o_flags |= TUNNEL_KEY; 2101 2106 2102 2107 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 2103 2108 nla_put_be16(skb, IFLA_GRE_IFLAGS, 2104 2109 gre_tnl_flags_to_gre_flags(p->i_flags)) || 2105 2110 nla_put_be16(skb, IFLA_GRE_OFLAGS, 2106 - gre_tnl_flags_to_gre_flags(p->o_flags)) || 2111 + gre_tnl_flags_to_gre_flags(o_flags)) || 2107 2112 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 2108 2113 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 2109 2114 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
+3 -4
net/ipv6/ip6mr.c
··· 1516 1516 continue; 1517 1517 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); 1518 1518 list_del_rcu(&c->list); 1519 + call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), 1520 + FIB_EVENT_ENTRY_DEL, 1521 + (struct mfc6_cache *)c, mrt->id); 1519 1522 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); 1520 1523 mr_cache_put(c); 1521 1524 } ··· 1527 1524 spin_lock_bh(&mfc_unres_lock); 1528 1525 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { 1529 1526 list_del(&c->list); 1530 - call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), 1531 - FIB_EVENT_ENTRY_DEL, 1532 - (struct mfc6_cache *)c, 1533 - mrt->id); 1534 1527 mr6_netlink_event(mrt, (struct mfc6_cache *)c, 1535 1528 RTM_DELROUTE); 1536 1529 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
+3 -1
net/ipv6/netfilter.c
··· 23 23 struct sock *sk = sk_to_full_sk(skb->sk); 24 24 unsigned int hh_len; 25 25 struct dst_entry *dst; 26 + int strict = (ipv6_addr_type(&iph->daddr) & 27 + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); 26 28 struct flowi6 fl6 = { 27 29 .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : 28 - rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0, 30 + strict ? skb_dst(skb)->dev->ifindex : 0, 29 31 .flowi6_mark = skb->mark, 30 32 .flowi6_uid = sock_net_uid(net, sk), 31 33 .daddr = iph->daddr,
+2
net/ipv6/seg6_iptunnel.c
··· 146 146 } else { 147 147 ip6_flow_hdr(hdr, 0, flowlabel); 148 148 hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); 149 + 150 + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); 149 151 } 150 152 151 153 hdr->nexthdr = NEXTHDR_ROUTING;
+2 -1
net/ipv6/sit.c
··· 546 546 } 547 547 548 548 err = 0; 549 - if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) 549 + if (__in6_dev_get(skb->dev) && 550 + !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) 550 551 goto out; 551 552 552 553 if (t->parms.iph.daddr == 0)
+6 -3
net/l2tp/l2tp_core.c
··· 83 83 #define L2TP_SLFLAG_S 0x40000000 84 84 #define L2TP_SL_SEQ_MASK 0x00ffffff 85 85 86 - #define L2TP_HDR_SIZE_SEQ 10 87 - #define L2TP_HDR_SIZE_NOSEQ 6 86 + #define L2TP_HDR_SIZE_MAX 14 88 87 89 88 /* Default trace flags */ 90 89 #define L2TP_DEFAULT_DEBUG_FLAGS 0 ··· 807 808 __skb_pull(skb, sizeof(struct udphdr)); 808 809 809 810 /* Short packet? */ 810 - if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { 811 + if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) { 811 812 l2tp_info(tunnel, L2TP_MSG_DATA, 812 813 "%s: recv short packet (len=%d)\n", 813 814 tunnel->name, skb->len); ··· 882 883 tunnel->name, tunnel_id, session_id); 883 884 goto error; 884 885 } 886 + 887 + if (tunnel->version == L2TP_HDR_VER_3 && 888 + l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) 889 + goto error; 885 890 886 891 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); 887 892 l2tp_session_dec_refcount(session);
+20
net/l2tp/l2tp_core.h
··· 301 301 } 302 302 #endif 303 303 304 + static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb, 305 + unsigned char **ptr, unsigned char **optr) 306 + { 307 + int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session); 308 + 309 + if (opt_len > 0) { 310 + int off = *ptr - *optr; 311 + 312 + if (!pskb_may_pull(skb, off + opt_len)) 313 + return -1; 314 + 315 + if (skb->data != *optr) { 316 + *optr = skb->data; 317 + *ptr = skb->data + off; 318 + } 319 + } 320 + 321 + return 0; 322 + } 323 + 304 324 #define l2tp_printk(ptr, type, func, fmt, ...) \ 305 325 do { \ 306 326 if (((ptr)->debug) & (type)) \
+3
net/l2tp/l2tp_ip.c
··· 165 165 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 166 166 } 167 167 168 + if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) 169 + goto discard_sess; 170 + 168 171 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); 169 172 l2tp_session_dec_refcount(session); 170 173
+3
net/l2tp/l2tp_ip6.c
··· 178 178 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 179 179 } 180 180 181 + if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) 182 + goto discard_sess; 183 + 181 184 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); 182 185 l2tp_session_dec_refcount(session); 183 186
+9 -3
net/mac80211/tx.c
··· 1938 1938 int head_need, bool may_encrypt) 1939 1939 { 1940 1940 struct ieee80211_local *local = sdata->local; 1941 + struct ieee80211_hdr *hdr; 1942 + bool enc_tailroom; 1941 1943 int tail_need = 0; 1942 1944 1943 - if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { 1945 + hdr = (struct ieee80211_hdr *) skb->data; 1946 + enc_tailroom = may_encrypt && 1947 + (sdata->crypto_tx_tailroom_needed_cnt || 1948 + ieee80211_is_mgmt(hdr->frame_control)); 1949 + 1950 + if (enc_tailroom) { 1944 1951 tail_need = IEEE80211_ENCRYPT_TAILROOM; 1945 1952 tail_need -= skb_tailroom(skb); 1946 1953 tail_need = max_t(int, tail_need, 0); ··· 1955 1948 1956 1949 if (skb_cloned(skb) && 1957 1950 (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || 1958 - !skb_clone_writable(skb, ETH_HLEN) || 1959 - (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt))) 1951 + !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom)) 1960 1952 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1961 1953 else if (head_need || tail_need) 1962 1954 I802_DEBUG_INC(local->tx_expand_skb_head);
+12
net/netfilter/ipvs/ip_vs_ctl.c
··· 2221 2221 u->udp_timeout); 2222 2222 2223 2223 #ifdef CONFIG_IP_VS_PROTO_TCP 2224 + if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) || 2225 + u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) { 2226 + return -EINVAL; 2227 + } 2228 + #endif 2229 + 2230 + #ifdef CONFIG_IP_VS_PROTO_UDP 2231 + if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ)) 2232 + return -EINVAL; 2233 + #endif 2234 + 2235 + #ifdef CONFIG_IP_VS_PROTO_TCP 2224 2236 if (u->tcp_timeout) { 2225 2237 pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); 2226 2238 pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
+16
net/netfilter/nf_conntrack_core.c
··· 1007 1007 } 1008 1008 1009 1009 if (nf_ct_key_equal(h, tuple, zone, net)) { 1010 + /* Tuple is taken already, so caller will need to find 1011 + * a new source port to use. 1012 + * 1013 + * Only exception: 1014 + * If the *original tuples* are identical, then both 1015 + * conntracks refer to the same flow. 1016 + * This is a rare situation, it can occur e.g. when 1017 + * more than one UDP packet is sent from same socket 1018 + * in different threads. 1019 + * 1020 + * Let nf_ct_resolve_clash() deal with this later. 1021 + */ 1022 + if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 1023 + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) 1024 + continue; 1025 + 1010 1026 NF_CT_STAT_INC_ATOMIC(net, found); 1011 1027 rcu_read_unlock(); 1012 1028 return 1;
+41 -44
net/netfilter/nf_tables_api.c
··· 116 116 kfree(trans); 117 117 } 118 118 119 + static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set) 120 + { 121 + struct net *net = ctx->net; 122 + struct nft_trans *trans; 123 + 124 + if (!nft_set_is_anonymous(set)) 125 + return; 126 + 127 + list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { 128 + if (trans->msg_type == NFT_MSG_NEWSET && 129 + nft_trans_set(trans) == set) { 130 + nft_trans_set_bound(trans) = true; 131 + break; 132 + } 133 + } 134 + } 135 + 119 136 static int nf_tables_register_hook(struct net *net, 120 137 const struct nft_table *table, 121 138 struct nft_chain *chain) ··· 228 211 return err; 229 212 } 230 213 231 - /* either expr ops provide both activate/deactivate, or neither */ 232 - static bool nft_expr_check_ops(const struct nft_expr_ops *ops) 233 - { 234 - if (!ops) 235 - return true; 236 - 237 - if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate))) 238 - return false; 239 - 240 - return true; 241 - } 242 - 243 214 static void nft_rule_expr_activate(const struct nft_ctx *ctx, 244 215 struct nft_rule *rule) 245 216 { ··· 243 238 } 244 239 245 240 static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, 246 - struct nft_rule *rule) 241 + struct nft_rule *rule, 242 + enum nft_trans_phase phase) 247 243 { 248 244 struct nft_expr *expr; 249 245 250 246 expr = nft_expr_first(rule); 251 247 while (expr != nft_expr_last(rule) && expr->ops) { 252 248 if (expr->ops->deactivate) 253 - expr->ops->deactivate(ctx, expr); 249 + expr->ops->deactivate(ctx, expr, phase); 254 250 255 251 expr = nft_expr_next(expr); 256 252 } ··· 302 296 nft_trans_destroy(trans); 303 297 return err; 304 298 } 305 - nft_rule_expr_deactivate(ctx, rule); 299 + nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE); 306 300 307 301 return 0; 308 302 } ··· 1935 1929 */ 1936 1930 int nft_register_expr(struct nft_expr_type *type) 1937 1931 { 1938 - if (!nft_expr_check_ops(type->ops)) 1939 - return -EINVAL; 1940 - 1941 1932 
nfnl_lock(NFNL_SUBSYS_NFTABLES); 1942 1933 if (type->family == NFPROTO_UNSPEC) 1943 1934 list_add_tail_rcu(&type->list, &nf_tables_expressions); ··· 2080 2077 (const struct nlattr * const *)info->tb); 2081 2078 if (IS_ERR(ops)) { 2082 2079 err = PTR_ERR(ops); 2083 - goto err1; 2084 - } 2085 - if (!nft_expr_check_ops(ops)) { 2086 - err = -EINVAL; 2087 2080 goto err1; 2088 2081 } 2089 2082 } else ··· 2510 2511 static void nf_tables_rule_release(const struct nft_ctx *ctx, 2511 2512 struct nft_rule *rule) 2512 2513 { 2513 - nft_rule_expr_deactivate(ctx, rule); 2514 + nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); 2514 2515 nf_tables_rule_destroy(ctx, rule); 2515 2516 } 2516 2517 ··· 3707 3708 bind: 3708 3709 binding->chain = ctx->chain; 3709 3710 list_add_tail_rcu(&binding->list, &set->bindings); 3711 + nft_set_trans_bind(ctx, set); 3712 + 3710 3713 return 0; 3711 3714 } 3712 3715 EXPORT_SYMBOL_GPL(nf_tables_bind_set); 3713 3716 3714 - void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, 3715 - struct nft_set_binding *binding) 3716 - { 3717 - if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && 3718 - nft_is_active(ctx->net, set)) 3719 - list_add_tail_rcu(&set->list, &ctx->table->sets); 3720 - 3721 - list_add_tail_rcu(&binding->list, &set->bindings); 3722 - } 3723 - EXPORT_SYMBOL_GPL(nf_tables_rebind_set); 3724 - 3725 3717 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 3726 - struct nft_set_binding *binding) 3718 + struct nft_set_binding *binding, bool event) 3727 3719 { 3728 3720 list_del_rcu(&binding->list); 3729 3721 3730 - if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && 3731 - nft_is_active(ctx->net, set)) 3722 + if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) { 3732 3723 list_del_rcu(&set->list); 3724 + if (event) 3725 + nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, 3726 + GFP_KERNEL); 3727 + } 3733 3728 } 3734 3729 EXPORT_SYMBOL_GPL(nf_tables_unbind_set); 3735 3730 
3736 3731 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) 3737 3732 { 3738 - if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && 3739 - nft_is_active(ctx->net, set)) { 3740 - nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); 3733 + if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) 3741 3734 nft_set_destroy(set); 3742 - } 3743 3735 } 3744 3736 EXPORT_SYMBOL_GPL(nf_tables_destroy_set); 3745 3737 ··· 6525 6535 nf_tables_rule_notify(&trans->ctx, 6526 6536 nft_trans_rule(trans), 6527 6537 NFT_MSG_DELRULE); 6538 + nft_rule_expr_deactivate(&trans->ctx, 6539 + nft_trans_rule(trans), 6540 + NFT_TRANS_COMMIT); 6528 6541 break; 6529 6542 case NFT_MSG_NEWSET: 6530 6543 nft_clear(net, nft_trans_set(trans)); ··· 6614 6621 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); 6615 6622 break; 6616 6623 case NFT_MSG_NEWSET: 6617 - nft_set_destroy(nft_trans_set(trans)); 6624 + if (!nft_trans_set_bound(trans)) 6625 + nft_set_destroy(nft_trans_set(trans)); 6618 6626 break; 6619 6627 case NFT_MSG_NEWSETELEM: 6620 6628 nft_set_elem_destroy(nft_trans_elem_set(trans), ··· 6676 6682 case NFT_MSG_NEWRULE: 6677 6683 trans->ctx.chain->use--; 6678 6684 list_del_rcu(&nft_trans_rule(trans)->list); 6679 - nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); 6685 + nft_rule_expr_deactivate(&trans->ctx, 6686 + nft_trans_rule(trans), 6687 + NFT_TRANS_ABORT); 6680 6688 break; 6681 6689 case NFT_MSG_DELRULE: 6682 6690 trans->ctx.chain->use++; ··· 6688 6692 break; 6689 6693 case NFT_MSG_NEWSET: 6690 6694 trans->ctx.table->use--; 6691 - list_del_rcu(&nft_trans_set(trans)->list); 6695 + if (!nft_trans_set_bound(trans)) 6696 + list_del_rcu(&nft_trans_set(trans)->list); 6692 6697 break; 6693 6698 case NFT_MSG_DELSET: 6694 6699 trans->ctx.table->use++;
+130 -47
net/netfilter/nft_compat.c
··· 22 22 #include <linux/netfilter_bridge/ebtables.h> 23 23 #include <linux/netfilter_arp/arp_tables.h> 24 24 #include <net/netfilter/nf_tables.h> 25 + #include <net/netns/generic.h> 25 26 26 27 struct nft_xt { 27 28 struct list_head head; 28 29 struct nft_expr_ops ops; 29 - unsigned int refcnt; 30 + refcount_t refcnt; 31 + 32 + /* used only when transaction mutex is locked */ 33 + unsigned int listcnt; 30 34 31 35 /* Unlike other expressions, ops doesn't have static storage duration. 32 36 * nft core assumes they do. We use kfree_rcu so that nft core can ··· 47 43 void *info; 48 44 }; 49 45 46 + struct nft_compat_net { 47 + struct list_head nft_target_list; 48 + struct list_head nft_match_list; 49 + }; 50 + 51 + static unsigned int nft_compat_net_id __read_mostly; 52 + static struct nft_expr_type nft_match_type; 53 + static struct nft_expr_type nft_target_type; 54 + 55 + static struct nft_compat_net *nft_compat_pernet(struct net *net) 56 + { 57 + return net_generic(net, nft_compat_net_id); 58 + } 59 + 60 + static void nft_xt_get(struct nft_xt *xt) 61 + { 62 + /* refcount_inc() warns on 0 -> 1 transition, but we can't 63 + * init the reference count to 1 in .select_ops -- we can't 64 + * undo such an increase when another expression inside the same 65 + * rule fails afterwards. 
66 + */ 67 + if (xt->listcnt == 0) 68 + refcount_set(&xt->refcnt, 1); 69 + else 70 + refcount_inc(&xt->refcnt); 71 + 72 + xt->listcnt++; 73 + } 74 + 50 75 static bool nft_xt_put(struct nft_xt *xt) 51 76 { 52 - if (--xt->refcnt == 0) { 53 - list_del(&xt->head); 77 + if (refcount_dec_and_test(&xt->refcnt)) { 78 + WARN_ON_ONCE(!list_empty(&xt->head)); 54 79 kfree_rcu(xt, rcu_head); 55 80 return true; 56 81 } ··· 306 273 return -EINVAL; 307 274 308 275 nft_xt = container_of(expr->ops, struct nft_xt, ops); 309 - nft_xt->refcnt++; 276 + nft_xt_get(nft_xt); 310 277 return 0; 311 278 } 312 279 ··· 519 486 return ret; 520 487 521 488 nft_xt = container_of(expr->ops, struct nft_xt, ops); 522 - nft_xt->refcnt++; 489 + nft_xt_get(nft_xt); 523 490 return 0; 524 491 } 525 492 ··· 571 538 nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 572 539 { 573 540 __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); 541 + } 542 + 543 + static void nft_compat_deactivate(const struct nft_ctx *ctx, 544 + const struct nft_expr *expr, 545 + enum nft_trans_phase phase) 546 + { 547 + struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); 548 + 549 + if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) { 550 + if (--xt->listcnt == 0) 551 + list_del_init(&xt->head); 552 + } 574 553 } 575 554 576 555 static void ··· 779 734 .cb = nfnl_nft_compat_cb, 780 735 }; 781 736 782 - static LIST_HEAD(nft_match_list); 783 - 784 - static struct nft_expr_type nft_match_type; 785 - 786 737 static bool nft_match_cmp(const struct xt_match *match, 787 738 const char *name, u32 rev, u32 family) 788 739 { ··· 790 749 nft_match_select_ops(const struct nft_ctx *ctx, 791 750 const struct nlattr * const tb[]) 792 751 { 752 + struct nft_compat_net *cn; 793 753 struct nft_xt *nft_match; 794 754 struct xt_match *match; 795 755 unsigned int matchsize; ··· 807 765 rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); 808 766 family = ctx->family; 809 767 768 + cn = nft_compat_pernet(ctx->net); 
769 + 810 770 /* Re-use the existing match if it's already loaded. */ 811 - list_for_each_entry(nft_match, &nft_match_list, head) { 771 + list_for_each_entry(nft_match, &cn->nft_match_list, head) { 812 772 struct xt_match *match = nft_match->ops.data; 813 773 814 774 if (nft_match_cmp(match, mt_name, rev, family)) ··· 833 789 goto err; 834 790 } 835 791 836 - nft_match->refcnt = 0; 792 + refcount_set(&nft_match->refcnt, 0); 837 793 nft_match->ops.type = &nft_match_type; 838 794 nft_match->ops.eval = nft_match_eval; 839 795 nft_match->ops.init = nft_match_init; 840 796 nft_match->ops.destroy = nft_match_destroy; 797 + nft_match->ops.deactivate = nft_compat_deactivate; 841 798 nft_match->ops.dump = nft_match_dump; 842 799 nft_match->ops.validate = nft_match_validate; 843 800 nft_match->ops.data = match; ··· 855 810 856 811 nft_match->ops.size = matchsize; 857 812 858 - list_add(&nft_match->head, &nft_match_list); 813 + nft_match->listcnt = 0; 814 + list_add(&nft_match->head, &cn->nft_match_list); 859 815 860 816 return &nft_match->ops; 861 817 err: ··· 872 826 .owner = THIS_MODULE, 873 827 }; 874 828 875 - static LIST_HEAD(nft_target_list); 876 - 877 - static struct nft_expr_type nft_target_type; 878 - 879 829 static bool nft_target_cmp(const struct xt_target *tg, 880 830 const char *name, u32 rev, u32 family) 881 831 { ··· 883 841 nft_target_select_ops(const struct nft_ctx *ctx, 884 842 const struct nlattr * const tb[]) 885 843 { 844 + struct nft_compat_net *cn; 886 845 struct nft_xt *nft_target; 887 846 struct xt_target *target; 888 847 char *tg_name; ··· 904 861 strcmp(tg_name, "standard") == 0) 905 862 return ERR_PTR(-EINVAL); 906 863 864 + cn = nft_compat_pernet(ctx->net); 907 865 /* Re-use the existing target if it's already loaded. 
*/ 908 - list_for_each_entry(nft_target, &nft_target_list, head) { 866 + list_for_each_entry(nft_target, &cn->nft_target_list, head) { 909 867 struct xt_target *target = nft_target->ops.data; 910 868 911 869 if (!target->target) ··· 937 893 goto err; 938 894 } 939 895 940 - nft_target->refcnt = 0; 896 + refcount_set(&nft_target->refcnt, 0); 941 897 nft_target->ops.type = &nft_target_type; 942 898 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); 943 899 nft_target->ops.init = nft_target_init; 944 900 nft_target->ops.destroy = nft_target_destroy; 901 + nft_target->ops.deactivate = nft_compat_deactivate; 945 902 nft_target->ops.dump = nft_target_dump; 946 903 nft_target->ops.validate = nft_target_validate; 947 904 nft_target->ops.data = target; ··· 952 907 else 953 908 nft_target->ops.eval = nft_target_eval_xt; 954 909 955 - list_add(&nft_target->head, &nft_target_list); 910 + nft_target->listcnt = 0; 911 + list_add(&nft_target->head, &cn->nft_target_list); 956 912 957 913 return &nft_target->ops; 958 914 err: ··· 969 923 .owner = THIS_MODULE, 970 924 }; 971 925 926 + static int __net_init nft_compat_init_net(struct net *net) 927 + { 928 + struct nft_compat_net *cn = nft_compat_pernet(net); 929 + 930 + INIT_LIST_HEAD(&cn->nft_target_list); 931 + INIT_LIST_HEAD(&cn->nft_match_list); 932 + 933 + return 0; 934 + } 935 + 936 + static void __net_exit nft_compat_exit_net(struct net *net) 937 + { 938 + struct nft_compat_net *cn = nft_compat_pernet(net); 939 + struct nft_xt *xt, *next; 940 + 941 + if (list_empty(&cn->nft_match_list) && 942 + list_empty(&cn->nft_target_list)) 943 + return; 944 + 945 + /* If there was an error that caused nft_xt expr to not be initialized 946 + * fully and noone else requested the same expression later, the lists 947 + * contain 0-refcount entries that still hold module reference. 948 + * 949 + * Clean them here. 
950 + */ 951 + mutex_lock(&net->nft.commit_mutex); 952 + list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) { 953 + struct xt_target *target = xt->ops.data; 954 + 955 + list_del_init(&xt->head); 956 + 957 + if (refcount_read(&xt->refcnt)) 958 + continue; 959 + module_put(target->me); 960 + kfree(xt); 961 + } 962 + 963 + list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) { 964 + struct xt_match *match = xt->ops.data; 965 + 966 + list_del_init(&xt->head); 967 + 968 + if (refcount_read(&xt->refcnt)) 969 + continue; 970 + module_put(match->me); 971 + kfree(xt); 972 + } 973 + mutex_unlock(&net->nft.commit_mutex); 974 + } 975 + 976 + static struct pernet_operations nft_compat_net_ops = { 977 + .init = nft_compat_init_net, 978 + .exit = nft_compat_exit_net, 979 + .id = &nft_compat_net_id, 980 + .size = sizeof(struct nft_compat_net), 981 + }; 982 + 972 983 static int __init nft_compat_module_init(void) 973 984 { 974 985 int ret; 975 986 987 + ret = register_pernet_subsys(&nft_compat_net_ops); 988 + if (ret < 0) 989 + goto err_target; 990 + 976 991 ret = nft_register_expr(&nft_match_type); 977 992 if (ret < 0) 978 - return ret; 993 + goto err_pernet; 979 994 980 995 ret = nft_register_expr(&nft_target_type); 981 996 if (ret < 0) ··· 1049 942 } 1050 943 1051 944 return ret; 1052 - 1053 945 err_target: 1054 946 nft_unregister_expr(&nft_target_type); 1055 947 err_match: 1056 948 nft_unregister_expr(&nft_match_type); 949 + err_pernet: 950 + unregister_pernet_subsys(&nft_compat_net_ops); 1057 951 return ret; 1058 952 } 1059 953 1060 954 static void __exit nft_compat_module_exit(void) 1061 955 { 1062 - struct nft_xt *xt, *next; 1063 - 1064 - /* list should be empty here, it can be non-empty only in case there 1065 - * was an error that caused nft_xt expr to not be initialized fully 1066 - * and noone else requested the same expression later. 1067 - * 1068 - * In this case, the lists contain 0-refcount entries that still 1069 - * hold module reference. 
1070 - */ 1071 - list_for_each_entry_safe(xt, next, &nft_target_list, head) { 1072 - struct xt_target *target = xt->ops.data; 1073 - 1074 - if (WARN_ON_ONCE(xt->refcnt)) 1075 - continue; 1076 - module_put(target->me); 1077 - kfree(xt); 1078 - } 1079 - 1080 - list_for_each_entry_safe(xt, next, &nft_match_list, head) { 1081 - struct xt_match *match = xt->ops.data; 1082 - 1083 - if (WARN_ON_ONCE(xt->refcnt)) 1084 - continue; 1085 - module_put(match->me); 1086 - kfree(xt); 1087 - } 1088 956 nfnetlink_subsys_unregister(&nfnl_compat_subsys); 1089 957 nft_unregister_expr(&nft_target_type); 1090 958 nft_unregister_expr(&nft_match_type); 959 + unregister_pernet_subsys(&nft_compat_net_ops); 1091 960 } 1092 961 1093 962 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
+7 -11
net/netfilter/nft_dynset.c
··· 235 235 return err; 236 236 } 237 237 238 - static void nft_dynset_activate(const struct nft_ctx *ctx, 239 - const struct nft_expr *expr) 240 - { 241 - struct nft_dynset *priv = nft_expr_priv(expr); 242 - 243 - nf_tables_rebind_set(ctx, priv->set, &priv->binding); 244 - } 245 - 246 238 static void nft_dynset_deactivate(const struct nft_ctx *ctx, 247 - const struct nft_expr *expr) 239 + const struct nft_expr *expr, 240 + enum nft_trans_phase phase) 248 241 { 249 242 struct nft_dynset *priv = nft_expr_priv(expr); 250 243 251 - nf_tables_unbind_set(ctx, priv->set, &priv->binding); 244 + if (phase == NFT_TRANS_PREPARE) 245 + return; 246 + 247 + nf_tables_unbind_set(ctx, priv->set, &priv->binding, 248 + phase == NFT_TRANS_COMMIT); 252 249 } 253 250 254 251 static void nft_dynset_destroy(const struct nft_ctx *ctx, ··· 293 296 .eval = nft_dynset_eval, 294 297 .init = nft_dynset_init, 295 298 .destroy = nft_dynset_destroy, 296 - .activate = nft_dynset_activate, 297 299 .deactivate = nft_dynset_deactivate, 298 300 .dump = nft_dynset_dump, 299 301 };
+5 -1
net/netfilter/nft_immediate.c
··· 72 72 } 73 73 74 74 static void nft_immediate_deactivate(const struct nft_ctx *ctx, 75 - const struct nft_expr *expr) 75 + const struct nft_expr *expr, 76 + enum nft_trans_phase phase) 76 77 { 77 78 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 79 + 80 + if (phase == NFT_TRANS_COMMIT) 81 + return; 78 82 79 83 return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); 80 84 }
+7 -11
net/netfilter/nft_lookup.c
··· 121 121 return 0; 122 122 } 123 123 124 - static void nft_lookup_activate(const struct nft_ctx *ctx, 125 - const struct nft_expr *expr) 126 - { 127 - struct nft_lookup *priv = nft_expr_priv(expr); 128 - 129 - nf_tables_rebind_set(ctx, priv->set, &priv->binding); 130 - } 131 - 132 124 static void nft_lookup_deactivate(const struct nft_ctx *ctx, 133 - const struct nft_expr *expr) 125 + const struct nft_expr *expr, 126 + enum nft_trans_phase phase) 134 127 { 135 128 struct nft_lookup *priv = nft_expr_priv(expr); 136 129 137 - nf_tables_unbind_set(ctx, priv->set, &priv->binding); 130 + if (phase == NFT_TRANS_PREPARE) 131 + return; 132 + 133 + nf_tables_unbind_set(ctx, priv->set, &priv->binding, 134 + phase == NFT_TRANS_COMMIT); 138 135 } 139 136 140 137 static void nft_lookup_destroy(const struct nft_ctx *ctx, ··· 222 225 .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), 223 226 .eval = nft_lookup_eval, 224 227 .init = nft_lookup_init, 225 - .activate = nft_lookup_activate, 226 228 .deactivate = nft_lookup_deactivate, 227 229 .destroy = nft_lookup_destroy, 228 230 .dump = nft_lookup_dump,
+7 -11
net/netfilter/nft_objref.c
··· 155 155 return -1; 156 156 } 157 157 158 - static void nft_objref_map_activate(const struct nft_ctx *ctx, 159 - const struct nft_expr *expr) 160 - { 161 - struct nft_objref_map *priv = nft_expr_priv(expr); 162 - 163 - nf_tables_rebind_set(ctx, priv->set, &priv->binding); 164 - } 165 - 166 158 static void nft_objref_map_deactivate(const struct nft_ctx *ctx, 167 - const struct nft_expr *expr) 159 + const struct nft_expr *expr, 160 + enum nft_trans_phase phase) 168 161 { 169 162 struct nft_objref_map *priv = nft_expr_priv(expr); 170 163 171 - nf_tables_unbind_set(ctx, priv->set, &priv->binding); 164 + if (phase == NFT_TRANS_PREPARE) 165 + return; 166 + 167 + nf_tables_unbind_set(ctx, priv->set, &priv->binding, 168 + phase == NFT_TRANS_COMMIT); 172 169 } 173 170 174 171 static void nft_objref_map_destroy(const struct nft_ctx *ctx, ··· 182 185 .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), 183 186 .eval = nft_objref_map_eval, 184 187 .init = nft_objref_map_init, 185 - .activate = nft_objref_map_activate, 186 188 .deactivate = nft_objref_map_deactivate, 187 189 .destroy = nft_objref_map_destroy, 188 190 .dump = nft_objref_map_dump,
+10 -10
net/netrom/nr_timer.c
··· 52 52 { 53 53 struct nr_sock *nr = nr_sk(sk); 54 54 55 - mod_timer(&nr->t1timer, jiffies + nr->t1); 55 + sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1); 56 56 } 57 57 58 58 void nr_start_t2timer(struct sock *sk) 59 59 { 60 60 struct nr_sock *nr = nr_sk(sk); 61 61 62 - mod_timer(&nr->t2timer, jiffies + nr->t2); 62 + sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2); 63 63 } 64 64 65 65 void nr_start_t4timer(struct sock *sk) 66 66 { 67 67 struct nr_sock *nr = nr_sk(sk); 68 68 69 - mod_timer(&nr->t4timer, jiffies + nr->t4); 69 + sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4); 70 70 } 71 71 72 72 void nr_start_idletimer(struct sock *sk) ··· 74 74 struct nr_sock *nr = nr_sk(sk); 75 75 76 76 if (nr->idle > 0) 77 - mod_timer(&nr->idletimer, jiffies + nr->idle); 77 + sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle); 78 78 } 79 79 80 80 void nr_start_heartbeat(struct sock *sk) 81 81 { 82 - mod_timer(&sk->sk_timer, jiffies + 5 * HZ); 82 + sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ); 83 83 } 84 84 85 85 void nr_stop_t1timer(struct sock *sk) 86 86 { 87 - del_timer(&nr_sk(sk)->t1timer); 87 + sk_stop_timer(sk, &nr_sk(sk)->t1timer); 88 88 } 89 89 90 90 void nr_stop_t2timer(struct sock *sk) 91 91 { 92 - del_timer(&nr_sk(sk)->t2timer); 92 + sk_stop_timer(sk, &nr_sk(sk)->t2timer); 93 93 } 94 94 95 95 void nr_stop_t4timer(struct sock *sk) 96 96 { 97 - del_timer(&nr_sk(sk)->t4timer); 97 + sk_stop_timer(sk, &nr_sk(sk)->t4timer); 98 98 } 99 99 100 100 void nr_stop_idletimer(struct sock *sk) 101 101 { 102 - del_timer(&nr_sk(sk)->idletimer); 102 + sk_stop_timer(sk, &nr_sk(sk)->idletimer); 103 103 } 104 104 105 105 void nr_stop_heartbeat(struct sock *sk) 106 106 { 107 - del_timer(&sk->sk_timer); 107 + sk_stop_timer(sk, &sk->sk_timer); 108 108 } 109 109 110 110 int nr_t1timer_running(struct sock *sk)
+3 -3
net/rds/bind.c
··· 78 78 __rds_create_bind_key(key, addr, port, scope_id); 79 79 rcu_read_lock(); 80 80 rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); 81 - if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) 82 - rds_sock_addref(rs); 83 - else 81 + if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) || 82 + !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt))) 84 83 rs = NULL; 84 + 85 85 rcu_read_unlock(); 86 86 87 87 rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
+5
net/rose/rose_route.c
··· 850 850 851 851 /* 852 852 * Route a frame to an appropriate AX.25 connection. 853 + * A NULL ax25_cb indicates an internally generated frame. 853 854 */ 854 855 int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) 855 856 { ··· 868 867 869 868 if (skb->len < ROSE_MIN_LEN) 870 869 return res; 870 + 871 + if (!ax25) 872 + return rose_loopback_queue(skb, NULL); 873 + 871 874 frametype = skb->data[2]; 872 875 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 873 876 if (frametype == ROSE_CALL_REQUEST &&
+2 -1
net/rxrpc/recvmsg.c
··· 596 596 } 597 597 error_no_call: 598 598 release_sock(&rx->sk); 599 + error_trace: 599 600 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); 600 601 return ret; 601 602 ··· 605 604 wait_error: 606 605 finish_wait(sk_sleep(&rx->sk), &wait); 607 606 call = NULL; 608 - goto error_no_call; 607 + goto error_trace; 609 608 } 610 609 611 610 /**
+5 -1
net/sched/cls_flower.c
··· 1371 1371 if (!tc_skip_hw(fnew->flags)) { 1372 1372 err = fl_hw_replace_filter(tp, fnew, extack); 1373 1373 if (err) 1374 - goto errout_mask; 1374 + goto errout_mask_ht; 1375 1375 } 1376 1376 1377 1377 if (!tc_in_hw(fnew->flags)) ··· 1400 1400 kfree(tb); 1401 1401 kfree(mask); 1402 1402 return 0; 1403 + 1404 + errout_mask_ht: 1405 + rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, 1406 + fnew->mask->filter_ht_params); 1403 1407 1404 1408 errout_mask: 1405 1409 fl_mask_put(head, fnew->mask, false);
+2 -2
net/sctp/socket.c
··· 2027 2027 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 2028 2028 struct sctp_transport *transport = NULL; 2029 2029 struct sctp_sndrcvinfo _sinfo, *sinfo; 2030 - struct sctp_association *asoc; 2030 + struct sctp_association *asoc, *tmp; 2031 2031 struct sctp_cmsgs cmsgs; 2032 2032 union sctp_addr *daddr; 2033 2033 bool new = false; ··· 2053 2053 2054 2054 /* SCTP_SENDALL process */ 2055 2055 if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { 2056 - list_for_each_entry(asoc, &ep->asocs, asocs) { 2056 + list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) { 2057 2057 err = sctp_sendmsg_check_sflags(asoc, sflags, msg, 2058 2058 msg_len); 2059 2059 if (err == 0)
+20
net/sctp/stream.c
··· 84 84 } 85 85 } 86 86 87 + static size_t fa_index(struct flex_array *fa, void *elem, size_t count) 88 + { 89 + size_t index = 0; 90 + 91 + while (count--) { 92 + if (elem == flex_array_get(fa, index)) 93 + break; 94 + index++; 95 + } 96 + 97 + return index; 98 + } 99 + 87 100 /* Migrates chunks from stream queues to new stream queues if needed, 88 101 * but not across associations. Also, removes those chunks to streams 89 102 * higher than the new max. ··· 160 147 161 148 if (stream->out) { 162 149 fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); 150 + if (stream->out_curr) { 151 + size_t index = fa_index(stream->out, stream->out_curr, 152 + stream->outcnt); 153 + 154 + BUG_ON(index == stream->outcnt); 155 + stream->out_curr = flex_array_get(out, index); 156 + } 163 157 fa_free(stream->out); 164 158 } 165 159
+10 -1
net/smc/af_smc.c
··· 1505 1505 1506 1506 smc = smc_sk(sk); 1507 1507 lock_sock(sk); 1508 + if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { 1509 + /* socket was connected before, no more data to read */ 1510 + rc = 0; 1511 + goto out; 1512 + } 1508 1513 if ((sk->sk_state == SMC_INIT) || 1509 1514 (sk->sk_state == SMC_LISTEN) || 1510 1515 (sk->sk_state == SMC_CLOSED)) ··· 1845 1840 1846 1841 smc = smc_sk(sk); 1847 1842 lock_sock(sk); 1848 - 1843 + if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { 1844 + /* socket was connected before, no more data to read */ 1845 + rc = 0; 1846 + goto out; 1847 + } 1849 1848 if (sk->sk_state == SMC_INIT || 1850 1849 sk->sk_state == SMC_LISTEN || 1851 1850 sk->sk_state == SMC_CLOSED)
+10 -11
net/smc/smc_cdc.c
··· 21 21 22 22 /********************************** send *************************************/ 23 23 24 - struct smc_cdc_tx_pend { 25 - struct smc_connection *conn; /* socket connection */ 26 - union smc_host_cursor cursor; /* tx sndbuf cursor sent */ 27 - union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ 28 - u16 ctrl_seq; /* conn. tx sequence # */ 29 - }; 30 - 31 24 /* handler for send/transmission completion of a CDC msg */ 32 25 static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, 33 26 struct smc_link *link, ··· 54 61 55 62 int smc_cdc_get_free_slot(struct smc_connection *conn, 56 63 struct smc_wr_buf **wr_buf, 64 + struct smc_rdma_wr **wr_rdma_buf, 57 65 struct smc_cdc_tx_pend **pend) 58 66 { 59 67 struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; 60 68 int rc; 61 69 62 70 rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, 71 + wr_rdma_buf, 63 72 (struct smc_wr_tx_pend_priv **)pend); 64 73 if (!conn->alert_token_local) 65 74 /* abnormal termination */ ··· 91 96 struct smc_wr_buf *wr_buf, 92 97 struct smc_cdc_tx_pend *pend) 93 98 { 99 + union smc_host_cursor cfed; 94 100 struct smc_link *link; 95 101 int rc; 96 102 ··· 103 107 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; 104 108 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, 105 109 &conn->local_tx_ctrl, conn); 110 + smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn); 106 111 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); 107 112 if (!rc) 108 - smc_curs_copy(&conn->rx_curs_confirmed, 109 - &conn->local_tx_ctrl.cons, conn); 113 + smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); 110 114 111 115 return rc; 112 116 } ··· 117 121 struct smc_wr_buf *wr_buf; 118 122 int rc; 119 123 120 - rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); 124 + rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend); 121 125 if (rc) 122 126 return rc; 123 127 124 - return smc_cdc_msg_send(conn, wr_buf, pend); 128 + 
spin_lock_bh(&conn->send_lock); 129 + rc = smc_cdc_msg_send(conn, wr_buf, pend); 130 + spin_unlock_bh(&conn->send_lock); 131 + return rc; 125 132 } 126 133 127 134 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
+32 -2
net/smc/smc_cdc.h
··· 160 160 #endif 161 161 } 162 162 163 - /* calculate cursor difference between old and new, where old <= new */ 163 + /* calculate cursor difference between old and new, where old <= new and 164 + * difference cannot exceed size 165 + */ 164 166 static inline int smc_curs_diff(unsigned int size, 165 167 union smc_host_cursor *old, 166 168 union smc_host_cursor *new) ··· 185 183 (old->wrap == new->wrap && old->count > new->count)) 186 184 return -smc_curs_diff(size, new, old); 187 185 return smc_curs_diff(size, old, new); 186 + } 187 + 188 + /* calculate cursor difference between old and new, where old <= new and 189 + * difference may exceed size 190 + */ 191 + static inline int smc_curs_diff_large(unsigned int size, 192 + union smc_host_cursor *old, 193 + union smc_host_cursor *new) 194 + { 195 + if (old->wrap < new->wrap) 196 + return min_t(int, 197 + (size - old->count) + new->count + 198 + (new->wrap - old->wrap - 1) * size, 199 + size); 200 + 201 + if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */ 202 + return min_t(int, 203 + (size - old->count) + new->count + 204 + (new->wrap + 0xffff - old->wrap) * size, 205 + size); 206 + 207 + return max_t(int, 0, (new->count - old->count)); 188 208 } 189 209 190 210 static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, ··· 294 270 smcr_cdc_msg_to_host(local, peer, conn); 295 271 } 296 272 297 - struct smc_cdc_tx_pend; 273 + struct smc_cdc_tx_pend { 274 + struct smc_connection *conn; /* socket connection */ 275 + union smc_host_cursor cursor; /* tx sndbuf cursor sent */ 276 + union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ 277 + u16 ctrl_seq; /* conn. 
tx sequence # */ 278 + }; 298 279 299 280 int smc_cdc_get_free_slot(struct smc_connection *conn, 300 281 struct smc_wr_buf **wr_buf, 282 + struct smc_rdma_wr **wr_rdma_buf, 301 283 struct smc_cdc_tx_pend **pend); 302 284 void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); 303 285 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
+1 -1
net/smc/smc_clc.c
··· 378 378 vec.iov_len = sizeof(struct smc_clc_msg_decline); 379 379 len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, 380 380 sizeof(struct smc_clc_msg_decline)); 381 - if (len < sizeof(struct smc_clc_msg_decline)) 381 + if (len < 0 || len < sizeof(struct smc_clc_msg_decline)) 382 382 len = -EPROTO; 383 383 return len > 0 ? 0 : len; 384 384 }
+1 -8
net/smc/smc_close.c
··· 345 345 346 346 switch (sk->sk_state) { 347 347 case SMC_INIT: 348 - if (atomic_read(&conn->bytes_to_rcv) || 349 - (rxflags->peer_done_writing && 350 - !smc_cdc_rxed_any_close(conn))) { 351 - sk->sk_state = SMC_APPCLOSEWAIT1; 352 - } else { 353 - sk->sk_state = SMC_CLOSED; 354 - sock_put(sk); /* passive closing */ 355 - } 348 + sk->sk_state = SMC_APPCLOSEWAIT1; 356 349 break; 357 350 case SMC_ACTIVE: 358 351 sk->sk_state = SMC_APPCLOSEWAIT1;
+5 -1
net/smc/smc_core.c
··· 128 128 { 129 129 struct smc_link_group *lgr = conn->lgr; 130 130 131 + if (!lgr) 132 + return; 131 133 write_lock_bh(&lgr->conns_lock); 132 134 if (conn->alert_token_local) { 133 135 __smc_lgr_unregister_conn(conn); ··· 302 300 conn->sndbuf_desc->used = 0; 303 301 if (conn->rmb_desc) { 304 302 if (!conn->rmb_desc->regerr) { 305 - conn->rmb_desc->used = 0; 306 303 if (!lgr->is_smcd) { 307 304 /* unregister rmb with peer */ 308 305 smc_llc_do_delete_rkey( 309 306 &lgr->lnk[SMC_SINGLE_LINK], 310 307 conn->rmb_desc); 311 308 } 309 + conn->rmb_desc->used = 0; 312 310 } else { 313 311 /* buf registration failed, reuse not possible */ 314 312 write_lock_bh(&lgr->rmbs_lock); ··· 630 628 local_contact = SMC_REUSE_CONTACT; 631 629 conn->lgr = lgr; 632 630 smc_lgr_register_conn(conn); /* add smc conn to lgr */ 631 + if (delayed_work_pending(&lgr->free_work)) 632 + cancel_delayed_work(&lgr->free_work); 633 633 write_unlock_bh(&lgr->conns_lock); 634 634 break; 635 635 }
+20
net/smc/smc_core.h
··· 52 52 FAILED /* ib_wr_reg_mr response: failure */ 53 53 }; 54 54 55 + struct smc_rdma_sge { /* sges for RDMA writes */ 56 + struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE]; 57 + }; 58 + 59 + #define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per 60 + * message send 61 + */ 62 + 63 + struct smc_rdma_sges { /* sges per message send */ 64 + struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES]; 65 + }; 66 + 67 + struct smc_rdma_wr { /* work requests per message 68 + * send 69 + */ 70 + struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES]; 71 + }; 72 + 55 73 struct smc_link { 56 74 struct smc_ib_device *smcibdev; /* ib-device */ 57 75 u8 ibport; /* port - values 1 | 2 */ ··· 82 64 struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ 83 65 struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ 84 66 struct ib_sge *wr_tx_sges; /* WR send gather meta data */ 67 + struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/ 68 + struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */ 85 69 struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ 86 70 /* above four vectors have wr_tx_cnt elements and use the same index */ 87 71 dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
+3 -3
net/smc/smc_ib.c
··· 289 289 290 290 static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) 291 291 { 292 - struct smc_ib_device *smcibdev = 293 - (struct smc_ib_device *)ibevent->device; 292 + struct smc_link *lnk = (struct smc_link *)priv; 293 + struct smc_ib_device *smcibdev = lnk->smcibdev; 294 294 u8 port_idx; 295 295 296 296 switch (ibevent->event) { ··· 298 298 case IB_EVENT_GID_CHANGE: 299 299 case IB_EVENT_PORT_ERR: 300 300 case IB_EVENT_QP_ACCESS_ERR: 301 - port_idx = ibevent->element.port_num - 1; 301 + port_idx = ibevent->element.qp->port - 1; 302 302 set_bit(port_idx, &smcibdev->port_event_mask); 303 303 schedule_work(&smcibdev->port_event_work); 304 304 break;
+2 -1
net/smc/smc_llc.c
··· 166 166 { 167 167 int rc; 168 168 169 - rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); 169 + rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL, 170 + pend); 170 171 if (rc < 0) 171 172 return rc; 172 173 BUILD_BUG_ON_MSG(
+1 -1
net/smc/smc_pnet.c
··· 27 27 static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { 28 28 [SMC_PNETID_NAME] = { 29 29 .type = NLA_NUL_STRING, 30 - .len = SMC_MAX_PNETID_LEN - 1 30 + .len = SMC_MAX_PNETID_LEN 31 31 }, 32 32 [SMC_PNETID_ETHNAME] = { 33 33 .type = NLA_NUL_STRING,
+31 -33
net/smc/smc_tx.c
··· 165 165 conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; 166 166 167 167 if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { 168 + if (send_done) 169 + return send_done; 168 170 rc = smc_tx_wait(smc, msg->msg_flags); 169 - if (rc) { 170 - if (send_done) 171 - return send_done; 171 + if (rc) 172 172 goto out_err; 173 - } 174 173 continue; 175 174 } 176 175 ··· 266 267 267 268 /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ 268 269 static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, 269 - int num_sges, struct ib_sge sges[]) 270 + int num_sges, struct ib_rdma_wr *rdma_wr) 270 271 { 271 272 struct smc_link_group *lgr = conn->lgr; 272 - struct ib_rdma_wr rdma_wr; 273 273 struct smc_link *link; 274 274 int rc; 275 275 276 - memset(&rdma_wr, 0, sizeof(rdma_wr)); 277 276 link = &lgr->lnk[SMC_SINGLE_LINK]; 278 - rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); 279 - rdma_wr.wr.sg_list = sges; 280 - rdma_wr.wr.num_sge = num_sges; 281 - rdma_wr.wr.opcode = IB_WR_RDMA_WRITE; 282 - rdma_wr.remote_addr = 277 + rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link); 278 + rdma_wr->wr.num_sge = num_sges; 279 + rdma_wr->remote_addr = 283 280 lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + 284 281 /* RMBE within RMB */ 285 282 conn->tx_off + 286 283 /* offset within RMBE */ 287 284 peer_rmbe_offset; 288 - rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; 289 - rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL); 285 + rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; 286 + rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL); 290 287 if (rc) { 291 288 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; 292 289 smc_lgr_terminate(lgr); ··· 309 314 /* SMC-R helper for smc_tx_rdma_writes() */ 310 315 static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, 311 316 size_t src_off, size_t src_len, 312 - size_t dst_off, size_t dst_len) 317 + 
size_t dst_off, size_t dst_len, 318 + struct smc_rdma_wr *wr_rdma_buf) 313 319 { 314 320 dma_addr_t dma_addr = 315 321 sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); 316 - struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; 317 322 int src_len_sum = src_len, dst_len_sum = dst_len; 318 - struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; 319 323 int sent_count = src_off; 320 324 int srcchunk, dstchunk; 321 325 int num_sges; 322 326 int rc; 323 327 324 328 for (dstchunk = 0; dstchunk < 2; dstchunk++) { 329 + struct ib_sge *sge = 330 + wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list; 331 + 325 332 num_sges = 0; 326 333 for (srcchunk = 0; srcchunk < 2; srcchunk++) { 327 - sges[srcchunk].addr = dma_addr + src_off; 328 - sges[srcchunk].length = src_len; 329 - sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; 334 + sge[srcchunk].addr = dma_addr + src_off; 335 + sge[srcchunk].length = src_len; 330 336 num_sges++; 331 337 332 338 src_off += src_len; ··· 340 344 src_len = dst_len - src_len; /* remainder */ 341 345 src_len_sum += src_len; 342 346 } 343 - rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); 347 + rc = smc_tx_rdma_write(conn, dst_off, num_sges, 348 + &wr_rdma_buf->wr_tx_rdma[dstchunk]); 344 349 if (rc) 345 350 return rc; 346 351 if (dst_len_sum == len) ··· 400 403 /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; 401 404 * usable snd_wnd as max transmit 402 405 */ 403 - static int smc_tx_rdma_writes(struct smc_connection *conn) 406 + static int smc_tx_rdma_writes(struct smc_connection *conn, 407 + struct smc_rdma_wr *wr_rdma_buf) 404 408 { 405 409 size_t len, src_len, dst_off, dst_len; /* current chunk values */ 406 410 union smc_host_cursor sent, prep, prod, cons; ··· 462 464 dst_off, dst_len); 463 465 else 464 466 rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, 465 - dst_off, dst_len); 467 + dst_off, dst_len, wr_rdma_buf); 466 468 if (rc) 467 469 return rc; 468 470 ··· 483 485 static int 
smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 484 486 { 485 487 struct smc_cdc_producer_flags *pflags; 488 + struct smc_rdma_wr *wr_rdma_buf; 486 489 struct smc_cdc_tx_pend *pend; 487 490 struct smc_wr_buf *wr_buf; 488 491 int rc; 489 492 490 - spin_lock_bh(&conn->send_lock); 491 - rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); 493 + rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend); 492 494 if (rc < 0) { 493 495 if (rc == -EBUSY) { 494 496 struct smc_sock *smc = 495 497 container_of(conn, struct smc_sock, conn); 496 498 497 - if (smc->sk.sk_err == ECONNABORTED) { 498 - rc = sock_error(&smc->sk); 499 - goto out_unlock; 500 - } 499 + if (smc->sk.sk_err == ECONNABORTED) 500 + return sock_error(&smc->sk); 501 501 rc = 0; 502 502 if (conn->alert_token_local) /* connection healthy */ 503 503 mod_delayed_work(system_wq, &conn->tx_work, 504 504 SMC_TX_WORK_DELAY); 505 505 } 506 - goto out_unlock; 506 + return rc; 507 507 } 508 508 509 + spin_lock_bh(&conn->send_lock); 509 510 if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { 510 - rc = smc_tx_rdma_writes(conn); 511 + rc = smc_tx_rdma_writes(conn, wr_rdma_buf); 511 512 if (rc) { 512 513 smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], 513 514 (struct smc_wr_tx_pend_priv *)pend); ··· 533 536 534 537 spin_lock_bh(&conn->send_lock); 535 538 if (!pflags->urg_data_present) 536 - rc = smc_tx_rdma_writes(conn); 539 + rc = smc_tx_rdma_writes(conn, NULL); 537 540 if (!rc) 538 541 rc = smcd_cdc_msg_send(conn); 539 542 ··· 595 598 if (to_confirm > conn->rmbe_update_limit) { 596 599 smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); 597 600 sender_free = conn->rmb_desc->len - 598 - smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); 601 + smc_curs_diff_large(conn->rmb_desc->len, 602 + &cfed, &prod); 599 603 } 600 604 601 605 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
+41 -5
net/smc/smc_wr.c
··· 160 160 * @link: Pointer to smc_link used to later send the message. 161 161 * @handler: Send completion handler function pointer. 162 162 * @wr_buf: Out value returns pointer to message buffer. 163 + * @wr_rdma_buf: Out value returns pointer to rdma work request. 163 164 * @wr_pend_priv: Out value returns pointer serving as handler context. 164 165 * 165 166 * Return: 0 on success, or -errno on error. ··· 168 167 int smc_wr_tx_get_free_slot(struct smc_link *link, 169 168 smc_wr_tx_handler handler, 170 169 struct smc_wr_buf **wr_buf, 170 + struct smc_rdma_wr **wr_rdma_buf, 171 171 struct smc_wr_tx_pend_priv **wr_pend_priv) 172 172 { 173 173 struct smc_wr_tx_pend *wr_pend; ··· 206 204 wr_ib = &link->wr_tx_ibs[idx]; 207 205 wr_ib->wr_id = wr_id; 208 206 *wr_buf = &link->wr_tx_bufs[idx]; 207 + if (wr_rdma_buf) 208 + *wr_rdma_buf = &link->wr_tx_rdmas[idx]; 209 209 *wr_pend_priv = &wr_pend->priv; 210 210 return 0; 211 211 } ··· 222 218 u32 idx = pend->idx; 223 219 224 220 /* clear the full struct smc_wr_tx_pend including .priv */ 225 - memset(&link->wr_tx_pends[pend->idx], 0, 226 - sizeof(link->wr_tx_pends[pend->idx])); 227 - memset(&link->wr_tx_bufs[pend->idx], 0, 228 - sizeof(link->wr_tx_bufs[pend->idx])); 221 + memset(&link->wr_tx_pends[idx], 0, 222 + sizeof(link->wr_tx_pends[idx])); 223 + memset(&link->wr_tx_bufs[idx], 0, 224 + sizeof(link->wr_tx_bufs[idx])); 229 225 test_and_clear_bit(idx, link->wr_tx_mask); 230 226 return 1; 231 227 } ··· 469 465 lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; 470 466 lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; 471 467 lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; 468 + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey = 469 + lnk->roce_pd->local_dma_lkey; 470 + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey = 471 + lnk->roce_pd->local_dma_lkey; 472 + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey = 473 + lnk->roce_pd->local_dma_lkey; 474 + 
lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey = 475 + lnk->roce_pd->local_dma_lkey; 472 476 lnk->wr_tx_ibs[i].next = NULL; 473 477 lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; 474 478 lnk->wr_tx_ibs[i].num_sge = 1; 475 479 lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; 476 480 lnk->wr_tx_ibs[i].send_flags = 477 481 IB_SEND_SIGNALED | IB_SEND_SOLICITED; 482 + lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE; 483 + lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE; 484 + lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list = 485 + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge; 486 + lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list = 487 + lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge; 478 488 } 479 489 for (i = 0; i < lnk->wr_rx_cnt; i++) { 480 490 lnk->wr_rx_sges[i].addr = ··· 539 521 lnk->wr_tx_mask = NULL; 540 522 kfree(lnk->wr_tx_sges); 541 523 lnk->wr_tx_sges = NULL; 524 + kfree(lnk->wr_tx_rdma_sges); 525 + lnk->wr_tx_rdma_sges = NULL; 542 526 kfree(lnk->wr_rx_sges); 543 527 lnk->wr_rx_sges = NULL; 528 + kfree(lnk->wr_tx_rdmas); 529 + lnk->wr_tx_rdmas = NULL; 544 530 kfree(lnk->wr_rx_ibs); 545 531 lnk->wr_rx_ibs = NULL; 546 532 kfree(lnk->wr_tx_ibs); ··· 574 552 GFP_KERNEL); 575 553 if (!link->wr_rx_ibs) 576 554 goto no_mem_wr_tx_ibs; 555 + link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, 556 + sizeof(link->wr_tx_rdmas[0]), 557 + GFP_KERNEL); 558 + if (!link->wr_tx_rdmas) 559 + goto no_mem_wr_rx_ibs; 560 + link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, 561 + sizeof(link->wr_tx_rdma_sges[0]), 562 + GFP_KERNEL); 563 + if (!link->wr_tx_rdma_sges) 564 + goto no_mem_wr_tx_rdmas; 577 565 link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), 578 566 GFP_KERNEL); 579 567 if (!link->wr_tx_sges) 580 - goto no_mem_wr_rx_ibs; 568 + goto no_mem_wr_tx_rdma_sges; 581 569 link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, 582 570 sizeof(link->wr_rx_sges[0]), 583 571 GFP_KERNEL); ··· 611 579 kfree(link->wr_rx_sges); 612 580 
no_mem_wr_tx_sges: 613 581 kfree(link->wr_tx_sges); 582 + no_mem_wr_tx_rdma_sges: 583 + kfree(link->wr_tx_rdma_sges); 584 + no_mem_wr_tx_rdmas: 585 + kfree(link->wr_tx_rdmas); 614 586 no_mem_wr_rx_ibs: 615 587 kfree(link->wr_rx_ibs); 616 588 no_mem_wr_tx_ibs:
+1
net/smc/smc_wr.h
··· 85 85 86 86 int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, 87 87 struct smc_wr_buf **wr_buf, 88 + struct smc_rdma_wr **wrs, 88 89 struct smc_wr_tx_pend_priv **wr_pend_priv); 89 90 int smc_wr_tx_put_slot(struct smc_link *link, 90 91 struct smc_wr_tx_pend_priv *wr_pend_priv);
+63 -19
net/socket.c
··· 941 941 EXPORT_SYMBOL(dlci_ioctl_set); 942 942 943 943 static long sock_do_ioctl(struct net *net, struct socket *sock, 944 - unsigned int cmd, unsigned long arg, 945 - unsigned int ifreq_size) 944 + unsigned int cmd, unsigned long arg) 946 945 { 947 946 int err; 948 947 void __user *argp = (void __user *)arg; ··· 967 968 } else { 968 969 struct ifreq ifr; 969 970 bool need_copyout; 970 - if (copy_from_user(&ifr, argp, ifreq_size)) 971 + if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) 971 972 return -EFAULT; 972 973 err = dev_ioctl(net, cmd, &ifr, &need_copyout); 973 974 if (!err && need_copyout) 974 - if (copy_to_user(argp, &ifr, ifreq_size)) 975 + if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) 975 976 return -EFAULT; 976 977 } 977 978 return err; ··· 1070 1071 err = open_related_ns(&net->ns, get_net_ns); 1071 1072 break; 1072 1073 default: 1073 - err = sock_do_ioctl(net, sock, cmd, arg, 1074 - sizeof(struct ifreq)); 1074 + err = sock_do_ioctl(net, sock, cmd, arg); 1075 1075 break; 1076 1076 } 1077 1077 return err; ··· 2778 2780 int err; 2779 2781 2780 2782 set_fs(KERNEL_DS); 2781 - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, 2782 - sizeof(struct compat_ifreq)); 2783 + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); 2783 2784 set_fs(old_fs); 2784 2785 if (!err) 2785 2786 err = compat_put_timeval(&ktv, up); ··· 2794 2797 int err; 2795 2798 2796 2799 set_fs(KERNEL_DS); 2797 - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, 2798 - sizeof(struct compat_ifreq)); 2800 + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); 2799 2801 set_fs(old_fs); 2800 2802 if (!err) 2801 2803 err = compat_put_timespec(&kts, up); ··· 2990 2994 return dev_ioctl(net, cmd, &ifreq, NULL); 2991 2995 } 2992 2996 2997 + static int compat_ifreq_ioctl(struct net *net, struct socket *sock, 2998 + unsigned int cmd, 2999 + struct compat_ifreq __user *uifr32) 3000 + { 3001 + struct ifreq __user *uifr; 3002 + int err; 3003 + 3004 + /* Handle the fact 
that while struct ifreq has the same *layout* on 3005 + * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data, 3006 + * which are handled elsewhere, it still has different *size* due to 3007 + * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit, 3008 + * resulting in struct ifreq being 32 and 40 bytes respectively). 3009 + * As a result, if the struct happens to be at the end of a page and 3010 + * the next page isn't readable/writable, we get a fault. To prevent 3011 + * that, copy back and forth to the full size. 3012 + */ 3013 + 3014 + uifr = compat_alloc_user_space(sizeof(*uifr)); 3015 + if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) 3016 + return -EFAULT; 3017 + 3018 + err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); 3019 + 3020 + if (!err) { 3021 + switch (cmd) { 3022 + case SIOCGIFFLAGS: 3023 + case SIOCGIFMETRIC: 3024 + case SIOCGIFMTU: 3025 + case SIOCGIFMEM: 3026 + case SIOCGIFHWADDR: 3027 + case SIOCGIFINDEX: 3028 + case SIOCGIFADDR: 3029 + case SIOCGIFBRDADDR: 3030 + case SIOCGIFDSTADDR: 3031 + case SIOCGIFNETMASK: 3032 + case SIOCGIFPFLAGS: 3033 + case SIOCGIFTXQLEN: 3034 + case SIOCGMIIPHY: 3035 + case SIOCGMIIREG: 3036 + case SIOCGIFNAME: 3037 + if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) 3038 + err = -EFAULT; 3039 + break; 3040 + } 3041 + } 3042 + return err; 3043 + } 3044 + 2993 3045 static int compat_sioc_ifmap(struct net *net, unsigned int cmd, 2994 3046 struct compat_ifreq __user *uifr32) 2995 3047 { ··· 3153 3109 } 3154 3110 3155 3111 set_fs(KERNEL_DS); 3156 - ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, 3157 - sizeof(struct compat_ifreq)); 3112 + ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); 3158 3113 set_fs(old_fs); 3159 3114 3160 3115 out: ··· 3253 3210 case SIOCSIFTXQLEN: 3254 3211 case SIOCBRADDIF: 3255 3212 case SIOCBRDELIF: 3213 + case SIOCGIFNAME: 3256 3214 case SIOCSIFNAME: 3257 3215 case SIOCGMIIPHY: 3258 3216 case SIOCGMIIREG: 3259 3217 case SIOCSMIIREG: 3260 - case 
SIOCSARP: 3261 - case SIOCGARP: 3262 - case SIOCDARP: 3263 - case SIOCATMARK: 3264 3218 case SIOCBONDENSLAVE: 3265 3219 case SIOCBONDRELEASE: 3266 3220 case SIOCBONDSETHWADDR: 3267 3221 case SIOCBONDCHANGEACTIVE: 3268 - case SIOCGIFNAME: 3269 - return sock_do_ioctl(net, sock, cmd, arg, 3270 - sizeof(struct compat_ifreq)); 3222 + return compat_ifreq_ioctl(net, sock, cmd, argp); 3223 + 3224 + case SIOCSARP: 3225 + case SIOCGARP: 3226 + case SIOCDARP: 3227 + case SIOCATMARK: 3228 + return sock_do_ioctl(net, sock, cmd, arg); 3271 3229 } 3272 3230 3273 3231 return -ENOIOCTLCMD;
+99 -6
net/sunrpc/xprtrdma/svc_rdma_sendto.c
··· 537 537 DMA_TO_DEVICE); 538 538 } 539 539 540 + /* If the xdr_buf has more elements than the device can 541 + * transmit in a single RDMA Send, then the reply will 542 + * have to be copied into a bounce buffer. 543 + */ 544 + static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma, 545 + struct xdr_buf *xdr, 546 + __be32 *wr_lst) 547 + { 548 + int elements; 549 + 550 + /* xdr->head */ 551 + elements = 1; 552 + 553 + /* xdr->pages */ 554 + if (!wr_lst) { 555 + unsigned int remaining; 556 + unsigned long pageoff; 557 + 558 + pageoff = xdr->page_base & ~PAGE_MASK; 559 + remaining = xdr->page_len; 560 + while (remaining) { 561 + ++elements; 562 + remaining -= min_t(u32, PAGE_SIZE - pageoff, 563 + remaining); 564 + pageoff = 0; 565 + } 566 + } 567 + 568 + /* xdr->tail */ 569 + if (xdr->tail[0].iov_len) 570 + ++elements; 571 + 572 + /* assume 1 SGE is needed for the transport header */ 573 + return elements >= rdma->sc_max_send_sges; 574 + } 575 + 576 + /* The device is not capable of sending the reply directly. 577 + * Assemble the elements of @xdr into the transport header 578 + * buffer. 
579 + */ 580 + static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, 581 + struct svc_rdma_send_ctxt *ctxt, 582 + struct xdr_buf *xdr, __be32 *wr_lst) 583 + { 584 + unsigned char *dst, *tailbase; 585 + unsigned int taillen; 586 + 587 + dst = ctxt->sc_xprt_buf; 588 + dst += ctxt->sc_sges[0].length; 589 + 590 + memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len); 591 + dst += xdr->head[0].iov_len; 592 + 593 + tailbase = xdr->tail[0].iov_base; 594 + taillen = xdr->tail[0].iov_len; 595 + if (wr_lst) { 596 + u32 xdrpad; 597 + 598 + xdrpad = xdr_padsize(xdr->page_len); 599 + if (taillen && xdrpad) { 600 + tailbase += xdrpad; 601 + taillen -= xdrpad; 602 + } 603 + } else { 604 + unsigned int len, remaining; 605 + unsigned long pageoff; 606 + struct page **ppages; 607 + 608 + ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); 609 + pageoff = xdr->page_base & ~PAGE_MASK; 610 + remaining = xdr->page_len; 611 + while (remaining) { 612 + len = min_t(u32, PAGE_SIZE - pageoff, remaining); 613 + 614 + memcpy(dst, page_address(*ppages), len); 615 + remaining -= len; 616 + dst += len; 617 + pageoff = 0; 618 + } 619 + } 620 + 621 + if (taillen) 622 + memcpy(dst, tailbase, taillen); 623 + 624 + ctxt->sc_sges[0].length += xdr->len; 625 + ib_dma_sync_single_for_device(rdma->sc_pd->device, 626 + ctxt->sc_sges[0].addr, 627 + ctxt->sc_sges[0].length, 628 + DMA_TO_DEVICE); 629 + 630 + return 0; 631 + } 632 + 540 633 /* svc_rdma_map_reply_msg - Map the buffer holding RPC message 541 634 * @rdma: controlling transport 542 635 * @ctxt: send_ctxt for the Send WR ··· 652 559 u32 xdr_pad; 653 560 int ret; 654 561 655 - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 656 - return -EIO; 562 + if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst)) 563 + return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst); 564 + 565 + ++ctxt->sc_cur_sge_no; 657 566 ret = svc_rdma_dma_map_buf(rdma, ctxt, 658 567 xdr->head[0].iov_base, 659 568 xdr->head[0].iov_len); ··· 686 591 while 
(remaining) { 687 592 len = min_t(u32, PAGE_SIZE - page_off, remaining); 688 593 689 - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 690 - return -EIO; 594 + ++ctxt->sc_cur_sge_no; 691 595 ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, 692 596 page_off, len); 693 597 if (ret < 0) ··· 700 606 len = xdr->tail[0].iov_len; 701 607 tail: 702 608 if (len) { 703 - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 704 - return -EIO; 609 + ++ctxt->sc_cur_sge_no; 705 610 ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); 706 611 if (ret < 0) 707 612 return ret;
+3 -6
net/sunrpc/xprtrdma/svc_rdma_transport.c
··· 419 419 /* Transport header, head iovec, tail iovec */ 420 420 newxprt->sc_max_send_sges = 3; 421 421 /* Add one SGE per page list entry */ 422 - newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE; 423 - if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) { 424 - pr_err("svcrdma: too few Send SGEs available (%d needed)\n", 425 - newxprt->sc_max_send_sges); 426 - goto errout; 427 - } 422 + newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1; 423 + if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) 424 + newxprt->sc_max_send_sges = dev->attrs.max_send_sge; 428 425 newxprt->sc_max_req_size = svcrdma_max_req_size; 429 426 newxprt->sc_max_requests = svcrdma_max_requests; 430 427 newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
+5 -1
net/tls/tls_sw.c
··· 439 439 struct scatterlist *sge = sk_msg_elem(msg_en, start); 440 440 int rc; 441 441 442 + memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data)); 443 + 442 444 sge->offset += tls_ctx->tx.prepend_size; 443 445 sge->length -= tls_ctx->tx.prepend_size; 444 446 ··· 450 448 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); 451 449 aead_request_set_crypt(aead_req, rec->sg_aead_in, 452 450 rec->sg_aead_out, 453 - data_len, tls_ctx->tx.iv); 451 + data_len, rec->iv_data); 454 452 455 453 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, 456 454 tls_encrypt_done, sk); ··· 1794 1792 if (atomic_read(&ctx->encrypt_pending)) 1795 1793 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1796 1794 1795 + release_sock(sk); 1797 1796 cancel_delayed_work_sync(&ctx->tx_work.work); 1797 + lock_sock(sk); 1798 1798 1799 1799 /* Tx whatever records we can transmit and abandon the rest */ 1800 1800 tls_tx_records(sk, -1);
+21 -8
net/vmw_vsock/virtio_transport.c
··· 75 75 { 76 76 struct virtio_vsock *vsock = virtio_vsock_get(); 77 77 78 + if (!vsock) 79 + return VMADDR_CID_ANY; 80 + 78 81 return vsock->guest_cid; 79 82 } 80 83 ··· 587 584 588 585 virtio_vsock_update_guest_cid(vsock); 589 586 590 - ret = vsock_core_init(&virtio_transport.transport); 591 - if (ret < 0) 592 - goto out_vqs; 593 - 594 587 vsock->rx_buf_nr = 0; 595 588 vsock->rx_buf_max_nr = 0; 596 589 atomic_set(&vsock->queued_replies, 0); ··· 617 618 mutex_unlock(&the_virtio_vsock_mutex); 618 619 return 0; 619 620 620 - out_vqs: 621 - vsock->vdev->config->del_vqs(vsock->vdev); 622 621 out: 623 622 kfree(vsock); 624 623 mutex_unlock(&the_virtio_vsock_mutex); ··· 633 636 flush_work(&vsock->tx_work); 634 637 flush_work(&vsock->event_work); 635 638 flush_work(&vsock->send_pkt_work); 639 + 640 + /* Reset all connected sockets when the device disappear */ 641 + vsock_for_each_connected_socket(virtio_vsock_reset_sock); 636 642 637 643 vdev->config->reset(vdev); 638 644 ··· 669 669 670 670 mutex_lock(&the_virtio_vsock_mutex); 671 671 the_virtio_vsock = NULL; 672 - vsock_core_exit(); 673 672 mutex_unlock(&the_virtio_vsock_mutex); 674 673 675 674 vdev->config->del_vqs(vdev); ··· 701 702 virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); 702 703 if (!virtio_vsock_workqueue) 703 704 return -ENOMEM; 705 + 704 706 ret = register_virtio_driver(&virtio_vsock_driver); 705 707 if (ret) 706 - destroy_workqueue(virtio_vsock_workqueue); 708 + goto out_wq; 709 + 710 + ret = vsock_core_init(&virtio_transport.transport); 711 + if (ret) 712 + goto out_vdr; 713 + 714 + return 0; 715 + 716 + out_vdr: 717 + unregister_virtio_driver(&virtio_vsock_driver); 718 + out_wq: 719 + destroy_workqueue(virtio_vsock_workqueue); 707 720 return ret; 721 + 708 722 } 709 723 710 724 static void __exit virtio_vsock_exit(void) 711 725 { 726 + vsock_core_exit(); 712 727 unregister_virtio_driver(&virtio_vsock_driver); 713 728 destroy_workqueue(virtio_vsock_workqueue); 714 729 }
+2
net/wireless/ap.c
··· 41 41 cfg80211_sched_dfs_chan_update(rdev); 42 42 } 43 43 44 + schedule_work(&cfg80211_disconnect_work); 45 + 44 46 return err; 45 47 } 46 48
+2
net/wireless/core.h
··· 445 445 bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, 446 446 u32 center_freq_khz, u32 bw_khz); 447 447 448 + extern struct work_struct cfg80211_disconnect_work; 449 + 448 450 /** 449 451 * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable 450 452 * @wiphy: the wiphy to validate against
+1 -1
net/wireless/sme.c
··· 667 667 rtnl_unlock(); 668 668 } 669 669 670 - static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); 670 + DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); 671 671 672 672 673 673 /*
+33 -30
net/xfrm/xfrm_policy.c
··· 680 680 mutex_unlock(&hash_resize_mutex); 681 681 } 682 682 683 - static void xfrm_hash_reset_inexact_table(struct net *net) 684 - { 685 - struct xfrm_pol_inexact_bin *b; 686 - 687 - lockdep_assert_held(&net->xfrm.xfrm_policy_lock); 688 - 689 - list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins) 690 - INIT_HLIST_HEAD(&b->hhead); 691 - } 692 - 693 683 /* Make sure *pol can be inserted into fastbin. 694 684 * Useful to check that later insert requests will be sucessful 695 685 * (provided xfrm_policy_lock is held throughout). ··· 823 833 u16 family) 824 834 { 825 835 unsigned int matched_s, matched_d; 826 - struct hlist_node *newpos = NULL; 827 836 struct xfrm_policy *policy, *p; 828 837 829 838 matched_s = 0; 830 839 matched_d = 0; 831 840 832 841 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { 842 + struct hlist_node *newpos = NULL; 833 843 bool matches_s, matches_d; 834 844 835 845 if (!policy->bydst_reinsert) ··· 839 849 840 850 policy->bydst_reinsert = false; 841 851 hlist_for_each_entry(p, &n->hhead, bydst) { 842 - if (policy->priority >= p->priority) 852 + if (policy->priority > p->priority) 853 + newpos = &p->bydst; 854 + else if (policy->priority == p->priority && 855 + policy->pos > p->pos) 843 856 newpos = &p->bydst; 844 857 else 845 858 break; 846 859 } 847 860 848 861 if (newpos) 849 - hlist_add_behind(&policy->bydst, newpos); 862 + hlist_add_behind_rcu(&policy->bydst, newpos); 850 863 else 851 - hlist_add_head(&policy->bydst, &n->hhead); 864 + hlist_add_head_rcu(&policy->bydst, &n->hhead); 852 865 853 866 /* paranoia checks follow. 
854 867 * Check that the reinserted policy matches at least ··· 886 893 struct rb_root *new, 887 894 u16 family) 888 895 { 889 - struct rb_node **p, *parent = NULL; 890 896 struct xfrm_pol_inexact_node *node; 897 + struct rb_node **p, *parent; 891 898 892 899 /* we should not have another subtree here */ 893 900 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root)); 894 - 901 + restart: 902 + parent = NULL; 895 903 p = &new->rb_node; 896 904 while (*p) { 897 905 u8 prefixlen; ··· 912 918 } else { 913 919 struct xfrm_policy *tmp; 914 920 915 - hlist_for_each_entry(tmp, &node->hhead, bydst) 921 + hlist_for_each_entry(tmp, &n->hhead, bydst) { 916 922 tmp->bydst_reinsert = true; 917 - hlist_for_each_entry(tmp, &n->hhead, bydst) 918 - tmp->bydst_reinsert = true; 923 + hlist_del_rcu(&tmp->bydst); 924 + } 919 925 920 - INIT_HLIST_HEAD(&node->hhead); 921 926 xfrm_policy_inexact_list_reinsert(net, node, family); 922 927 923 928 if (node->prefixlen == n->prefixlen) { ··· 928 935 kfree_rcu(n, rcu); 929 936 n = node; 930 937 n->prefixlen = prefixlen; 931 - *p = new->rb_node; 932 - parent = NULL; 938 + goto restart; 933 939 } 934 940 } 935 941 ··· 957 965 family); 958 966 } 959 967 960 - hlist_for_each_entry(tmp, &v->hhead, bydst) 968 + hlist_for_each_entry(tmp, &v->hhead, bydst) { 961 969 tmp->bydst_reinsert = true; 962 - hlist_for_each_entry(tmp, &n->hhead, bydst) 963 - tmp->bydst_reinsert = true; 970 + hlist_del_rcu(&tmp->bydst); 971 + } 964 972 965 - INIT_HLIST_HEAD(&n->hhead); 966 973 xfrm_policy_inexact_list_reinsert(net, n, family); 967 974 } 968 975 ··· 1226 1235 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); 1227 1236 1228 1237 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 1238 + write_seqcount_begin(&xfrm_policy_hash_generation); 1229 1239 1230 1240 /* make sure that we can insert the indirect policies again before 1231 1241 * we start with destructive action. 
··· 1270 1278 } 1271 1279 1272 1280 /* reset the bydst and inexact table in all directions */ 1273 - xfrm_hash_reset_inexact_table(net); 1274 - 1275 1281 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 1276 - INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 1282 + struct hlist_node *n; 1283 + 1284 + hlist_for_each_entry_safe(policy, n, 1285 + &net->xfrm.policy_inexact[dir], 1286 + bydst_inexact_list) 1287 + hlist_del_init(&policy->bydst_inexact_list); 1288 + 1277 1289 hmask = net->xfrm.policy_bydst[dir].hmask; 1278 1290 odst = net->xfrm.policy_bydst[dir].table; 1279 1291 for (i = hmask; i >= 0; i--) ··· 1309 1313 newpos = NULL; 1310 1314 chain = policy_hash_bysel(net, &policy->selector, 1311 1315 policy->family, dir); 1316 + 1317 + hlist_del_rcu(&policy->bydst); 1318 + 1312 1319 if (!chain) { 1313 1320 void *p = xfrm_policy_inexact_insert(policy, dir, 0); 1314 1321 ··· 1333 1334 1334 1335 out_unlock: 1335 1336 __xfrm_policy_inexact_flush(net); 1337 + write_seqcount_end(&xfrm_policy_hash_generation); 1336 1338 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1337 1339 1338 1340 mutex_unlock(&hash_resize_mutex); ··· 2600 2600 dst_copy_metrics(dst1, dst); 2601 2601 2602 2602 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 2603 - __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); 2603 + __u32 mark = 0; 2604 + 2605 + if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m) 2606 + mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); 2604 2607 2605 2608 family = xfrm[i]->props.family; 2606 2609 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+9 -4
net/xfrm/xfrm_user.c
··· 1488 1488 if (!ut[i].family) 1489 1489 ut[i].family = family; 1490 1490 1491 - if ((ut[i].mode == XFRM_MODE_TRANSPORT) && 1492 - (ut[i].family != prev_family)) 1493 - return -EINVAL; 1494 - 1491 + switch (ut[i].mode) { 1492 + case XFRM_MODE_TUNNEL: 1493 + case XFRM_MODE_BEET: 1494 + break; 1495 + default: 1496 + if (ut[i].family != prev_family) 1497 + return -EINVAL; 1498 + break; 1499 + } 1495 1500 if (ut[i].mode >= XFRM_MODE_MAX) 1496 1501 return -EINVAL; 1497 1502
+1 -1
samples/mei/mei-amt-version.c
··· 117 117 118 118 me->verbose = verbose; 119 119 120 - me->fd = open("/dev/mei", O_RDWR); 120 + me->fd = open("/dev/mei0", O_RDWR); 121 121 if (me->fd == -1) { 122 122 mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); 123 123 goto err;
+4 -1
security/apparmor/domain.c
··· 1444 1444 new = aa_label_merge(label, target, GFP_KERNEL); 1445 1445 if (IS_ERR_OR_NULL(new)) { 1446 1446 info = "failed to build target label"; 1447 - error = PTR_ERR(new); 1447 + if (!new) 1448 + error = -ENOMEM; 1449 + else 1450 + error = PTR_ERR(new); 1448 1451 new = NULL; 1449 1452 perms.allow = 0; 1450 1453 goto audit;
+2
security/apparmor/lsm.c
··· 1599 1599 return apparmor_ip_postroute(priv, skb, state); 1600 1600 } 1601 1601 1602 + #if IS_ENABLED(CONFIG_IPV6) 1602 1603 static unsigned int apparmor_ipv6_postroute(void *priv, 1603 1604 struct sk_buff *skb, 1604 1605 const struct nf_hook_state *state) 1605 1606 { 1606 1607 return apparmor_ip_postroute(priv, skb, state); 1607 1608 } 1609 + #endif 1608 1610 1609 1611 static const struct nf_hook_ops apparmor_nf_ops[] = { 1610 1612 {
+8 -1
sound/core/pcm_lib.c
··· 2112 2112 return 0; 2113 2113 } 2114 2114 2115 + /* allow waiting for a capture stream that hasn't been started */ 2116 + #if IS_ENABLED(CONFIG_SND_PCM_OSS) 2117 + #define wait_capture_start(substream) ((substream)->oss.oss) 2118 + #else 2119 + #define wait_capture_start(substream) false 2120 + #endif 2121 + 2115 2122 /* the common loop for read/write data */ 2116 2123 snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, 2117 2124 void *data, bool interleaved, ··· 2189 2182 err = snd_pcm_start(substream); 2190 2183 if (err < 0) 2191 2184 goto _end_unlock; 2192 - } else { 2185 + } else if (!wait_capture_start(substream)) { 2193 2186 /* nothing to do */ 2194 2187 err = 0; 2195 2188 goto _end_unlock;
+2 -1
sound/pci/hda/hda_bind.c
··· 115 115 err = snd_hda_codec_build_controls(codec); 116 116 if (err < 0) 117 117 goto error_module; 118 - if (codec->card->registered) { 118 + /* only register after the bus probe finished; otherwise it's racy */ 119 + if (!codec->bus->bus_probing && codec->card->registered) { 119 120 err = snd_card_register(codec->card); 120 121 if (err < 0) 121 122 goto error_module;
+2
sound/pci/hda/hda_intel.c
··· 2185 2185 int dev = chip->dev_index; 2186 2186 int err; 2187 2187 2188 + to_hda_bus(bus)->bus_probing = 1; 2188 2189 hda->probe_continued = 1; 2189 2190 2190 2191 /* bind with i915 if needed */ ··· 2270 2269 if (err < 0) 2271 2270 hda->init_failed = 1; 2272 2271 complete_all(&hda->probe_wait); 2272 + to_hda_bus(bus)->bus_probing = 0; 2273 2273 return err; 2274 2274 } 2275 2275
+3 -1
sound/pci/hda/patch_ca0132.c
··· 8451 8451 ca0132_exit_chip(codec); 8452 8452 8453 8453 snd_hda_power_down(codec); 8454 - if (IS_ENABLED(CONFIG_PCI) && spec->mem_base) 8454 + #ifdef CONFIG_PCI 8455 + if (spec->mem_base) 8455 8456 pci_iounmap(codec->bus->pci, spec->mem_base); 8457 + #endif 8456 8458 kfree(spec->spec_init_verbs); 8457 8459 kfree(codec->spec); 8458 8460 }
+79 -59
sound/pci/hda/patch_realtek.c
··· 117 117 int codec_variant; /* flag for other variants */ 118 118 unsigned int has_alc5505_dsp:1; 119 119 unsigned int no_depop_delay:1; 120 + unsigned int done_hp_init:1; 120 121 121 122 /* for PLL fix */ 122 123 hda_nid_t pll_nid; ··· 515 514 } 516 515 } 517 516 517 + /* get a primary headphone pin if available */ 518 + static hda_nid_t alc_get_hp_pin(struct alc_spec *spec) 519 + { 520 + if (spec->gen.autocfg.hp_pins[0]) 521 + return spec->gen.autocfg.hp_pins[0]; 522 + if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) 523 + return spec->gen.autocfg.line_out_pins[0]; 524 + return 0; 525 + } 518 526 519 527 /* 520 528 * Realtek SSID verification ··· 734 724 * 15 : 1 --> enable the function "Mute internal speaker 735 725 * when the external headphone out jack is plugged" 736 726 */ 737 - if (!spec->gen.autocfg.hp_pins[0] && 738 - !(spec->gen.autocfg.line_out_pins[0] && 739 - spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) { 727 + if (!alc_get_hp_pin(spec)) { 740 728 hda_nid_t nid; 741 729 tmp = (ass >> 11) & 0x3; /* HP to chassis */ 742 730 nid = ports[tmp]; ··· 2966 2958 static void alc282_init(struct hda_codec *codec) 2967 2959 { 2968 2960 struct alc_spec *spec = codec->spec; 2969 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 2961 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 2970 2962 bool hp_pin_sense; 2971 2963 int coef78; 2972 2964 ··· 3003 2995 static void alc282_shutup(struct hda_codec *codec) 3004 2996 { 3005 2997 struct alc_spec *spec = codec->spec; 3006 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 2998 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3007 2999 bool hp_pin_sense; 3008 3000 int coef78; 3009 3001 ··· 3081 3073 static void alc283_init(struct hda_codec *codec) 3082 3074 { 3083 3075 struct alc_spec *spec = codec->spec; 3084 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3076 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3085 3077 bool hp_pin_sense; 3086 - 3087 - if (!spec->gen.autocfg.hp_outs) { 3088 - if 
(spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) 3089 - hp_pin = spec->gen.autocfg.line_out_pins[0]; 3090 - } 3091 3078 3092 3079 alc283_restore_default_value(codec); 3093 3080 ··· 3117 3114 static void alc283_shutup(struct hda_codec *codec) 3118 3115 { 3119 3116 struct alc_spec *spec = codec->spec; 3120 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3117 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3121 3118 bool hp_pin_sense; 3122 - 3123 - if (!spec->gen.autocfg.hp_outs) { 3124 - if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) 3125 - hp_pin = spec->gen.autocfg.line_out_pins[0]; 3126 - } 3127 3119 3128 3120 if (!hp_pin) { 3129 3121 alc269_shutup(codec); ··· 3153 3155 static void alc256_init(struct hda_codec *codec) 3154 3156 { 3155 3157 struct alc_spec *spec = codec->spec; 3156 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3158 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3157 3159 bool hp_pin_sense; 3158 3160 3159 3161 if (!hp_pin) ··· 3189 3191 static void alc256_shutup(struct hda_codec *codec) 3190 3192 { 3191 3193 struct alc_spec *spec = codec->spec; 3192 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3194 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3193 3195 bool hp_pin_sense; 3194 3196 3195 3197 if (!hp_pin) { ··· 3225 3227 static void alc225_init(struct hda_codec *codec) 3226 3228 { 3227 3229 struct alc_spec *spec = codec->spec; 3228 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3230 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3229 3231 bool hp1_pin_sense, hp2_pin_sense; 3230 3232 3231 3233 if (!hp_pin) ··· 3268 3270 static void alc225_shutup(struct hda_codec *codec) 3269 3271 { 3270 3272 struct alc_spec *spec = codec->spec; 3271 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3273 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3272 3274 bool hp1_pin_sense, hp2_pin_sense; 3273 3275 3274 3276 if (!hp_pin) { ··· 3312 3314 static void alc_default_init(struct hda_codec *codec) 3313 3315 { 3314 3316 struct alc_spec *spec = codec->spec; 3315 
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3317 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3316 3318 bool hp_pin_sense; 3317 3319 3318 3320 if (!hp_pin) ··· 3341 3343 static void alc_default_shutup(struct hda_codec *codec) 3342 3344 { 3343 3345 struct alc_spec *spec = codec->spec; 3344 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3346 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3345 3347 bool hp_pin_sense; 3346 3348 3347 3349 if (!hp_pin) { ··· 3368 3370 3369 3371 alc_auto_setup_eapd(codec, false); 3370 3372 snd_hda_shutup_pins(codec); 3373 + } 3374 + 3375 + static void alc294_hp_init(struct hda_codec *codec) 3376 + { 3377 + struct alc_spec *spec = codec->spec; 3378 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 3379 + int i, val; 3380 + 3381 + if (!hp_pin) 3382 + return; 3383 + 3384 + snd_hda_codec_write(codec, hp_pin, 0, 3385 + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); 3386 + 3387 + msleep(100); 3388 + 3389 + snd_hda_codec_write(codec, hp_pin, 0, 3390 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); 3391 + 3392 + alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */ 3393 + alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */ 3394 + 3395 + /* Wait for depop procedure finish */ 3396 + val = alc_read_coefex_idx(codec, 0x58, 0x01); 3397 + for (i = 0; i < 20 && val & 0x0080; i++) { 3398 + msleep(50); 3399 + val = alc_read_coefex_idx(codec, 0x58, 0x01); 3400 + } 3401 + /* Set HP depop to auto mode */ 3402 + alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b); 3403 + msleep(50); 3404 + } 3405 + 3406 + static void alc294_init(struct hda_codec *codec) 3407 + { 3408 + struct alc_spec *spec = codec->spec; 3409 + 3410 + if (!spec->done_hp_init) { 3411 + alc294_hp_init(codec); 3412 + spec->done_hp_init = true; 3413 + } 3414 + alc_default_init(codec); 3371 3415 } 3372 3416 3373 3417 static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg, ··· 4777 4737 struct alc_spec *spec = codec->spec; 4778 4738 
4779 4739 hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; 4780 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 4740 + hda_nid_t hp_pin = alc_get_hp_pin(spec); 4781 4741 4782 4742 int new_headset_mode; 4783 4743 ··· 5056 5016 static void alc_shutup_dell_xps13(struct hda_codec *codec) 5057 5017 { 5058 5018 struct alc_spec *spec = codec->spec; 5059 - int hp_pin = spec->gen.autocfg.hp_pins[0]; 5019 + int hp_pin = alc_get_hp_pin(spec); 5060 5020 5061 5021 /* Prevent pop noises when headphones are plugged in */ 5062 5022 snd_hda_codec_write(codec, hp_pin, 0, ··· 5149 5109 5150 5110 if (action == HDA_FIXUP_ACT_PROBE) { 5151 5111 int mic_pin = find_ext_mic_pin(codec); 5152 - int hp_pin = spec->gen.autocfg.hp_pins[0]; 5112 + int hp_pin = alc_get_hp_pin(spec); 5153 5113 5154 5114 if (snd_BUG_ON(!mic_pin || !hp_pin)) 5155 5115 return; ··· 5631 5591 ALC294_FIXUP_ASUS_HEADSET_MIC, 5632 5592 ALC294_FIXUP_ASUS_SPK, 5633 5593 ALC225_FIXUP_HEADSET_JACK, 5594 + ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, 5634 5595 }; 5635 5596 5636 5597 static const struct hda_fixup alc269_fixups[] = { ··· 6578 6537 .type = HDA_FIXUP_FUNC, 6579 6538 .v.func = alc_fixup_headset_jack, 6580 6539 }, 6540 + [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = { 6541 + .type = HDA_FIXUP_PINS, 6542 + .v.pins = (const struct hda_pintbl[]) { 6543 + { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */ 6544 + { } 6545 + }, 6546 + .chained = true, 6547 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC 6548 + }, 6581 6549 }; 6582 6550 6583 6551 static const struct snd_pci_quirk alc269_fixup_tbl[] = { ··· 6765 6715 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 6766 6716 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), 6767 6717 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), 6718 + SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 6768 6719 
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), 6769 6720 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 6770 6721 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), ··· 7424 7373 alc_update_coef_idx(codec, 0x4, 0, 1<<11); 7425 7374 } 7426 7375 7427 - static void alc294_hp_init(struct hda_codec *codec) 7428 - { 7429 - struct alc_spec *spec = codec->spec; 7430 - hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 7431 - int i, val; 7432 - 7433 - if (!hp_pin) 7434 - return; 7435 - 7436 - snd_hda_codec_write(codec, hp_pin, 0, 7437 - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); 7438 - 7439 - msleep(100); 7440 - 7441 - snd_hda_codec_write(codec, hp_pin, 0, 7442 - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); 7443 - 7444 - alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */ 7445 - alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */ 7446 - 7447 - /* Wait for depop procedure finish */ 7448 - val = alc_read_coefex_idx(codec, 0x58, 0x01); 7449 - for (i = 0; i < 20 && val & 0x0080; i++) { 7450 - msleep(50); 7451 - val = alc_read_coefex_idx(codec, 0x58, 0x01); 7452 - } 7453 - /* Set HP depop to auto mode */ 7454 - alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b); 7455 - msleep(50); 7456 - } 7457 - 7458 7376 /* 7459 7377 */ 7460 7378 static int patch_alc269(struct hda_codec *codec) ··· 7549 7529 spec->codec_variant = ALC269_TYPE_ALC294; 7550 7530 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ 7551 7531 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ 7552 - alc294_hp_init(codec); 7532 + spec->init_hook = alc294_init; 7553 7533 break; 7554 7534 case 0x10ec0300: 7555 7535 spec->codec_variant = ALC269_TYPE_ALC300; ··· 7561 7541 spec->codec_variant = ALC269_TYPE_ALC700; 7562 7542 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ 7563 7543 
alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ 7564 - alc294_hp_init(codec); 7544 + spec->init_hook = alc294_init; 7565 7545 break; 7566 7546 7567 7547 }
+2
sound/usb/quirks.c
··· 1492 1492 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1493 1493 break; 1494 1494 1495 + case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */ 1495 1496 case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */ 1496 1497 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ 1497 1498 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ ··· 1567 1566 case 0x20b1: /* XMOS based devices */ 1568 1567 case 0x152a: /* Thesycon devices */ 1569 1568 case 0x25ce: /* Mytek devices */ 1569 + case 0x2ab6: /* T+A devices */ 1570 1570 if (fp->dsd_raw) 1571 1571 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1572 1572 break;
+1 -5
tools/bpf/bpftool/common.c
··· 297 297 snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); 298 298 299 299 fdi = fopen(path, "r"); 300 - if (!fdi) { 301 - p_err("can't open fdinfo: %s", strerror(errno)); 300 + if (!fdi) 302 301 return NULL; 303 - } 304 302 305 303 while ((n = getline(&line, &line_n, fdi)) > 0) { 306 304 char *value; ··· 311 313 312 314 value = strchr(line, '\t'); 313 315 if (!value || !value[1]) { 314 - p_err("malformed fdinfo!?"); 315 316 free(line); 316 317 return NULL; 317 318 } ··· 323 326 return line; 324 327 } 325 328 326 - p_err("key '%s' not found in fdinfo", key); 327 329 free(line); 328 330 fclose(fdi); 329 331 return NULL;
+24 -9
tools/bpf/bpftool/map.c
··· 347 347 return argv + i; 348 348 } 349 349 350 + /* on per cpu maps we must copy the provided value on all value instances */ 351 + static void fill_per_cpu_value(struct bpf_map_info *info, void *value) 352 + { 353 + unsigned int i, n, step; 354 + 355 + if (!map_is_per_cpu(info->type)) 356 + return; 357 + 358 + n = get_possible_cpus(); 359 + step = round_up(info->value_size, 8); 360 + for (i = 1; i < n; i++) 361 + memcpy(value + i * step, value, info->value_size); 362 + } 363 + 350 364 static int parse_elem(char **argv, struct bpf_map_info *info, 351 365 void *key, void *value, __u32 key_size, __u32 value_size, 352 366 __u32 *flags, __u32 **value_fd) ··· 440 426 argv = parse_bytes(argv, "value", value, value_size); 441 427 if (!argv) 442 428 return -1; 429 + 430 + fill_per_cpu_value(info, value); 443 431 } 444 432 445 433 return parse_elem(argv, info, key, NULL, key_size, value_size, ··· 513 497 jsonw_uint_field(json_wtr, "owner_prog_type", 514 498 prog_type); 515 499 } 516 - if (atoi(owner_jited)) 517 - jsonw_bool_field(json_wtr, "owner_jited", true); 518 - else 519 - jsonw_bool_field(json_wtr, "owner_jited", false); 500 + if (owner_jited) 501 + jsonw_bool_field(json_wtr, "owner_jited", 502 + !!atoi(owner_jited)); 520 503 521 504 free(owner_prog_type); 522 505 free(owner_jited); ··· 568 553 char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); 569 554 char *owner_jited = get_fdinfo(fd, "owner_jited"); 570 555 571 - printf("\n\t"); 556 + if (owner_prog_type || owner_jited) 557 + printf("\n\t"); 572 558 if (owner_prog_type) { 573 559 unsigned int prog_type = atoi(owner_prog_type); 574 560 ··· 579 563 else 580 564 printf("owner_prog_type %d ", prog_type); 581 565 } 582 - if (atoi(owner_jited)) 583 - printf("owner jited"); 584 - else 585 - printf("owner not jited"); 566 + if (owner_jited) 567 + printf("owner%s jited", 568 + atoi(owner_jited) ? "" : " not"); 586 569 587 570 free(owner_prog_type); 588 571 free(owner_jited);
+3 -2
tools/bpf/bpftool/prog.c
··· 78 78 79 79 static int prog_fd_by_tag(unsigned char *tag) 80 80 { 81 - struct bpf_prog_info info = {}; 82 - __u32 len = sizeof(info); 83 81 unsigned int id = 0; 84 82 int err; 85 83 int fd; 86 84 87 85 while (true) { 86 + struct bpf_prog_info info = {}; 87 + __u32 len = sizeof(info); 88 + 88 89 err = bpf_prog_get_next_id(id, &id); 89 90 if (err) { 90 91 p_err("%s", strerror(errno));
+1 -1
tools/iio/iio_generic_buffer.c
··· 330 330 331 331 int main(int argc, char **argv) 332 332 { 333 - unsigned long long num_loops = 2; 333 + long long num_loops = 2; 334 334 unsigned long timedelay = 1000000; 335 335 unsigned long buf_len = 128; 336 336
+1 -1
tools/include/uapi/linux/in.h
··· 268 268 #define IN_MULTICAST(a) IN_CLASSD(a) 269 269 #define IN_MULTICAST_NET 0xe0000000 270 270 271 - #define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) 271 + #define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) 272 272 #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) 273 273 274 274 #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+12 -4
tools/perf/Documentation/perf-c2c.txt
··· 19 19 The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows 20 20 you to track down the cacheline contentions. 21 21 22 - The tool is based on x86's load latency and precise store facility events 23 - provided by Intel CPUs. These events provide: 22 + On x86, the tool is based on load latency and precise store facility events 23 + provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling 24 + with thresholding feature. 25 + 26 + These events provide: 24 27 - memory address of the access 25 28 - type of the access (load and store details) 26 29 - latency (in cycles) of the load access ··· 49 46 50 47 -l:: 51 48 --ldlat:: 52 - Configure mem-loads latency. 49 + Configure mem-loads latency. (x86 only) 53 50 54 51 -k:: 55 52 --all-kernel:: ··· 122 119 -W,-d,--phys-data,--sample-cpu 123 120 124 121 Unless specified otherwise with '-e' option, following events are monitored by 125 - default: 122 + default on x86: 126 123 127 124 cpu/mem-loads,ldlat=30/P 128 125 cpu/mem-stores/P 126 + 127 + and following on PowerPC: 128 + 129 + cpu/mem-loads/ 130 + cpu/mem-stores/ 129 131 130 132 User can pass any 'perf record' option behind '--' mark, like (to enable 131 133 callchains and system wide monitoring):
+1 -1
tools/perf/Documentation/perf-mem.txt
··· 82 82 Be more verbose (show counter open errors, etc) 83 83 84 84 --ldlat <n>:: 85 - Specify desired latency for loads event. 85 + Specify desired latency for loads event. (x86 only) 86 86 87 87 In addition, for report all perf report options are valid, and for record 88 88 all perf record options.
+1
tools/perf/arch/powerpc/util/Build
··· 2 2 libperf-y += sym-handling.o 3 3 libperf-y += kvm-stat.o 4 4 libperf-y += perf_regs.o 5 + libperf-y += mem-events.o 5 6 6 7 libperf-$(CONFIG_DWARF) += dwarf-regs.o 7 8 libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
+11
tools/perf/arch/powerpc/util/mem-events.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include "mem-events.h" 3 + 4 + /* PowerPC does not support 'ldlat' parameter. */ 5 + char *perf_mem_events__name(int i) 6 + { 7 + if (i == PERF_MEM_EVENTS__LOAD) 8 + return (char *) "cpu/mem-loads/"; 9 + 10 + return (char *) "cpu/mem-stores/"; 11 + }
+3 -6
tools/perf/builtin-script.c
··· 1681 1681 .force_header = false, 1682 1682 }; 1683 1683 struct perf_evsel *ev2; 1684 - static bool init; 1685 1684 u64 val; 1686 1685 1687 - if (!init) { 1688 - perf_stat__init_shadow_stats(); 1689 - init = true; 1690 - } 1691 1686 if (!evsel->stats) 1692 1687 perf_evlist__alloc_stats(script->session->evlist, false); 1693 1688 if (evsel_script(evsel->leader)->gnum++ == 0) ··· 1789 1794 return; 1790 1795 } 1791 1796 1792 - if (PRINT_FIELD(TRACE)) { 1797 + if (PRINT_FIELD(TRACE) && sample->raw_data) { 1793 1798 event_format__fprintf(evsel->tp_format, sample->cpu, 1794 1799 sample->raw_data, sample->raw_size, fp); 1795 1800 } ··· 2353 2358 int ret; 2354 2359 2355 2360 signal(SIGINT, sig_handler); 2361 + 2362 + perf_stat__init_shadow_stats(); 2356 2363 2357 2364 /* override event processing functions */ 2358 2365 if (script->show_task_events) {
+18 -7
tools/perf/builtin-trace.c
··· 2514 2514 2515 2515 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist) 2516 2516 { 2517 - struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname"); 2517 + bool found = false; 2518 + struct perf_evsel *evsel, *tmp; 2519 + struct parse_events_error err = { .idx = 0, }; 2520 + int ret = parse_events(evlist, "probe:vfs_getname*", &err); 2518 2521 2519 - if (IS_ERR(evsel)) 2522 + if (ret) 2520 2523 return false; 2521 2524 2522 - if (perf_evsel__field(evsel, "pathname") == NULL) { 2525 + evlist__for_each_entry_safe(evlist, evsel, tmp) { 2526 + if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname")) 2527 + continue; 2528 + 2529 + if (perf_evsel__field(evsel, "pathname")) { 2530 + evsel->handler = trace__vfs_getname; 2531 + found = true; 2532 + continue; 2533 + } 2534 + 2535 + list_del_init(&evsel->node); 2536 + evsel->evlist = NULL; 2523 2537 perf_evsel__delete(evsel); 2524 - return false; 2525 2538 } 2526 2539 2527 - evsel->handler = trace__vfs_getname; 2528 - perf_evlist__add(evlist, evsel); 2529 - return true; 2540 + return found; 2530 2541 } 2531 2542 2532 2543 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
+19 -13
tools/perf/tests/attr.py
··· 1 1 #! /usr/bin/python 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 + from __future__ import print_function 5 + 4 6 import os 5 7 import sys 6 8 import glob ··· 10 8 import tempfile 11 9 import logging 12 10 import shutil 13 - import ConfigParser 11 + 12 + try: 13 + import configparser 14 + except ImportError: 15 + import ConfigParser as configparser 14 16 15 17 def data_equal(a, b): 16 18 # Allow multiple values in assignment separated by '|' ··· 106 100 def equal(self, other): 107 101 for t in Event.terms: 108 102 log.debug(" [%s] %s %s" % (t, self[t], other[t])); 109 - if not self.has_key(t) or not other.has_key(t): 103 + if t not in self or t not in other: 110 104 return False 111 105 if not data_equal(self[t], other[t]): 112 106 return False 113 107 return True 114 108 115 109 def optional(self): 116 - if self.has_key('optional') and self['optional'] == '1': 110 + if 'optional' in self and self['optional'] == '1': 117 111 return True 118 112 return False 119 113 120 114 def diff(self, other): 121 115 for t in Event.terms: 122 - if not self.has_key(t) or not other.has_key(t): 116 + if t not in self or t not in other: 123 117 continue 124 118 if not data_equal(self[t], other[t]): 125 119 log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) ··· 140 134 # - expected values assignments 141 135 class Test(object): 142 136 def __init__(self, path, options): 143 - parser = ConfigParser.SafeConfigParser() 137 + parser = configparser.SafeConfigParser() 144 138 parser.read(path) 145 139 146 140 log.warning("running '%s'" % path) ··· 199 193 return True 200 194 201 195 def load_events(self, path, events): 202 - parser_event = ConfigParser.SafeConfigParser() 196 + parser_event = configparser.SafeConfigParser() 203 197 parser_event.read(path) 204 198 205 199 # The event record section header contains 'event' word, ··· 213 207 # Read parent event if there's any 214 208 if (':' in section): 215 209 base = section[section.index(':') + 1:] 216 - parser_base = 
ConfigParser.SafeConfigParser() 210 + parser_base = configparser.SafeConfigParser() 217 211 parser_base.read(self.test_dir + '/' + base) 218 212 base_items = parser_base.items('event') 219 213 ··· 328 322 for f in glob.glob(options.test_dir + '/' + options.test): 329 323 try: 330 324 Test(f, options).run() 331 - except Unsup, obj: 325 + except Unsup as obj: 332 326 log.warning("unsupp %s" % obj.getMsg()) 333 - except Notest, obj: 327 + except Notest as obj: 334 328 log.warning("skipped %s" % obj.getMsg()) 335 329 336 330 def setup_log(verbose): ··· 369 363 parser.add_option("-p", "--perf", 370 364 action="store", type="string", dest="perf") 371 365 parser.add_option("-v", "--verbose", 372 - action="count", dest="verbose") 366 + default=0, action="count", dest="verbose") 373 367 374 368 options, args = parser.parse_args() 375 369 if args: ··· 379 373 setup_log(options.verbose) 380 374 381 375 if not options.test_dir: 382 - print 'FAILED no -d option specified' 376 + print('FAILED no -d option specified') 383 377 sys.exit(-1) 384 378 385 379 if not options.test: ··· 388 382 try: 389 383 run_tests(options) 390 384 391 - except Fail, obj: 392 - print "FAILED %s" % obj.getMsg(); 385 + except Fail as obj: 386 + print("FAILED %s" % obj.getMsg()) 393 387 sys.exit(-1) 394 388 395 389 sys.exit(0)
+1 -1
tools/perf/tests/evsel-tp-sched.c
··· 17 17 return -1; 18 18 } 19 19 20 - is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED); 20 + is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED); 21 21 if (should_be_signed && !is_signed) { 22 22 pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", 23 23 evsel->name, name, is_signed, should_be_signed);
+10 -6
tools/perf/ui/browsers/annotate.c
··· 224 224 return ret; 225 225 } 226 226 227 - static int disasm__cmp(struct annotation_line *a, struct annotation_line *b) 227 + static double disasm__cmp(struct annotation_line *a, struct annotation_line *b, 228 + int percent_type) 228 229 { 229 230 int i; 230 231 231 232 for (i = 0; i < a->data_nr; i++) { 232 - if (a->data[i].percent == b->data[i].percent) 233 + if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type]) 233 234 continue; 234 - return a->data[i].percent < b->data[i].percent; 235 + return a->data[i].percent[percent_type] - 236 + b->data[i].percent[percent_type]; 235 237 } 236 238 return 0; 237 239 } 238 240 239 - static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al) 241 + static void disasm_rb_tree__insert(struct annotate_browser *browser, 242 + struct annotation_line *al) 240 243 { 244 + struct rb_root *root = &browser->entries; 241 245 struct rb_node **p = &root->rb_node; 242 246 struct rb_node *parent = NULL; 243 247 struct annotation_line *l; ··· 250 246 parent = *p; 251 247 l = rb_entry(parent, struct annotation_line, rb_node); 252 248 253 - if (disasm__cmp(al, l)) 249 + if (disasm__cmp(al, l, browser->opts->percent_type) < 0) 254 250 p = &(*p)->rb_left; 255 251 else 256 252 p = &(*p)->rb_right; ··· 333 329 RB_CLEAR_NODE(&pos->al.rb_node); 334 330 continue; 335 331 } 336 - disasm_rb_tree__insert(&browser->entries, &pos->al); 332 + disasm_rb_tree__insert(browser, &pos->al); 337 333 } 338 334 pthread_mutex_unlock(&notes->lock); 339 335
+1 -1
tools/perf/util/c++/clang.cpp
··· 160 160 } 161 161 PM.run(*Module); 162 162 163 - return std::move(Buffer); 163 + return Buffer; 164 164 } 165 165 166 166 }
+9 -2
tools/perf/util/cpumap.c
··· 134 134 if (!cpu_list) 135 135 return cpu_map__read_all_cpu_map(); 136 136 137 - if (!isdigit(*cpu_list)) 137 + /* 138 + * must handle the case of empty cpumap to cover 139 + * TOPOLOGY header for NUMA nodes with no CPU 140 + * ( e.g., because of CPU hotplug) 141 + */ 142 + if (!isdigit(*cpu_list) && *cpu_list != '\0') 138 143 goto out; 139 144 140 145 while (isdigit(*cpu_list)) { ··· 186 181 187 182 if (nr_cpus > 0) 188 183 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); 189 - else 184 + else if (*cpu_list != '\0') 190 185 cpus = cpu_map__default_new(); 186 + else 187 + cpus = cpu_map__dummy_new(); 191 188 invalid: 192 189 free(tmp_cpus); 193 190 out:
+1 -1
tools/perf/util/mem-events.c
··· 28 28 static char mem_loads_name[100]; 29 29 static bool mem_loads_name__init; 30 30 31 - char *perf_mem_events__name(int i) 31 + char * __weak perf_mem_events__name(int i) 32 32 { 33 33 if (i == PERF_MEM_EVENTS__LOAD) { 34 34 if (!mem_loads_name__init) {
+4 -2
tools/perf/util/ordered-events.c
··· 391 391 * Current buffer might not have all the events allocated 392 392 * yet, we need to free only allocated ones ... 393 393 */ 394 - list_del(&oe->buffer->list); 395 - ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); 394 + if (oe->buffer) { 395 + list_del(&oe->buffer->list); 396 + ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); 397 + } 396 398 397 399 /* ... and continue with the rest */ 398 400 list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
+2
tools/perf/util/setup.py
··· 17 17 vars[var] = sub("-mcet", "", vars[var]) 18 18 if not clang_has_option("-fcf-protection"): 19 19 vars[var] = sub("-fcf-protection", "", vars[var]) 20 + if not clang_has_option("-fstack-clash-protection"): 21 + vars[var] = sub("-fstack-clash-protection", "", vars[var]) 20 22 21 23 from distutils.core import setup, Extension 22 24
+22 -1
tools/perf/util/symbol-elf.c
··· 19 19 #define EM_AARCH64 183 /* ARM 64 bit */ 20 20 #endif 21 21 22 + #ifndef ELF32_ST_VISIBILITY 23 + #define ELF32_ST_VISIBILITY(o) ((o) & 0x03) 24 + #endif 25 + 26 + /* For ELF64 the definitions are the same. */ 27 + #ifndef ELF64_ST_VISIBILITY 28 + #define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) 29 + #endif 30 + 31 + /* How to extract information held in the st_other field. */ 32 + #ifndef GELF_ST_VISIBILITY 33 + #define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val) 34 + #endif 35 + 22 36 typedef Elf64_Nhdr GElf_Nhdr; 23 37 24 38 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT ··· 101 87 return GELF_ST_TYPE(sym->st_info); 102 88 } 103 89 90 + static inline uint8_t elf_sym__visibility(const GElf_Sym *sym) 91 + { 92 + return GELF_ST_VISIBILITY(sym->st_other); 93 + } 94 + 104 95 #ifndef STT_GNU_IFUNC 105 96 #define STT_GNU_IFUNC 10 106 97 #endif ··· 130 111 return elf_sym__type(sym) == STT_NOTYPE && 131 112 sym->st_name != 0 && 132 113 sym->st_shndx != SHN_UNDEF && 133 - sym->st_shndx != SHN_ABS; 114 + sym->st_shndx != SHN_ABS && 115 + elf_sym__visibility(sym) != STV_HIDDEN && 116 + elf_sym__visibility(sym) != STV_INTERNAL; 134 117 } 135 118 136 119 static bool elf_sym__filter(GElf_Sym *sym)
+1
tools/testing/selftests/Makefile
··· 10 10 TARGETS += efivarfs 11 11 TARGETS += exec 12 12 TARGETS += filesystems 13 + TARGETS += filesystems/binderfs 13 14 TARGETS += firmware 14 15 TARGETS += ftrace 15 16 TARGETS += futex
+21 -11
tools/testing/selftests/bpf/bpf_util.h
··· 13 13 unsigned int start, end, possible_cpus = 0; 14 14 char buff[128]; 15 15 FILE *fp; 16 - int n; 16 + int len, n, i, j = 0; 17 17 18 18 fp = fopen(fcpu, "r"); 19 19 if (!fp) { ··· 21 21 exit(1); 22 22 } 23 23 24 - while (fgets(buff, sizeof(buff), fp)) { 25 - n = sscanf(buff, "%u-%u", &start, &end); 26 - if (n == 0) { 27 - printf("Failed to retrieve # possible CPUs!\n"); 28 - exit(1); 29 - } else if (n == 1) { 30 - end = start; 31 - } 32 - possible_cpus = start == 0 ? end + 1 : 0; 33 - break; 24 + if (!fgets(buff, sizeof(buff), fp)) { 25 + printf("Failed to read %s!\n", fcpu); 26 + exit(1); 34 27 } 28 + 29 + len = strlen(buff); 30 + for (i = 0; i <= len; i++) { 31 + if (buff[i] == ',' || buff[i] == '\0') { 32 + buff[i] = '\0'; 33 + n = sscanf(&buff[j], "%u-%u", &start, &end); 34 + if (n <= 0) { 35 + printf("Failed to retrieve # possible CPUs!\n"); 36 + exit(1); 37 + } else if (n == 1) { 38 + end = start; 39 + } 40 + possible_cpus += end - start + 1; 41 + j = i + 1; 42 + } 43 + } 44 + 35 45 fclose(fp); 36 46 37 47 return possible_cpus;
+3 -6
tools/testing/selftests/bpf/test_btf.c
··· 1881 1881 }, 1882 1882 1883 1883 { 1884 - .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)", 1884 + .descr = "func proto (TYPEDEF=>FUNC_PROTO)", 1885 1885 .raw_types = { 1886 1886 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 1887 1887 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ 1888 - BTF_CONST_ENC(4), /* [3] */ 1889 - BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */ 1890 - BTF_FUNC_PROTO_ENC(0, 2), /* [5] */ 1888 + BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ 1889 + BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ 1891 1890 BTF_FUNC_PROTO_ARG_ENC(0, 1), 1892 1891 BTF_FUNC_PROTO_ARG_ENC(0, 2), 1893 1892 BTF_END_RAW, ··· 1900 1901 .key_type_id = 1, 1901 1902 .value_type_id = 1, 1902 1903 .max_entries = 4, 1903 - .btf_load_err = true, 1904 - .err_str = "Invalid type_id", 1905 1904 }, 1906 1905 1907 1906 {
+10 -3
tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
··· 37 37 exit $ksft_skip 38 38 fi 39 39 40 + present_cpus=`cat $SYSFS/devices/system/cpu/present` 41 + present_max=${present_cpus##*-} 42 + echo "present_cpus = $present_cpus present_max = $present_max" 43 + 40 44 echo -e "\t Cpus in online state: $online_cpus" 41 45 42 46 offline_cpus=`cat $SYSFS/devices/system/cpu/offline` ··· 155 151 online_max=0 156 152 offline_cpus=0 157 153 offline_max=0 154 + present_cpus=0 155 + present_max=0 158 156 159 157 while getopts e:ahp: opt; do 160 158 case $opt in ··· 196 190 online_cpu_expect_success $online_max 197 191 198 192 if [[ $offline_cpus -gt 0 ]]; then 199 - echo -e "\t offline to online to offline: cpu $offline_max" 200 - online_cpu_expect_success $offline_max 201 - offline_cpu_expect_success $offline_max 193 + echo -e "\t offline to online to offline: cpu $present_max" 194 + online_cpu_expect_success $present_max 195 + offline_cpu_expect_success $present_max 196 + online_cpu $present_max 202 197 fi 203 198 exit 0 204 199 else
+1
tools/testing/selftests/filesystems/binderfs/.gitignore
··· 1 + binderfs_test
+6
tools/testing/selftests/filesystems/binderfs/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + CFLAGS += -I../../../../../usr/include/ 4 + TEST_GEN_PROGS := binderfs_test 5 + 6 + include ../../lib.mk
+275
tools/testing/selftests/filesystems/binderfs/binderfs_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #define _GNU_SOURCE 4 + #include <errno.h> 5 + #include <fcntl.h> 6 + #include <sched.h> 7 + #include <stdbool.h> 8 + #include <stdio.h> 9 + #include <stdlib.h> 10 + #include <string.h> 11 + #include <sys/ioctl.h> 12 + #include <sys/mount.h> 13 + #include <sys/stat.h> 14 + #include <sys/types.h> 15 + #include <unistd.h> 16 + #include <linux/android/binder.h> 17 + #include <linux/android/binderfs.h> 18 + #include "../../kselftest.h" 19 + 20 + static ssize_t write_nointr(int fd, const void *buf, size_t count) 21 + { 22 + ssize_t ret; 23 + again: 24 + ret = write(fd, buf, count); 25 + if (ret < 0 && errno == EINTR) 26 + goto again; 27 + 28 + return ret; 29 + } 30 + 31 + static void write_to_file(const char *filename, const void *buf, size_t count, 32 + int allowed_errno) 33 + { 34 + int fd, saved_errno; 35 + ssize_t ret; 36 + 37 + fd = open(filename, O_WRONLY | O_CLOEXEC); 38 + if (fd < 0) 39 + ksft_exit_fail_msg("%s - Failed to open file %s\n", 40 + strerror(errno), filename); 41 + 42 + ret = write_nointr(fd, buf, count); 43 + if (ret < 0) { 44 + if (allowed_errno && (errno == allowed_errno)) { 45 + close(fd); 46 + return; 47 + } 48 + 49 + goto on_error; 50 + } 51 + 52 + if ((size_t)ret != count) 53 + goto on_error; 54 + 55 + close(fd); 56 + return; 57 + 58 + on_error: 59 + saved_errno = errno; 60 + close(fd); 61 + errno = saved_errno; 62 + 63 + if (ret < 0) 64 + ksft_exit_fail_msg("%s - Failed to write to file %s\n", 65 + strerror(errno), filename); 66 + 67 + ksft_exit_fail_msg("Failed to write to file %s\n", filename); 68 + } 69 + 70 + static void change_to_userns(void) 71 + { 72 + int ret; 73 + uid_t uid; 74 + gid_t gid; 75 + /* {g,u}id_map files only allow a max of 4096 bytes written to them */ 76 + char idmap[4096]; 77 + 78 + uid = getuid(); 79 + gid = getgid(); 80 + 81 + ret = unshare(CLONE_NEWUSER); 82 + if (ret < 0) 83 + ksft_exit_fail_msg("%s - Failed to unshare user namespace\n", 84 + strerror(errno)); 85 
+ 86 + write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT); 87 + 88 + ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid); 89 + if (ret < 0 || (size_t)ret >= sizeof(idmap)) 90 + ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n", 91 + strerror(errno)); 92 + 93 + write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0); 94 + 95 + ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid); 96 + if (ret < 0 || (size_t)ret >= sizeof(idmap)) 97 + ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n", 98 + strerror(errno)); 99 + 100 + write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0); 101 + 102 + ret = setgid(0); 103 + if (ret) 104 + ksft_exit_fail_msg("%s - Failed to setgid(0)\n", 105 + strerror(errno)); 106 + 107 + ret = setuid(0); 108 + if (ret) 109 + ksft_exit_fail_msg("%s - Failed to setgid(0)\n", 110 + strerror(errno)); 111 + } 112 + 113 + static void change_to_mountns(void) 114 + { 115 + int ret; 116 + 117 + ret = unshare(CLONE_NEWNS); 118 + if (ret < 0) 119 + ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n", 120 + strerror(errno)); 121 + 122 + ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0); 123 + if (ret < 0) 124 + ksft_exit_fail_msg("%s - Failed to mount / as private\n", 125 + strerror(errno)); 126 + } 127 + 128 + static void rmdir_protect_errno(const char *dir) 129 + { 130 + int saved_errno = errno; 131 + (void)rmdir(dir); 132 + errno = saved_errno; 133 + } 134 + 135 + static void __do_binderfs_test(void) 136 + { 137 + int fd, ret, saved_errno; 138 + size_t len; 139 + ssize_t wret; 140 + bool keep = false; 141 + struct binderfs_device device = { 0 }; 142 + struct binder_version version = { 0 }; 143 + 144 + change_to_mountns(); 145 + 146 + ret = mkdir("/dev/binderfs", 0755); 147 + if (ret < 0) { 148 + if (errno != EEXIST) 149 + ksft_exit_fail_msg( 150 + "%s - Failed to create binderfs mountpoint\n", 151 + strerror(errno)); 152 + 153 + keep = true; 154 + } 155 + 156 + ret = mount(NULL, "/dev/binderfs", 
"binder", 0, 0); 157 + if (ret < 0) { 158 + if (errno != ENODEV) 159 + ksft_exit_fail_msg("%s - Failed to mount binderfs\n", 160 + strerror(errno)); 161 + 162 + keep ? : rmdir_protect_errno("/dev/binderfs"); 163 + ksft_exit_skip( 164 + "The Android binderfs filesystem is not available\n"); 165 + } 166 + 167 + /* binderfs mount test passed */ 168 + ksft_inc_pass_cnt(); 169 + 170 + memcpy(device.name, "my-binder", strlen("my-binder")); 171 + 172 + fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC); 173 + if (fd < 0) 174 + ksft_exit_fail_msg( 175 + "%s - Failed to open binder-control device\n", 176 + strerror(errno)); 177 + 178 + ret = ioctl(fd, BINDER_CTL_ADD, &device); 179 + saved_errno = errno; 180 + close(fd); 181 + errno = saved_errno; 182 + if (ret < 0) { 183 + keep ? : rmdir_protect_errno("/dev/binderfs"); 184 + ksft_exit_fail_msg( 185 + "%s - Failed to allocate new binder device\n", 186 + strerror(errno)); 187 + } 188 + 189 + ksft_print_msg( 190 + "Allocated new binder device with major %d, minor %d, and name %s\n", 191 + device.major, device.minor, device.name); 192 + 193 + /* binder device allocation test passed */ 194 + ksft_inc_pass_cnt(); 195 + 196 + fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY); 197 + if (fd < 0) { 198 + keep ? : rmdir_protect_errno("/dev/binderfs"); 199 + ksft_exit_fail_msg("%s - Failed to open my-binder device\n", 200 + strerror(errno)); 201 + } 202 + 203 + ret = ioctl(fd, BINDER_VERSION, &version); 204 + saved_errno = errno; 205 + close(fd); 206 + errno = saved_errno; 207 + if (ret < 0) { 208 + keep ? 
: rmdir_protect_errno("/dev/binderfs"); 209 + ksft_exit_fail_msg( 210 + "%s - Failed to open perform BINDER_VERSION request\n", 211 + strerror(errno)); 212 + } 213 + 214 + ksft_print_msg("Detected binder version: %d\n", 215 + version.protocol_version); 216 + 217 + /* binder transaction with binderfs binder device passed */ 218 + ksft_inc_pass_cnt(); 219 + 220 + ret = unlink("/dev/binderfs/my-binder"); 221 + if (ret < 0) { 222 + keep ? : rmdir_protect_errno("/dev/binderfs"); 223 + ksft_exit_fail_msg("%s - Failed to delete binder device\n", 224 + strerror(errno)); 225 + } 226 + 227 + /* binder device removal passed */ 228 + ksft_inc_pass_cnt(); 229 + 230 + ret = unlink("/dev/binderfs/binder-control"); 231 + if (!ret) { 232 + keep ? : rmdir_protect_errno("/dev/binderfs"); 233 + ksft_exit_fail_msg("Managed to delete binder-control device\n"); 234 + } else if (errno != EPERM) { 235 + keep ? : rmdir_protect_errno("/dev/binderfs"); 236 + ksft_exit_fail_msg( 237 + "%s - Failed to delete binder-control device but exited with unexpected error code\n", 238 + strerror(errno)); 239 + } 240 + 241 + /* binder-control device removal failed as expected */ 242 + ksft_inc_xfail_cnt(); 243 + 244 + on_error: 245 + ret = umount2("/dev/binderfs", MNT_DETACH); 246 + keep ?: rmdir_protect_errno("/dev/binderfs"); 247 + if (ret < 0) 248 + ksft_exit_fail_msg("%s - Failed to unmount binderfs\n", 249 + strerror(errno)); 250 + 251 + /* binderfs unmount test passed */ 252 + ksft_inc_pass_cnt(); 253 + } 254 + 255 + static void binderfs_test_privileged() 256 + { 257 + if (geteuid() != 0) 258 + ksft_print_msg( 259 + "Tests are not run as root. Skipping privileged tests\n"); 260 + else 261 + __do_binderfs_test(); 262 + } 263 + 264 + static void binderfs_test_unprivileged() 265 + { 266 + change_to_userns(); 267 + __do_binderfs_test(); 268 + } 269 + 270 + int main(int argc, char *argv[]) 271 + { 272 + binderfs_test_privileged(); 273 + binderfs_test_unprivileged(); 274 + ksft_exit_pass(); 275 + }
+3
tools/testing/selftests/filesystems/binderfs/config
··· 1 + CONFIG_ANDROID=y 2 + CONFIG_ANDROID_BINDERFS=y 3 + CONFIG_ANDROID_BINDER_IPC=y
+2
tools/testing/selftests/ir/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 TEST_PROGS := ir_loopback.sh 3 3 TEST_GEN_PROGS_EXTENDED := ir_loopback 4 + APIDIR := ../../../include/uapi 5 + CFLAGS += -Wall -O2 -I$(APIDIR) 4 6 5 7 include ../lib.mk
+1 -1
tools/testing/selftests/net/Makefile
··· 21 21 KSFT_KHDR_INSTALL := 1 22 22 include ../lib.mk 23 23 24 - $(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma 24 + $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma 25 25 $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread 26 26 $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
+131 -22
tools/testing/selftests/net/xfrm_policy.sh
··· 28 28 SPI1=0x1 29 29 SPI2=0x2 30 30 31 + do_esp_policy() { 32 + local ns=$1 33 + local me=$2 34 + local remote=$3 35 + local lnet=$4 36 + local rnet=$5 37 + 38 + # to encrypt packets as they go out (includes forwarded packets that need encapsulation) 39 + ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow 40 + # to fwd decrypted packets after esp processing: 41 + ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow 42 + } 43 + 31 44 do_esp() { 32 45 local ns=$1 33 46 local me=$2 ··· 53 40 ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet 54 41 ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet 55 42 56 - # to encrypt packets as they go out (includes forwarded packets that need encapsulation) 57 - ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow 58 - # to fwd decrypted packets after esp processing: 59 - ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow 43 + do_esp_policy $ns $me $remote $lnet $rnet 44 + } 45 + 46 + # add policies with different netmasks, to make sure kernel carries 47 + # the policies contained within new netmask over when search tree is 48 + # re-built. 49 + # peer netns that are supposed to be encapsulated via esp have addresses 50 + # in the 10.0.1.0/24 and 10.0.2.0/24 subnets, respectively. 51 + # 52 + # Adding a policy for '10.0.1.0/23' will make it necessary to 53 + # alter the prefix of 10.0.1.0 subnet. 54 + # In case new prefix overlaps with existing node, the node and all 55 + # policies it carries need to be merged with the existing one(s). 
56 + # 57 + # Do that here. 58 + do_overlap() 59 + { 60 + local ns=$1 61 + 62 + # adds new nodes to tree (neither network exists yet in policy database). 63 + ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block 64 + 65 + # adds a new node in the 10.0.0.0/24 tree (dst node exists). 66 + ip -net $ns xfrm policy add src 10.2.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block 67 + 68 + # adds a 10.2.0.0/23 node, but for different dst. 69 + ip -net $ns xfrm policy add src 10.2.0.0/23 dst 10.0.1.0/24 dir fwd priority 200 action block 70 + 71 + # dst now overlaps with the 10.0.1.0/24 ESP policy in fwd. 72 + # kernel must 'promote' existing one (10.0.0.0/24) to 10.0.0.0/23. 73 + # But 10.0.0.0/23 also includes existing 10.0.1.0/24, so that node 74 + # also has to be merged too, including source-sorted subtrees. 75 + # old: 76 + # 10.0.0.0/24 (node 1 in dst tree of the bin) 77 + # 10.1.0.0/24 (node in src tree of dst node 1) 78 + # 10.2.0.0/24 (node in src tree of dst node 1) 79 + # 10.0.1.0/24 (node 2 in dst tree of the bin) 80 + # 10.0.2.0/24 (node in src tree of dst node 2) 81 + # 10.2.0.0/24 (node in src tree of dst node 2) 82 + # 83 + # The next 'policy add' adds dst '10.0.0.0/23', which means 84 + # that dst node 1 and dst node 2 have to be merged including 85 + # the sub-tree. As no duplicates are allowed, policies in 86 + # the two '10.0.2.0/24' are also merged. 87 + # 88 + # after the 'add', internal search tree should look like this: 89 + # 10.0.0.0/23 (node in dst tree of bin) 90 + # 10.0.2.0/24 (node in src tree of dst node) 91 + # 10.1.0.0/24 (node in src tree of dst node) 92 + # 10.2.0.0/24 (node in src tree of dst node) 93 + # 94 + # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23. 
95 + ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block 60 96 } 61 97 62 98 do_esp_policy_get_check() { ··· 222 160 return $lret 223 161 } 224 162 163 + check_exceptions() 164 + { 165 + logpostfix="$1" 166 + local lret=0 167 + 168 + # ping to .254 should be excluded from the tunnel (exception is in place). 169 + check_xfrm 0 254 170 + if [ $? -ne 0 ]; then 171 + echo "FAIL: expected ping to .254 to fail ($logpostfix)" 172 + lret=1 173 + else 174 + echo "PASS: ping to .254 bypassed ipsec tunnel ($logpostfix)" 175 + fi 176 + 177 + # ping to .253 should use use ipsec due to direct policy exception. 178 + check_xfrm 1 253 179 + if [ $? -ne 0 ]; then 180 + echo "FAIL: expected ping to .253 to use ipsec tunnel ($logpostfix)" 181 + lret=1 182 + else 183 + echo "PASS: direct policy matches ($logpostfix)" 184 + fi 185 + 186 + # ping to .2 should use ipsec. 187 + check_xfrm 1 2 188 + if [ $? -ne 0 ]; then 189 + echo "FAIL: expected ping to .2 to use ipsec tunnel ($logpostfix)" 190 + lret=1 191 + else 192 + echo "PASS: policy matches ($logpostfix)" 193 + fi 194 + 195 + return $lret 196 + } 197 + 225 198 #check for needed privileges 226 199 if [ "$(id -u)" -ne 0 ];then 227 200 echo "SKIP: Need root privileges" ··· 367 270 do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96 368 271 do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96 369 272 370 - # ping to .254 should now be excluded from the tunnel 371 - check_xfrm 0 254 273 + check_exceptions "exceptions" 372 274 if [ $? -ne 0 ]; then 373 - echo "FAIL: expected ping to .254 to fail" 374 275 ret=1 375 - else 376 - echo "PASS: ping to .254 bypassed ipsec tunnel" 377 276 fi 378 277 379 - # ping to .253 should use use ipsec due to direct policy exception. 380 - check_xfrm 1 253 278 + # insert block policies with adjacent/overlapping netmasks 279 + do_overlap ns3 280 + 281 + check_exceptions "exceptions and block policies" 381 282 if [ $? 
-ne 0 ]; then 382 - echo "FAIL: expected ping to .253 to use ipsec tunnel" 383 283 ret=1 384 - else 385 - echo "PASS: direct policy matches" 386 284 fi 387 285 388 - # ping to .2 should use ipsec. 389 - check_xfrm 1 2 390 - if [ $? -ne 0 ]; then 391 - echo "FAIL: expected ping to .2 to use ipsec tunnel" 392 - ret=1 393 - else 394 - echo "PASS: policy matches" 395 - fi 286 + for n in ns3 ns4;do 287 + ip -net $n xfrm policy set hthresh4 28 24 hthresh6 126 125 288 + sleep $((RANDOM%5)) 289 + done 290 + 291 + check_exceptions "exceptions and block policies after hresh changes" 292 + 293 + # full flush of policy db, check everything gets freed incl. internal meta data 294 + ip -net ns3 xfrm policy flush 295 + 296 + do_esp_policy ns3 10.0.3.1 10.0.3.10 10.0.1.0/24 10.0.2.0/24 297 + do_exception ns3 10.0.3.1 10.0.3.10 10.0.2.253 10.0.2.240/28 298 + 299 + # move inexact policies to hash table 300 + ip -net ns3 xfrm policy set hthresh4 16 16 301 + 302 + sleep $((RANDOM%5)) 303 + check_exceptions "exceptions and block policies after hthresh change in ns3" 304 + 305 + # restore original hthresh settings -- move policies back to tables 306 + for n in ns3 ns4;do 307 + ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128 308 + sleep $((RANDOM%5)) 309 + done 310 + check_exceptions "exceptions and block policies after hresh change to normal" 396 311 397 312 for i in 1 2 3 4;do ip netns del ns$i;done 398 313
+1 -1
tools/testing/selftests/netfilter/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 # Makefile for netfilter selftests 3 3 4 - TEST_PROGS := nft_trans_stress.sh 4 + TEST_PROGS := nft_trans_stress.sh nft_nat.sh 5 5 6 6 include ../lib.mk
+1 -1
tools/testing/selftests/netfilter/config
··· 1 1 CONFIG_NET_NS=y 2 - NF_TABLES_INET=y 2 + CONFIG_NF_TABLES_INET=y
+762
tools/testing/selftests/netfilter/nft_nat.sh
··· 1 + #!/bin/bash 2 + # 3 + # This test is for basic NAT functionality: snat, dnat, redirect, masquerade. 4 + # 5 + 6 + # Kselftest framework requirement - SKIP code is 4. 7 + ksft_skip=4 8 + ret=0 9 + 10 + nft --version > /dev/null 2>&1 11 + if [ $? -ne 0 ];then 12 + echo "SKIP: Could not run test without nft tool" 13 + exit $ksft_skip 14 + fi 15 + 16 + ip -Version > /dev/null 2>&1 17 + if [ $? -ne 0 ];then 18 + echo "SKIP: Could not run test without ip tool" 19 + exit $ksft_skip 20 + fi 21 + 22 + ip netns add ns0 23 + ip netns add ns1 24 + ip netns add ns2 25 + 26 + ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 27 + ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 28 + 29 + ip -net ns0 link set lo up 30 + ip -net ns0 link set veth0 up 31 + ip -net ns0 addr add 10.0.1.1/24 dev veth0 32 + ip -net ns0 addr add dead:1::1/64 dev veth0 33 + 34 + ip -net ns0 link set veth1 up 35 + ip -net ns0 addr add 10.0.2.1/24 dev veth1 36 + ip -net ns0 addr add dead:2::1/64 dev veth1 37 + 38 + for i in 1 2; do 39 + ip -net ns$i link set lo up 40 + ip -net ns$i link set eth0 up 41 + ip -net ns$i addr add 10.0.$i.99/24 dev eth0 42 + ip -net ns$i route add default via 10.0.$i.1 43 + ip -net ns$i addr add dead:$i::99/64 dev eth0 44 + ip -net ns$i route add default via dead:$i::1 45 + done 46 + 47 + bad_counter() 48 + { 49 + local ns=$1 50 + local counter=$2 51 + local expect=$3 52 + 53 + echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2 54 + ip netns exec $ns nft list counter inet filter $counter 1>&2 55 + } 56 + 57 + check_counters() 58 + { 59 + ns=$1 60 + local lret=0 61 + 62 + cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84") 63 + if [ $? -ne 0 ]; then 64 + bad_counter $ns ns0in "packets 1 bytes 84" 65 + lret=1 66 + fi 67 + cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84") 68 + if [ $? 
-ne 0 ]; then 69 + bad_counter $ns ns0out "packets 1 bytes 84" 70 + lret=1 71 + fi 72 + 73 + expect="packets 1 bytes 104" 74 + cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect") 75 + if [ $? -ne 0 ]; then 76 + bad_counter $ns ns0in6 "$expect" 77 + lret=1 78 + fi 79 + cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect") 80 + if [ $? -ne 0 ]; then 81 + bad_counter $ns ns0out6 "$expect" 82 + lret=1 83 + fi 84 + 85 + return $lret 86 + } 87 + 88 + check_ns0_counters() 89 + { 90 + local ns=$1 91 + local lret=0 92 + 93 + cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") 94 + if [ $? -ne 0 ]; then 95 + bad_counter ns0 ns0in "packets 0 bytes 0" 96 + lret=1 97 + fi 98 + 99 + cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") 100 + if [ $? -ne 0 ]; then 101 + bad_counter ns0 ns0in6 "packets 0 bytes 0" 102 + lret=1 103 + fi 104 + 105 + cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") 106 + if [ $? -ne 0 ]; then 107 + bad_counter ns0 ns0out "packets 0 bytes 0" 108 + lret=1 109 + fi 110 + cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") 111 + if [ $? -ne 0 ]; then 112 + bad_counter ns0 ns0out6 "packets 0 bytes 0" 113 + lret=1 114 + fi 115 + 116 + for dir in "in" "out" ; do 117 + expect="packets 1 bytes 84" 118 + cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect") 119 + if [ $? -ne 0 ]; then 120 + bad_counter ns0 $ns$dir "$expect" 121 + lret=1 122 + fi 123 + 124 + expect="packets 1 bytes 104" 125 + cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") 126 + if [ $? 
-ne 0 ]; then 127 + bad_counter ns0 $ns$dir6 "$expect" 128 + lret=1 129 + fi 130 + done 131 + 132 + return $lret 133 + } 134 + 135 + reset_counters() 136 + { 137 + for i in 0 1 2;do 138 + ip netns exec ns$i nft reset counters inet > /dev/null 139 + done 140 + } 141 + 142 + test_local_dnat6() 143 + { 144 + local lret=0 145 + ip netns exec ns0 nft -f - <<EOF 146 + table ip6 nat { 147 + chain output { 148 + type nat hook output priority 0; policy accept; 149 + ip6 daddr dead:1::99 dnat to dead:2::99 150 + } 151 + } 152 + EOF 153 + if [ $? -ne 0 ]; then 154 + echo "SKIP: Could not add add ip6 dnat hook" 155 + return $ksft_skip 156 + fi 157 + 158 + # ping netns1, expect rewrite to netns2 159 + ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null 160 + if [ $? -ne 0 ]; then 161 + lret=1 162 + echo "ERROR: ping6 failed" 163 + return $lret 164 + fi 165 + 166 + expect="packets 0 bytes 0" 167 + for dir in "in6" "out6" ; do 168 + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") 169 + if [ $? -ne 0 ]; then 170 + bad_counter ns0 ns1$dir "$expect" 171 + lret=1 172 + fi 173 + done 174 + 175 + expect="packets 1 bytes 104" 176 + for dir in "in6" "out6" ; do 177 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") 178 + if [ $? -ne 0 ]; then 179 + bad_counter ns0 ns2$dir "$expect" 180 + lret=1 181 + fi 182 + done 183 + 184 + # expect 0 count in ns1 185 + expect="packets 0 bytes 0" 186 + for dir in "in6" "out6" ; do 187 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") 188 + if [ $? -ne 0 ]; then 189 + bad_counter ns1 ns0$dir "$expect" 190 + lret=1 191 + fi 192 + done 193 + 194 + # expect 1 packet in ns2 195 + expect="packets 1 bytes 104" 196 + for dir in "in6" "out6" ; do 197 + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") 198 + if [ $? 
-ne 0 ]; then 199 + bad_counter ns2 ns0$dir "$expect" 200 + lret=1 201 + fi 202 + done 203 + 204 + test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2" 205 + ip netns exec ns0 nft flush chain ip6 nat output 206 + 207 + return $lret 208 + } 209 + 210 + test_local_dnat() 211 + { 212 + local lret=0 213 + ip netns exec ns0 nft -f - <<EOF 214 + table ip nat { 215 + chain output { 216 + type nat hook output priority 0; policy accept; 217 + ip daddr 10.0.1.99 dnat to 10.0.2.99 218 + } 219 + } 220 + EOF 221 + # ping netns1, expect rewrite to netns2 222 + ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null 223 + if [ $? -ne 0 ]; then 224 + lret=1 225 + echo "ERROR: ping failed" 226 + return $lret 227 + fi 228 + 229 + expect="packets 0 bytes 0" 230 + for dir in "in" "out" ; do 231 + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") 232 + if [ $? -ne 0 ]; then 233 + bad_counter ns0 ns1$dir "$expect" 234 + lret=1 235 + fi 236 + done 237 + 238 + expect="packets 1 bytes 84" 239 + for dir in "in" "out" ; do 240 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") 241 + if [ $? -ne 0 ]; then 242 + bad_counter ns0 ns2$dir "$expect" 243 + lret=1 244 + fi 245 + done 246 + 247 + # expect 0 count in ns1 248 + expect="packets 0 bytes 0" 249 + for dir in "in" "out" ; do 250 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") 251 + if [ $? -ne 0 ]; then 252 + bad_counter ns1 ns0$dir "$expect" 253 + lret=1 254 + fi 255 + done 256 + 257 + # expect 1 packet in ns2 258 + expect="packets 1 bytes 84" 259 + for dir in "in" "out" ; do 260 + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") 261 + if [ $? 
-ne 0 ]; then 262 + bad_counter ns2 ns0$dir "$expect" 263 + lret=1 264 + fi 265 + done 266 + 267 + test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2" 268 + 269 + ip netns exec ns0 nft flush chain ip nat output 270 + 271 + reset_counters 272 + ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null 273 + if [ $? -ne 0 ]; then 274 + lret=1 275 + echo "ERROR: ping failed" 276 + return $lret 277 + fi 278 + 279 + expect="packets 1 bytes 84" 280 + for dir in "in" "out" ; do 281 + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") 282 + if [ $? -ne 0 ]; then 283 + bad_counter ns1 ns1$dir "$expect" 284 + lret=1 285 + fi 286 + done 287 + expect="packets 0 bytes 0" 288 + for dir in "in" "out" ; do 289 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") 290 + if [ $? -ne 0 ]; then 291 + bad_counter ns0 ns2$dir "$expect" 292 + lret=1 293 + fi 294 + done 295 + 296 + # expect 1 count in ns1 297 + expect="packets 1 bytes 84" 298 + for dir in "in" "out" ; do 299 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") 300 + if [ $? -ne 0 ]; then 301 + bad_counter ns0 ns0$dir "$expect" 302 + lret=1 303 + fi 304 + done 305 + 306 + # expect 0 packet in ns2 307 + expect="packets 0 bytes 0" 308 + for dir in "in" "out" ; do 309 + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") 310 + if [ $? -ne 0 ]; then 311 + bad_counter ns2 ns2$dir "$expect" 312 + lret=1 313 + fi 314 + done 315 + 316 + test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush" 317 + 318 + return $lret 319 + } 320 + 321 + 322 + test_masquerade6() 323 + { 324 + local lret=0 325 + 326 + ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null 327 + 328 + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 329 + if [ $? 
-ne 0 ] ; then 330 + echo "ERROR: cannot ping ns1 from ns2 via ipv6" 331 + return 1 332 + lret=1 333 + fi 334 + 335 + expect="packets 1 bytes 104" 336 + for dir in "in6" "out6" ; do 337 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 338 + if [ $? -ne 0 ]; then 339 + bad_counter ns1 ns2$dir "$expect" 340 + lret=1 341 + fi 342 + 343 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") 344 + if [ $? -ne 0 ]; then 345 + bad_counter ns2 ns1$dir "$expect" 346 + lret=1 347 + fi 348 + done 349 + 350 + reset_counters 351 + 352 + # add masquerading rule 353 + ip netns exec ns0 nft -f - <<EOF 354 + table ip6 nat { 355 + chain postrouting { 356 + type nat hook postrouting priority 0; policy accept; 357 + meta oif veth0 masquerade 358 + } 359 + } 360 + EOF 361 + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 362 + if [ $? -ne 0 ] ; then 363 + echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading" 364 + lret=1 365 + fi 366 + 367 + # ns1 should have seen packets from ns0, due to masquerade 368 + expect="packets 1 bytes 104" 369 + for dir in "in6" "out6" ; do 370 + 371 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") 372 + if [ $? -ne 0 ]; then 373 + bad_counter ns1 ns0$dir "$expect" 374 + lret=1 375 + fi 376 + 377 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") 378 + if [ $? -ne 0 ]; then 379 + bad_counter ns2 ns1$dir "$expect" 380 + lret=1 381 + fi 382 + done 383 + 384 + # ns1 should not have seen packets from ns2, due to masquerade 385 + expect="packets 0 bytes 0" 386 + for dir in "in6" "out6" ; do 387 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 388 + if [ $? -ne 0 ]; then 389 + bad_counter ns1 ns0$dir "$expect" 390 + lret=1 391 + fi 392 + 393 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 394 + if [ $? 
-ne 0 ]; then 395 + bad_counter ns2 ns1$dir "$expect" 396 + lret=1 397 + fi 398 + done 399 + 400 + ip netns exec ns0 nft flush chain ip6 nat postrouting 401 + if [ $? -ne 0 ]; then 402 + echo "ERROR: Could not flush ip6 nat postrouting" 1>&2 403 + lret=1 404 + fi 405 + 406 + test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2" 407 + 408 + return $lret 409 + } 410 + 411 + test_masquerade() 412 + { 413 + local lret=0 414 + 415 + ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null 416 + ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null 417 + 418 + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 419 + if [ $? -ne 0 ] ; then 420 + echo "ERROR: canot ping ns1 from ns2" 421 + lret=1 422 + fi 423 + 424 + expect="packets 1 bytes 84" 425 + for dir in "in" "out" ; do 426 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 427 + if [ $? -ne 0 ]; then 428 + bad_counter ns1 ns2$dir "$expect" 429 + lret=1 430 + fi 431 + 432 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") 433 + if [ $? -ne 0 ]; then 434 + bad_counter ns2 ns1$dir "$expect" 435 + lret=1 436 + fi 437 + done 438 + 439 + reset_counters 440 + 441 + # add masquerading rule 442 + ip netns exec ns0 nft -f - <<EOF 443 + table ip nat { 444 + chain postrouting { 445 + type nat hook postrouting priority 0; policy accept; 446 + meta oif veth0 masquerade 447 + } 448 + } 449 + EOF 450 + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 451 + if [ $? -ne 0 ] ; then 452 + echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading" 453 + lret=1 454 + fi 455 + 456 + # ns1 should have seen packets from ns0, due to masquerade 457 + expect="packets 1 bytes 84" 458 + for dir in "in" "out" ; do 459 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") 460 + if [ $? 
-ne 0 ]; then 461 + bad_counter ns1 ns0$dir "$expect" 462 + lret=1 463 + fi 464 + 465 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") 466 + if [ $? -ne 0 ]; then 467 + bad_counter ns2 ns1$dir "$expect" 468 + lret=1 469 + fi 470 + done 471 + 472 + # ns1 should not have seen packets from ns2, due to masquerade 473 + expect="packets 0 bytes 0" 474 + for dir in "in" "out" ; do 475 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 476 + if [ $? -ne 0 ]; then 477 + bad_counter ns1 ns0$dir "$expect" 478 + lret=1 479 + fi 480 + 481 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 482 + if [ $? -ne 0 ]; then 483 + bad_counter ns2 ns1$dir "$expect" 484 + lret=1 485 + fi 486 + done 487 + 488 + ip netns exec ns0 nft flush chain ip nat postrouting 489 + if [ $? -ne 0 ]; then 490 + echo "ERROR: Could not flush nat postrouting" 1>&2 491 + lret=1 492 + fi 493 + 494 + test $lret -eq 0 && echo "PASS: IP masquerade for ns2" 495 + 496 + return $lret 497 + } 498 + 499 + test_redirect6() 500 + { 501 + local lret=0 502 + 503 + ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null 504 + 505 + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 506 + if [ $? -ne 0 ] ; then 507 + echo "ERROR: cannnot ping ns1 from ns2 via ipv6" 508 + lret=1 509 + fi 510 + 511 + expect="packets 1 bytes 104" 512 + for dir in "in6" "out6" ; do 513 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 514 + if [ $? -ne 0 ]; then 515 + bad_counter ns1 ns2$dir "$expect" 516 + lret=1 517 + fi 518 + 519 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") 520 + if [ $? 
-ne 0 ]; then 521 + bad_counter ns2 ns1$dir "$expect" 522 + lret=1 523 + fi 524 + done 525 + 526 + reset_counters 527 + 528 + # add redirect rule 529 + ip netns exec ns0 nft -f - <<EOF 530 + table ip6 nat { 531 + chain prerouting { 532 + type nat hook prerouting priority 0; policy accept; 533 + meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect 534 + } 535 + } 536 + EOF 537 + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 538 + if [ $? -ne 0 ] ; then 539 + echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect" 540 + lret=1 541 + fi 542 + 543 + # ns1 should have seen no packets from ns2, due to redirection 544 + expect="packets 0 bytes 0" 545 + for dir in "in6" "out6" ; do 546 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 547 + if [ $? -ne 0 ]; then 548 + bad_counter ns1 ns0$dir "$expect" 549 + lret=1 550 + fi 551 + done 552 + 553 + # ns0 should have seen packets from ns2, due to masquerade 554 + expect="packets 1 bytes 104" 555 + for dir in "in6" "out6" ; do 556 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") 557 + if [ $? -ne 0 ]; then 558 + bad_counter ns1 ns0$dir "$expect" 559 + lret=1 560 + fi 561 + done 562 + 563 + ip netns exec ns0 nft delete table ip6 nat 564 + if [ $? -ne 0 ]; then 565 + echo "ERROR: Could not delete ip6 nat table" 1>&2 566 + lret=1 567 + fi 568 + 569 + test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2" 570 + 571 + return $lret 572 + } 573 + 574 + test_redirect() 575 + { 576 + local lret=0 577 + 578 + ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null 579 + ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null 580 + 581 + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 582 + if [ $? 
-ne 0 ] ; then 583 + echo "ERROR: cannot ping ns1 from ns2" 584 + lret=1 585 + fi 586 + 587 + expect="packets 1 bytes 84" 588 + for dir in "in" "out" ; do 589 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 590 + if [ $? -ne 0 ]; then 591 + bad_counter ns1 ns2$dir "$expect" 592 + lret=1 593 + fi 594 + 595 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") 596 + if [ $? -ne 0 ]; then 597 + bad_counter ns2 ns1$dir "$expect" 598 + lret=1 599 + fi 600 + done 601 + 602 + reset_counters 603 + 604 + # add redirect rule 605 + ip netns exec ns0 nft -f - <<EOF 606 + table ip nat { 607 + chain prerouting { 608 + type nat hook prerouting priority 0; policy accept; 609 + meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect 610 + } 611 + } 612 + EOF 613 + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 614 + if [ $? -ne 0 ] ; then 615 + echo "ERROR: cannot ping ns1 from ns2 with active ip redirect" 616 + lret=1 617 + fi 618 + 619 + # ns1 should have seen no packets from ns2, due to redirection 620 + expect="packets 0 bytes 0" 621 + for dir in "in" "out" ; do 622 + 623 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") 624 + if [ $? -ne 0 ]; then 625 + bad_counter ns1 ns0$dir "$expect" 626 + lret=1 627 + fi 628 + done 629 + 630 + # ns0 should have seen packets from ns2, due to masquerade 631 + expect="packets 1 bytes 84" 632 + for dir in "in" "out" ; do 633 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") 634 + if [ $? -ne 0 ]; then 635 + bad_counter ns1 ns0$dir "$expect" 636 + lret=1 637 + fi 638 + done 639 + 640 + ip netns exec ns0 nft delete table ip nat 641 + if [ $? 
-ne 0 ]; then 642 + echo "ERROR: Could not delete nat table" 1>&2 643 + lret=1 644 + fi 645 + 646 + test $lret -eq 0 && echo "PASS: IP redirection for ns2" 647 + 648 + return $lret 649 + } 650 + 651 + 652 + # ip netns exec ns0 ping -c 1 -q 10.0.$i.99 653 + for i in 0 1 2; do 654 + ip netns exec ns$i nft -f - <<EOF 655 + table inet filter { 656 + counter ns0in {} 657 + counter ns1in {} 658 + counter ns2in {} 659 + 660 + counter ns0out {} 661 + counter ns1out {} 662 + counter ns2out {} 663 + 664 + counter ns0in6 {} 665 + counter ns1in6 {} 666 + counter ns2in6 {} 667 + 668 + counter ns0out6 {} 669 + counter ns1out6 {} 670 + counter ns2out6 {} 671 + 672 + map nsincounter { 673 + type ipv4_addr : counter 674 + elements = { 10.0.1.1 : "ns0in", 675 + 10.0.2.1 : "ns0in", 676 + 10.0.1.99 : "ns1in", 677 + 10.0.2.99 : "ns2in" } 678 + } 679 + 680 + map nsincounter6 { 681 + type ipv6_addr : counter 682 + elements = { dead:1::1 : "ns0in6", 683 + dead:2::1 : "ns0in6", 684 + dead:1::99 : "ns1in6", 685 + dead:2::99 : "ns2in6" } 686 + } 687 + 688 + map nsoutcounter { 689 + type ipv4_addr : counter 690 + elements = { 10.0.1.1 : "ns0out", 691 + 10.0.2.1 : "ns0out", 692 + 10.0.1.99: "ns1out", 693 + 10.0.2.99: "ns2out" } 694 + } 695 + 696 + map nsoutcounter6 { 697 + type ipv6_addr : counter 698 + elements = { dead:1::1 : "ns0out6", 699 + dead:2::1 : "ns0out6", 700 + dead:1::99 : "ns1out6", 701 + dead:2::99 : "ns2out6" } 702 + } 703 + 704 + chain input { 705 + type filter hook input priority 0; policy accept; 706 + counter name ip saddr map @nsincounter 707 + icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6 708 + } 709 + chain output { 710 + type filter hook output priority 0; policy accept; 711 + counter name ip daddr map @nsoutcounter 712 + icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6 713 + } 714 + } 715 + EOF 716 + done 717 + 718 + sleep 3 719 + # test basic connectivity 720 + for i in 1 2; do 721 + ip 
netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null 722 + if [ $? -ne 0 ];then 723 + echo "ERROR: Could not reach other namespace(s)" 1>&2 724 + ret=1 725 + fi 726 + 727 + ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null 728 + if [ $? -ne 0 ];then 729 + echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2 730 + ret=1 731 + fi 732 + check_counters ns$i 733 + if [ $? -ne 0 ]; then 734 + ret=1 735 + fi 736 + 737 + check_ns0_counters ns$i 738 + if [ $? -ne 0 ]; then 739 + ret=1 740 + fi 741 + reset_counters 742 + done 743 + 744 + if [ $ret -eq 0 ];then 745 + echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2" 746 + fi 747 + 748 + reset_counters 749 + test_local_dnat 750 + test_local_dnat6 751 + 752 + reset_counters 753 + test_masquerade 754 + test_masquerade6 755 + 756 + reset_counters 757 + test_redirect 758 + test_redirect6 759 + 760 + for i in 0 1 2; do ip netns del ns$i;done 761 + 762 + exit $ret
+1
tools/testing/selftests/proc/.gitignore
··· 10 10 /proc-uptime-002 11 11 /read 12 12 /self 13 + /setns-dcache 13 14 /thread-self
+1
tools/testing/selftests/proc/Makefile
··· 14 14 TEST_GEN_PROGS += proc-uptime-002 15 15 TEST_GEN_PROGS += read 16 16 TEST_GEN_PROGS += self 17 + TEST_GEN_PROGS += setns-dcache 17 18 TEST_GEN_PROGS += thread-self 18 19 19 20 include ../lib.mk
+129
tools/testing/selftests/proc/setns-dcache.c
··· 1 + /* 2 + * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com> 3 + * 4 + * Permission to use, copy, modify, and distribute this software for any 5 + * purpose with or without fee is hereby granted, provided that the above 6 + * copyright notice and this permission notice appear in all copies. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 + */ 16 + /* 17 + * Test that setns(CLONE_NEWNET) points to new /proc/net content even 18 + * if old one is in dcache. 19 + * 20 + * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled. 21 + */ 22 + #undef NDEBUG 23 + #include <assert.h> 24 + #include <errno.h> 25 + #include <sched.h> 26 + #include <signal.h> 27 + #include <stdio.h> 28 + #include <stdlib.h> 29 + #include <string.h> 30 + #include <unistd.h> 31 + #include <sys/types.h> 32 + #include <sys/stat.h> 33 + #include <fcntl.h> 34 + #include <sys/socket.h> 35 + 36 + static pid_t pid = -1; 37 + 38 + static void f(void) 39 + { 40 + if (pid > 0) { 41 + kill(pid, SIGTERM); 42 + } 43 + } 44 + 45 + int main(void) 46 + { 47 + int fd[2]; 48 + char _ = 0; 49 + int nsfd; 50 + 51 + atexit(f); 52 + 53 + /* Check for priviledges and syscall availability straight away. */ 54 + if (unshare(CLONE_NEWNET) == -1) { 55 + if (errno == ENOSYS || errno == EPERM) { 56 + return 4; 57 + } 58 + return 1; 59 + } 60 + /* Distinguisher between two otherwise empty net namespaces. 
*/ 61 + if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) { 62 + return 1; 63 + } 64 + 65 + if (pipe(fd) == -1) { 66 + return 1; 67 + } 68 + 69 + pid = fork(); 70 + if (pid == -1) { 71 + return 1; 72 + } 73 + 74 + if (pid == 0) { 75 + if (unshare(CLONE_NEWNET) == -1) { 76 + return 1; 77 + } 78 + 79 + if (write(fd[1], &_, 1) != 1) { 80 + return 1; 81 + } 82 + 83 + pause(); 84 + 85 + return 0; 86 + } 87 + 88 + if (read(fd[0], &_, 1) != 1) { 89 + return 1; 90 + } 91 + 92 + { 93 + char buf[64]; 94 + snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid); 95 + nsfd = open(buf, O_RDONLY); 96 + if (nsfd == -1) { 97 + return 1; 98 + } 99 + } 100 + 101 + /* Reliably pin dentry into dcache. */ 102 + (void)open("/proc/net/unix", O_RDONLY); 103 + 104 + if (setns(nsfd, CLONE_NEWNET) == -1) { 105 + return 1; 106 + } 107 + 108 + kill(pid, SIGTERM); 109 + pid = 0; 110 + 111 + { 112 + char buf[4096]; 113 + ssize_t rv; 114 + int fd; 115 + 116 + fd = open("/proc/net/unix", O_RDONLY); 117 + if (fd == -1) { 118 + return 1; 119 + } 120 + 121 + #define S "Num RefCount Protocol Flags Type St Inode Path\n" 122 + rv = read(fd, buf, sizeof(buf)); 123 + 124 + assert(rv == strlen(S)); 125 + assert(memcmp(buf, S, strlen(S)) == 0); 126 + } 127 + 128 + return 0; 129 + }
+57 -15
tools/testing/selftests/seccomp/seccomp_bpf.c
··· 1608 1608 #ifdef SYSCALL_NUM_RET_SHARE_REG 1609 1609 # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) 1610 1610 #else 1611 - # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action) 1611 + # define EXPECT_SYSCALL_RETURN(val, action) \ 1612 + do { \ 1613 + errno = 0; \ 1614 + if (val < 0) { \ 1615 + EXPECT_EQ(-1, action); \ 1616 + EXPECT_EQ(-(val), errno); \ 1617 + } else { \ 1618 + EXPECT_EQ(val, action); \ 1619 + } \ 1620 + } while (0) 1612 1621 #endif 1613 1622 1614 1623 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for ··· 1656 1647 1657 1648 /* Architecture-specific syscall changing routine. */ 1658 1649 void change_syscall(struct __test_metadata *_metadata, 1659 - pid_t tracee, int syscall) 1650 + pid_t tracee, int syscall, int result) 1660 1651 { 1661 1652 int ret; 1662 1653 ARCH_REGS regs; ··· 1715 1706 #ifdef SYSCALL_NUM_RET_SHARE_REG 1716 1707 TH_LOG("Can't modify syscall return on this architecture"); 1717 1708 #else 1718 - regs.SYSCALL_RET = EPERM; 1709 + regs.SYSCALL_RET = result; 1719 1710 #endif 1720 1711 1721 1712 #ifdef HAVE_GETREGS ··· 1743 1734 case 0x1002: 1744 1735 /* change getpid to getppid. */ 1745 1736 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); 1746 - change_syscall(_metadata, tracee, __NR_getppid); 1737 + change_syscall(_metadata, tracee, __NR_getppid, 0); 1747 1738 break; 1748 1739 case 0x1003: 1749 - /* skip gettid. */ 1740 + /* skip gettid with valid return code. */ 1750 1741 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); 1751 - change_syscall(_metadata, tracee, -1); 1742 + change_syscall(_metadata, tracee, -1, 45000); 1752 1743 break; 1753 1744 case 0x1004: 1745 + /* skip openat with error. 
*/ 1746 + EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee)); 1747 + change_syscall(_metadata, tracee, -1, -ESRCH); 1748 + break; 1749 + case 0x1005: 1754 1750 /* do nothing (allow getppid) */ 1755 1751 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); 1756 1752 break; ··· 1788 1774 nr = get_syscall(_metadata, tracee); 1789 1775 1790 1776 if (nr == __NR_getpid) 1791 - change_syscall(_metadata, tracee, __NR_getppid); 1777 + change_syscall(_metadata, tracee, __NR_getppid, 0); 1778 + if (nr == __NR_gettid) 1779 + change_syscall(_metadata, tracee, -1, 45000); 1792 1780 if (nr == __NR_openat) 1793 - change_syscall(_metadata, tracee, -1); 1781 + change_syscall(_metadata, tracee, -1, -ESRCH); 1794 1782 } 1795 1783 1796 1784 FIXTURE_DATA(TRACE_syscall) { ··· 1809 1793 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), 1810 1794 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), 1811 1795 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), 1812 - BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 1796 + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1), 1813 1797 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), 1798 + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 1799 + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005), 1814 1800 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1815 1801 }; 1816 1802 ··· 1860 1842 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 1861 1843 } 1862 1844 1863 - TEST_F(TRACE_syscall, ptrace_syscall_dropped) 1845 + TEST_F(TRACE_syscall, ptrace_syscall_errno) 1864 1846 { 1865 1847 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ 1866 1848 teardown_trace_fixture(_metadata, self->tracer); 1867 1849 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, 1868 1850 true); 1869 1851 1870 - /* Tracer should skip the open syscall, resulting in EPERM. */ 1871 - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat)); 1852 + /* Tracer should skip the open syscall, resulting in ESRCH. 
*/ 1853 + EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)); 1854 + } 1855 + 1856 + TEST_F(TRACE_syscall, ptrace_syscall_faked) 1857 + { 1858 + /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ 1859 + teardown_trace_fixture(_metadata, self->tracer); 1860 + self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, 1861 + true); 1862 + 1863 + /* Tracer should skip the gettid syscall, resulting fake pid. */ 1864 + EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); 1872 1865 } 1873 1866 1874 1867 TEST_F(TRACE_syscall, syscall_allowed) ··· 1912 1883 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 1913 1884 } 1914 1885 1915 - TEST_F(TRACE_syscall, syscall_dropped) 1886 + TEST_F(TRACE_syscall, syscall_errno) 1887 + { 1888 + long ret; 1889 + 1890 + ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1891 + ASSERT_EQ(0, ret); 1892 + 1893 + ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); 1894 + ASSERT_EQ(0, ret); 1895 + 1896 + /* openat has been skipped and an errno return. */ 1897 + EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)); 1898 + } 1899 + 1900 + TEST_F(TRACE_syscall, syscall_faked) 1916 1901 { 1917 1902 long ret; 1918 1903 ··· 1937 1894 ASSERT_EQ(0, ret); 1938 1895 1939 1896 /* gettid has been skipped and an altered return value stored. */ 1940 - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid)); 1941 - EXPECT_NE(self->mytid, syscall(__NR_gettid)); 1897 + EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); 1942 1898 } 1943 1899 1944 1900 TEST_F(TRACE_syscall, skip_after_RET_TRACE)
+1 -1
tools/testing/selftests/timers/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 CFLAGS += -O3 -Wl,-no-as-needed -Wall 3 - LDFLAGS += -lrt -lpthread -lm 3 + LDLIBS += -lrt -lpthread -lm 4 4 5 5 # these are all "safe" tests that don't modify 6 6 # system time or require escalated privileges
+2 -1
virt/kvm/kvm_main.c
··· 3000 3000 if (ops->init) 3001 3001 ops->init(dev); 3002 3002 3003 + kvm_get_kvm(kvm); 3003 3004 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 3004 3005 if (ret < 0) { 3006 + kvm_put_kvm(kvm); 3005 3007 mutex_lock(&kvm->lock); 3006 3008 list_del(&dev->vm_node); 3007 3009 mutex_unlock(&kvm->lock); ··· 3011 3009 return ret; 3012 3010 } 3013 3011 3014 - kvm_get_kvm(kvm); 3015 3012 cd->fd = ret; 3016 3013 return 0; 3017 3014 }