Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
commit 077cdda764c7 ("net/mlx5e: TC, Fix memory leak with rules with internal port")
commit 31108d142f36 ("net/mlx5: Fix some error handling paths in 'mlx5e_tc_add_fdb_flow()'")
commit 4390c6edc0fb ("net/mlx5: Fix some error handling paths in 'mlx5e_tc_add_fdb_flow()'")
https://lore.kernel.org/all/20211229065352.30178-1-saeed@kernel.org/

net/smc/smc_wr.c
commit 49dc9013e34b ("net/smc: Use the bitmap API when applicable")
commit 349d43127dac ("net/smc: fix kernel panic caused by race of smc_sock")
bitmap_zero()/memset() is removed by the fix

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2237 -1192
+8 -2
Documentation/admin-guide/kernel-parameters.txt
··· 1689 1689 architectures force reset to be always executed 1690 1690 i8042.unlock [HW] Unlock (ignore) the keylock 1691 1691 i8042.kbdreset [HW] Reset device connected to KBD port 1692 + i8042.probe_defer 1693 + [HW] Allow deferred probing upon i8042 probe errors 1692 1694 1693 1695 i810= [HW,DRM] 1694 1696 ··· 2415 2413 Default is 1 (enabled) 2416 2414 2417 2415 kvm-intel.emulate_invalid_guest_state= 2418 - [KVM,Intel] Enable emulation of invalid guest states 2419 - Default is 0 (disabled) 2416 + [KVM,Intel] Disable emulation of invalid guest state. 2417 + Ignored if kvm-intel.enable_unrestricted_guest=1, as 2418 + guest state is never invalid for unrestricted guests. 2419 + This param doesn't apply to nested guests (L2), as KVM 2420 + never emulates invalid L2 guest state. 2421 + Default is 1 (enabled) 2420 2422 2421 2423 kvm-intel.flexpriority= 2422 2424 [KVM,Intel] Disable FlexPriority feature (TPR shadow).
+25
Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
··· 51 51 description: 52 52 Properties for single BUCK regulator. 53 53 54 + properties: 55 + op_mode: 56 + $ref: /schemas/types.yaml#/definitions/uint32 57 + enum: [0, 1, 2, 3] 58 + default: 1 59 + description: | 60 + Describes the different operating modes of the regulator with power 61 + mode change in SOC. The different possible values are: 62 + 0 - always off mode 63 + 1 - on in normal mode 64 + 2 - low power mode 65 + 3 - suspend mode 66 + 54 67 required: 55 68 - regulator-name 56 69 ··· 76 63 Properties for single BUCK regulator. 77 64 78 65 properties: 66 + op_mode: 67 + $ref: /schemas/types.yaml#/definitions/uint32 68 + enum: [0, 1, 2, 3] 69 + default: 1 70 + description: | 71 + Describes the different operating modes of the regulator with power 72 + mode change in SOC. The different possible values are: 73 + 0 - always off mode 74 + 1 - on in normal mode 75 + 2 - low power mode 76 + 3 - suspend mode 77 + 79 78 s5m8767,pmic-ext-control-gpios: 80 79 maxItems: 1 81 80 description: |
+4 -2
Documentation/networking/ip-sysctl.rst
··· 25 25 ip_no_pmtu_disc - INTEGER 26 26 Disable Path MTU Discovery. If enabled in mode 1 and a 27 27 fragmentation-required ICMP is received, the PMTU to this 28 - destination will be set to min_pmtu (see below). You will need 28 + destination will be set to the smallest of the old MTU to 29 + this destination and min_pmtu (see below). You will need 29 30 to raise min_pmtu to the smallest interface MTU on your system 30 31 manually if you want to avoid locally generated fragments. 31 32 ··· 50 49 Default: FALSE 51 50 52 51 min_pmtu - INTEGER 53 - default 552 - minimum discovered Path MTU 52 + default 552 - minimum Path MTU. Unless this is changed mannually, 53 + each cached pmtu will never be lower than this setting. 54 54 55 55 ip_forward_use_pmtu - BOOLEAN 56 56 By default we don't trust protocol path MTUs while forwarding
+2
Documentation/sound/hd-audio/models.rst
··· 326 326 Headset support on USI machines 327 327 dual-codecs 328 328 Lenovo laptops with dual codecs 329 + alc285-hp-amp-init 330 + HP laptops which require speaker amplifier initialization (ALC285) 329 331 330 332 ALC680 331 333 ======
+3 -3
MAINTAINERS
··· 14852 14852 M: Ryder Lee <ryder.lee@mediatek.com> 14853 14853 M: Jianjun Wang <jianjun.wang@mediatek.com> 14854 14854 L: linux-pci@vger.kernel.org 14855 - L: linux-mediatek@lists.infradead.org 14855 + L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) 14856 14856 S: Supported 14857 14857 F: Documentation/devicetree/bindings/pci/mediatek* 14858 14858 F: drivers/pci/controller/*mediatek* ··· 17438 17438 SILVACO I3C DUAL-ROLE MASTER 17439 17439 M: Miquel Raynal <miquel.raynal@bootlin.com> 17440 17440 M: Conor Culhane <conor.culhane@silvaco.com> 17441 - L: linux-i3c@lists.infradead.org 17441 + L: linux-i3c@lists.infradead.org (moderated for non-subscribers) 17442 17442 S: Maintained 17443 17443 F: Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml 17444 17444 F: drivers/i3c/master/svc-i3c-master.c ··· 21074 21074 F: arch/x86/kernel/cpu/zhaoxin.c 21075 21075 21076 21076 ZONEFS FILESYSTEM 21077 - M: Damien Le Moal <damien.lemoal@wdc.com> 21077 + M: Damien Le Moal <damien.lemoal@opensource.wdc.com> 21078 21078 M: Naohiro Aota <naohiro.aota@wdc.com> 21079 21079 R: Johannes Thumshirn <jth@kernel.org> 21080 21080 L: linux-fsdevel@vger.kernel.org
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 16 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc5 5 + EXTRAVERSION = -rc7 6 6 NAME = Gobble Gobble 7 7 8 8 # *DOCUMENTATION*
+1
arch/arm/boot/dts/imx6qdl-wandboard.dtsi
··· 309 309 310 310 ethphy: ethernet-phy@1 { 311 311 reg = <1>; 312 + qca,clk-out-frequency = <125000000>; 312 313 }; 313 314 }; 314 315 };
-1
arch/arm/include/asm/efi.h
··· 17 17 18 18 #ifdef CONFIG_EFI 19 19 void efi_init(void); 20 - extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 21 20 22 21 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); 23 22 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+3 -5
arch/arm/kernel/entry-armv.S
··· 596 596 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 597 597 reteq lr 598 598 and r8, r0, #0x00000f00 @ mask out CP number 599 - THUMB( lsr r8, r8, #8 ) 600 599 mov r7, #1 601 - add r6, r10, #TI_USED_CP 602 - ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[] 603 - THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[] 600 + add r6, r10, r8, lsr #8 @ add used_cp[] array offset first 601 + strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[] 604 602 #ifdef CONFIG_IWMMXT 605 603 @ Test if we need to give access to iWMMXt coprocessors 606 604 ldr r5, [r10, #TI_FLAGS] ··· 607 609 bcs iwmmxt_task_enable 608 610 #endif 609 611 ARM( add pc, pc, r8, lsr #6 ) 610 - THUMB( lsl r8, r8, #2 ) 612 + THUMB( lsr r8, r8, #6 ) 611 613 THUMB( add pc, r8 ) 612 614 nop 613 615
+1
arch/arm/kernel/head-nommu.S
··· 114 114 add r12, r12, r10 115 115 ret r12 116 116 1: bl __after_proc_init 117 + ldr r7, __secondary_data @ reload r7 117 118 ldr sp, [r7, #12] @ set up the stack pointer 118 119 ldr r0, [r7, #16] @ set up task pointer 119 120 mov fp, #0
+1 -1
arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
··· 69 69 pinctrl-0 = <&emac_rgmii_pins>; 70 70 phy-supply = <&reg_gmac_3v3>; 71 71 phy-handle = <&ext_rgmii_phy>; 72 - phy-mode = "rgmii"; 72 + phy-mode = "rgmii-id"; 73 73 status = "okay"; 74 74 }; 75 75
+2 -2
arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
··· 719 719 clock-names = "i2c"; 720 720 clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL 721 721 QORIQ_CLK_PLL_DIV(16)>; 722 - scl-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>; 722 + scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>; 723 723 status = "disabled"; 724 724 }; 725 725 ··· 768 768 clock-names = "i2c"; 769 769 clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL 770 770 QORIQ_CLK_PLL_DIV(16)>; 771 - scl-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; 771 + scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>; 772 772 status = "disabled"; 773 773 }; 774 774
-1
arch/arm64/include/asm/efi.h
··· 14 14 15 15 #ifdef CONFIG_EFI 16 16 extern void efi_init(void); 17 - extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 18 17 #else 19 18 #define efi_init() 20 19 #endif
+2
arch/mips/include/asm/mach-ralink/spaces.h
··· 6 6 #define PCI_IOSIZE SZ_64K 7 7 #define IO_SPACE_LIMIT (PCI_IOSIZE - 1) 8 8 9 + #define pci_remap_iospace pci_remap_iospace 10 + 9 11 #include <asm/mach-generic/spaces.h> 10 12 #endif
-4
arch/mips/include/asm/pci.h
··· 20 20 #include <linux/list.h> 21 21 #include <linux/of.h> 22 22 23 - #ifdef CONFIG_PCI_DRIVERS_GENERIC 24 - #define pci_remap_iospace pci_remap_iospace 25 - #endif 26 - 27 23 #ifdef CONFIG_PCI_DRIVERS_LEGACY 28 24 29 25 /*
+2
arch/mips/pci/pci-generic.c
··· 47 47 pci_read_bridge_bases(bus); 48 48 } 49 49 50 + #ifdef pci_remap_iospace 50 51 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) 51 52 { 52 53 unsigned long vaddr; ··· 61 60 set_io_port_base(vaddr); 62 61 return 0; 63 62 } 63 + #endif
-5
arch/parisc/Kconfig
··· 85 85 config STACK_GROWSUP 86 86 def_bool y 87 87 88 - config ARCH_DEFCONFIG 89 - string 90 - default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT 91 - default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT 92 - 93 88 config GENERIC_LOCKBREAK 94 89 bool 95 90 default y
+2 -2
arch/parisc/include/asm/futex.h
··· 14 14 _futex_spin_lock(u32 __user *uaddr) 15 15 { 16 16 extern u32 lws_lock_start[]; 17 - long index = ((long)uaddr & 0x3f8) >> 1; 17 + long index = ((long)uaddr & 0x7f8) >> 1; 18 18 arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index]; 19 19 preempt_disable(); 20 20 arch_spin_lock(s); ··· 24 24 _futex_spin_unlock(u32 __user *uaddr) 25 25 { 26 26 extern u32 lws_lock_start[]; 27 - long index = ((long)uaddr & 0x3f8) >> 1; 27 + long index = ((long)uaddr & 0x7f8) >> 1; 28 28 arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index]; 29 29 arch_spin_unlock(s); 30 30 preempt_enable();
+1 -1
arch/parisc/kernel/syscall.S
··· 472 472 extrd,u %r1,PSW_W_BIT,1,%r1 473 473 /* sp must be aligned on 4, so deposit the W bit setting into 474 474 * the bottom of sp temporarily */ 475 - or,ev %r1,%r30,%r30 475 + or,od %r1,%r30,%r30 476 476 477 477 /* Clip LWS number to a 32-bit value for 32-bit processes */ 478 478 depdi 0, 31, 32, %r20
+2
arch/parisc/kernel/traps.c
··· 730 730 } 731 731 mmap_read_unlock(current->mm); 732 732 } 733 + /* CPU could not fetch instruction, so clear stale IIR value. */ 734 + regs->iir = 0xbaadf00d; 733 735 fallthrough; 734 736 case 27: 735 737 /* Data memory protection ID trap */
+34 -8
arch/powerpc/kernel/module_64.c
··· 422 422 const char *name) 423 423 { 424 424 long reladdr; 425 + func_desc_t desc; 426 + int i; 425 427 426 428 if (is_mprofile_ftrace_call(name)) 427 429 return create_ftrace_stub(entry, addr, me); 428 430 429 - memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns)); 431 + for (i = 0; i < sizeof(ppc64_stub_insns) / sizeof(u32); i++) { 432 + if (patch_instruction(&entry->jump[i], 433 + ppc_inst(ppc64_stub_insns[i]))) 434 + return 0; 435 + } 430 436 431 437 /* Stub uses address relative to r2. */ 432 438 reladdr = (unsigned long)entry - my_r2(sechdrs, me); ··· 443 437 } 444 438 pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr); 445 439 446 - entry->jump[0] |= PPC_HA(reladdr); 447 - entry->jump[1] |= PPC_LO(reladdr); 448 - entry->funcdata = func_desc(addr); 449 - entry->magic = STUB_MAGIC; 440 + if (patch_instruction(&entry->jump[0], 441 + ppc_inst(entry->jump[0] | PPC_HA(reladdr)))) 442 + return 0; 443 + 444 + if (patch_instruction(&entry->jump[1], 445 + ppc_inst(entry->jump[1] | PPC_LO(reladdr)))) 446 + return 0; 447 + 448 + // func_desc_t is 8 bytes if ABIv2, else 16 bytes 449 + desc = func_desc(addr); 450 + for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) { 451 + if (patch_instruction(((u32 *)&entry->funcdata) + i, 452 + ppc_inst(((u32 *)(&desc))[i]))) 453 + return 0; 454 + } 455 + 456 + if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC))) 457 + return 0; 450 458 451 459 return 1; 452 460 } ··· 515 495 me->name, *instruction, instruction); 516 496 return 0; 517 497 } 498 + 518 499 /* ld r2,R2_STACK_OFFSET(r1) */ 519 - *instruction = PPC_INST_LD_TOC; 500 + if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC))) 501 + return 0; 502 + 520 503 return 1; 521 504 } 522 505 ··· 659 636 } 660 637 661 638 /* Only replace bits 2 through 26 */ 662 - *(uint32_t *)location 663 - = (*(uint32_t *)location & ~0x03fffffc) 639 + value = (*(uint32_t *)location & ~0x03fffffc) 664 640 | (value & 0x03fffffc); 641 + 642 + if 
(patch_instruction((u32 *)location, ppc_inst(value))) 643 + return -EFAULT; 644 + 665 645 break; 666 646 667 647 case R_PPC64_REL64:
+1 -1
arch/powerpc/mm/ptdump/ptdump.c
··· 183 183 { 184 184 pte_t pte = __pte(st->current_flags); 185 185 186 - if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx) 186 + if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx) 187 187 return; 188 188 189 189 if (!pte_write(pte) || !pte_exec(pte))
+2 -2
arch/powerpc/platforms/85xx/smp.c
··· 220 220 local_irq_save(flags); 221 221 hard_irq_disable(); 222 222 223 - if (qoriq_pm_ops) 223 + if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare) 224 224 qoriq_pm_ops->cpu_up_prepare(cpu); 225 225 226 226 /* if cpu is not spinning, reset it */ ··· 292 292 booting_thread_hwid = cpu_thread_in_core(nr); 293 293 primary = cpu_first_thread_sibling(nr); 294 294 295 - if (qoriq_pm_ops) 295 + if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare) 296 296 qoriq_pm_ops->cpu_up_prepare(nr); 297 297 298 298 /*
+1
arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
··· 76 76 spi-max-frequency = <20000000>; 77 77 voltage-ranges = <3300 3300>; 78 78 disable-wp; 79 + gpios = <&gpio 11 GPIO_ACTIVE_LOW>; 79 80 }; 80 81 }; 81 82
+53 -60
arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
··· 2 2 /* Copyright (c) 2020 SiFive, Inc */ 3 3 4 4 #include "fu740-c000.dtsi" 5 + #include <dt-bindings/gpio/gpio.h> 5 6 #include <dt-bindings/interrupt-controller/irq.h> 6 7 7 8 /* Clock frequency (in Hz) of the PCB crystal for rtcclk */ ··· 55 54 temperature-sensor@4c { 56 55 compatible = "ti,tmp451"; 57 56 reg = <0x4c>; 57 + vcc-supply = <&vdd_bpro>; 58 58 interrupt-parent = <&gpio>; 59 59 interrupts = <6 IRQ_TYPE_LEVEL_LOW>; 60 + }; 61 + 62 + eeprom@54 { 63 + compatible = "microchip,24c02", "atmel,24c02"; 64 + reg = <0x54>; 65 + vcc-supply = <&vdd_bpro>; 66 + label = "board-id"; 67 + pagesize = <16>; 68 + read-only; 69 + size = <256>; 60 70 }; 61 71 62 72 pmic@58 { ··· 77 65 interrupts = <1 IRQ_TYPE_LEVEL_LOW>; 78 66 interrupt-controller; 79 67 80 - regulators { 81 - vdd_bcore1: bcore1 { 82 - regulator-min-microvolt = <900000>; 83 - regulator-max-microvolt = <900000>; 84 - regulator-min-microamp = <5000000>; 85 - regulator-max-microamp = <5000000>; 86 - regulator-always-on; 87 - }; 68 + onkey { 69 + compatible = "dlg,da9063-onkey"; 70 + }; 88 71 89 - vdd_bcore2: bcore2 { 90 - regulator-min-microvolt = <900000>; 91 - regulator-max-microvolt = <900000>; 92 - regulator-min-microamp = <5000000>; 93 - regulator-max-microamp = <5000000>; 72 + rtc { 73 + compatible = "dlg,da9063-rtc"; 74 + }; 75 + 76 + wdt { 77 + compatible = "dlg,da9063-watchdog"; 78 + }; 79 + 80 + regulators { 81 + vdd_bcore: bcores-merged { 82 + regulator-min-microvolt = <1050000>; 83 + regulator-max-microvolt = <1050000>; 84 + regulator-min-microamp = <4800000>; 85 + regulator-max-microamp = <4800000>; 94 86 regulator-always-on; 95 87 }; 96 88 97 89 vdd_bpro: bpro { 98 90 regulator-min-microvolt = <1800000>; 99 91 regulator-max-microvolt = <1800000>; 100 - regulator-min-microamp = <2500000>; 101 - regulator-max-microamp = <2500000>; 92 + regulator-min-microamp = <2400000>; 93 + regulator-max-microamp = <2400000>; 102 94 regulator-always-on; 103 95 }; 104 96 105 97 vdd_bperi: bperi { 106 - 
regulator-min-microvolt = <1050000>; 107 - regulator-max-microvolt = <1050000>; 98 + regulator-min-microvolt = <1060000>; 99 + regulator-max-microvolt = <1060000>; 108 100 regulator-min-microamp = <1500000>; 109 101 regulator-max-microamp = <1500000>; 110 102 regulator-always-on; 111 103 }; 112 104 113 - vdd_bmem: bmem { 114 - regulator-min-microvolt = <1200000>; 115 - regulator-max-microvolt = <1200000>; 116 - regulator-min-microamp = <3000000>; 117 - regulator-max-microamp = <3000000>; 118 - regulator-always-on; 119 - }; 120 - 121 - vdd_bio: bio { 105 + vdd_bmem_bio: bmem-bio-merged { 122 106 regulator-min-microvolt = <1200000>; 123 107 regulator-max-microvolt = <1200000>; 124 108 regulator-min-microamp = <3000000>; ··· 125 117 vdd_ldo1: ldo1 { 126 118 regulator-min-microvolt = <1800000>; 127 119 regulator-max-microvolt = <1800000>; 128 - regulator-min-microamp = <100000>; 129 - regulator-max-microamp = <100000>; 130 120 regulator-always-on; 131 121 }; 132 122 133 123 vdd_ldo2: ldo2 { 134 124 regulator-min-microvolt = <1800000>; 135 125 regulator-max-microvolt = <1800000>; 136 - regulator-min-microamp = <200000>; 137 - regulator-max-microamp = <200000>; 138 126 regulator-always-on; 139 127 }; 140 128 141 129 vdd_ldo3: ldo3 { 142 - regulator-min-microvolt = <1800000>; 143 - regulator-max-microvolt = <1800000>; 144 - regulator-min-microamp = <200000>; 145 - regulator-max-microamp = <200000>; 130 + regulator-min-microvolt = <3300000>; 131 + regulator-max-microvolt = <3300000>; 146 132 regulator-always-on; 147 133 }; 148 134 149 135 vdd_ldo4: ldo4 { 150 - regulator-min-microvolt = <1800000>; 151 - regulator-max-microvolt = <1800000>; 152 - regulator-min-microamp = <200000>; 153 - regulator-max-microamp = <200000>; 136 + regulator-min-microvolt = <2500000>; 137 + regulator-max-microvolt = <2500000>; 154 138 regulator-always-on; 155 139 }; 156 140 157 141 vdd_ldo5: ldo5 { 158 - regulator-min-microvolt = <1800000>; 159 - regulator-max-microvolt = <1800000>; 160 - 
regulator-min-microamp = <100000>; 161 - regulator-max-microamp = <100000>; 142 + regulator-min-microvolt = <3300000>; 143 + regulator-max-microvolt = <3300000>; 162 144 regulator-always-on; 163 145 }; 164 146 165 147 vdd_ldo6: ldo6 { 166 - regulator-min-microvolt = <3300000>; 167 - regulator-max-microvolt = <3300000>; 168 - regulator-min-microamp = <200000>; 169 - regulator-max-microamp = <200000>; 148 + regulator-min-microvolt = <1800000>; 149 + regulator-max-microvolt = <1800000>; 170 150 regulator-always-on; 171 151 }; 172 152 173 153 vdd_ldo7: ldo7 { 174 - regulator-min-microvolt = <1800000>; 175 - regulator-max-microvolt = <1800000>; 176 - regulator-min-microamp = <200000>; 177 - regulator-max-microamp = <200000>; 154 + regulator-min-microvolt = <3300000>; 155 + regulator-max-microvolt = <3300000>; 178 156 regulator-always-on; 179 157 }; 180 158 181 159 vdd_ldo8: ldo8 { 182 - regulator-min-microvolt = <1800000>; 183 - regulator-max-microvolt = <1800000>; 184 - regulator-min-microamp = <200000>; 185 - regulator-max-microamp = <200000>; 160 + regulator-min-microvolt = <3300000>; 161 + regulator-max-microvolt = <3300000>; 186 162 regulator-always-on; 187 163 }; 188 164 189 165 vdd_ld09: ldo9 { 190 166 regulator-min-microvolt = <1050000>; 191 167 regulator-max-microvolt = <1050000>; 192 - regulator-min-microamp = <200000>; 193 - regulator-max-microamp = <200000>; 168 + regulator-always-on; 194 169 }; 195 170 196 171 vdd_ldo10: ldo10 { 197 172 regulator-min-microvolt = <1000000>; 198 173 regulator-max-microvolt = <1000000>; 199 - regulator-min-microamp = <300000>; 200 - regulator-max-microamp = <300000>; 174 + regulator-always-on; 201 175 }; 202 176 203 177 vdd_ldo11: ldo11 { 204 178 regulator-min-microvolt = <2500000>; 205 179 regulator-max-microvolt = <2500000>; 206 - regulator-min-microamp = <300000>; 207 - regulator-max-microamp = <300000>; 208 180 regulator-always-on; 209 181 }; 210 182 }; ··· 211 223 spi-max-frequency = <20000000>; 212 224 voltage-ranges = 
<3300 3300>; 213 225 disable-wp; 226 + gpios = <&gpio 15 GPIO_ACTIVE_LOW>; 214 227 }; 215 228 }; 216 229 ··· 234 245 235 246 &gpio { 236 247 status = "okay"; 248 + gpio-line-names = "J29.1", "PMICNTB", "PMICSHDN", "J8.1", "J8.3", 249 + "PCIe_PWREN", "THERM", "UBRDG_RSTN", "PCIe_PERSTN", 250 + "ULPI_RSTN", "J8.2", "UHUB_RSTN", "GEMGXL_RST", "J8.4", 251 + "EN_VDD_SD", "SD_CD"; 237 252 };
-1
arch/riscv/include/asm/efi.h
··· 13 13 14 14 #ifdef CONFIG_EFI 15 15 extern void efi_init(void); 16 - extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 17 16 #else 18 17 #define efi_init() 19 18 #endif
-2
arch/x86/include/asm/efi.h
··· 197 197 198 198 extern void parse_efi_setup(u64 phys_addr, u32 data_len); 199 199 200 - extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 201 - 202 200 extern void efi_thunk_runtime_setup(void); 203 201 efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size, 204 202 unsigned long descriptor_size,
+1
arch/x86/include/asm/kvm-x86-ops.h
··· 47 47 KVM_X86_OP(cache_reg) 48 48 KVM_X86_OP(get_rflags) 49 49 KVM_X86_OP(set_rflags) 50 + KVM_X86_OP(get_if_flag) 50 51 KVM_X86_OP(tlb_flush_all) 51 52 KVM_X86_OP(tlb_flush_current) 52 53 KVM_X86_OP_NULL(tlb_remote_flush)
+1
arch/x86/include/asm/kvm_host.h
··· 1349 1349 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); 1350 1350 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); 1351 1351 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); 1352 + bool (*get_if_flag)(struct kvm_vcpu *vcpu); 1352 1353 1353 1354 void (*tlb_flush_all)(struct kvm_vcpu *vcpu); 1354 1355 void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
+2 -2
arch/x86/include/asm/pkru.h
··· 4 4 5 5 #include <asm/cpufeature.h> 6 6 7 - #define PKRU_AD_BIT 0x1 8 - #define PKRU_WD_BIT 0x2 7 + #define PKRU_AD_BIT 0x1u 8 + #define PKRU_WD_BIT 0x2u 9 9 #define PKRU_BITS_PER_PKEY 2 10 10 11 11 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+30 -42
arch/x86/kernel/setup.c
··· 713 713 714 714 early_reserve_initrd(); 715 715 716 - if (efi_enabled(EFI_BOOT)) 717 - efi_memblock_x86_reserve_range(); 718 - 719 716 memblock_x86_reserve_range_setup_data(); 720 717 721 718 reserve_ibft_region(); ··· 737 740 } 738 741 739 742 return 0; 740 - } 741 - 742 - static char * __init prepare_command_line(void) 743 - { 744 - #ifdef CONFIG_CMDLINE_BOOL 745 - #ifdef CONFIG_CMDLINE_OVERRIDE 746 - strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 747 - #else 748 - if (builtin_cmdline[0]) { 749 - /* append boot loader cmdline to builtin */ 750 - strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE); 751 - strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE); 752 - strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 753 - } 754 - #endif 755 - #endif 756 - 757 - strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 758 - 759 - parse_early_param(); 760 - 761 - return command_line; 762 743 } 763 744 764 745 /* ··· 828 853 x86_init.oem.arch_setup(); 829 854 830 855 /* 831 - * x86_configure_nx() is called before parse_early_param() (called by 832 - * prepare_command_line()) to detect whether hardware doesn't support 833 - * NX (so that the early EHCI debug console setup can safely call 834 - * set_fixmap()). It may then be called again from within noexec_setup() 835 - * during parsing early parameters to honor the respective command line 836 - * option. 837 - */ 838 - x86_configure_nx(); 839 - 840 - /* 841 - * This parses early params and it needs to run before 842 - * early_reserve_memory() because latter relies on such settings 843 - * supplied as early params. 844 - */ 845 - *cmdline_p = prepare_command_line(); 846 - 847 - /* 848 856 * Do some memory reservations *before* memory is added to memblock, so 849 857 * memblock allocations won't overwrite it. 
850 858 * ··· 859 901 data_resource.end = __pa_symbol(_edata)-1; 860 902 bss_resource.start = __pa_symbol(__bss_start); 861 903 bss_resource.end = __pa_symbol(__bss_stop)-1; 904 + 905 + #ifdef CONFIG_CMDLINE_BOOL 906 + #ifdef CONFIG_CMDLINE_OVERRIDE 907 + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 908 + #else 909 + if (builtin_cmdline[0]) { 910 + /* append boot loader cmdline to builtin */ 911 + strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE); 912 + strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE); 913 + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 914 + } 915 + #endif 916 + #endif 917 + 918 + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 919 + *cmdline_p = command_line; 920 + 921 + /* 922 + * x86_configure_nx() is called before parse_early_param() to detect 923 + * whether hardware doesn't support NX (so that the early EHCI debug 924 + * console setup can safely call set_fixmap()). It may then be called 925 + * again from within noexec_setup() during parsing early parameters 926 + * to honor the respective command line option. 927 + */ 928 + x86_configure_nx(); 929 + 930 + parse_early_param(); 931 + 932 + if (efi_enabled(EFI_BOOT)) 933 + efi_memblock_x86_reserve_range(); 862 934 863 935 #ifdef CONFIG_MEMORY_HOTPLUG 864 936 /*
+15 -1
arch/x86/kvm/mmu/mmu.c
··· 3987 3987 static bool is_page_fault_stale(struct kvm_vcpu *vcpu, 3988 3988 struct kvm_page_fault *fault, int mmu_seq) 3989 3989 { 3990 - if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa))) 3990 + struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root_hpa); 3991 + 3992 + /* Special roots, e.g. pae_root, are not backed by shadow pages. */ 3993 + if (sp && is_obsolete_sp(vcpu->kvm, sp)) 3994 + return true; 3995 + 3996 + /* 3997 + * Roots without an associated shadow page are considered invalid if 3998 + * there is a pending request to free obsolete roots. The request is 3999 + * only a hint that the current root _may_ be obsolete and needs to be 4000 + * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a 4001 + * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs 4002 + * to reload even if no vCPU is actively using the root. 4003 + */ 4004 + if (!sp && kvm_test_request(KVM_REQ_MMU_RELOAD, vcpu)) 3991 4005 return true; 3992 4006 3993 4007 return fault->slot &&
+6
arch/x86/kvm/mmu/tdp_iter.c
··· 26 26 */ 27 27 void tdp_iter_restart(struct tdp_iter *iter) 28 28 { 29 + iter->yielded = false; 29 30 iter->yielded_gfn = iter->next_last_level_gfn; 30 31 iter->level = iter->root_level; 31 32 ··· 161 160 */ 162 161 void tdp_iter_next(struct tdp_iter *iter) 163 162 { 163 + if (iter->yielded) { 164 + tdp_iter_restart(iter); 165 + return; 166 + } 167 + 164 168 if (try_step_down(iter)) 165 169 return; 166 170
+6
arch/x86/kvm/mmu/tdp_iter.h
··· 45 45 * iterator walks off the end of the paging structure. 46 46 */ 47 47 bool valid; 48 + /* 49 + * True if KVM dropped mmu_lock and yielded in the middle of a walk, in 50 + * which case tdp_iter_next() needs to restart the walk at the root 51 + * level instead of advancing to the next entry. 52 + */ 53 + bool yielded; 48 54 }; 49 55 50 56 /*
+16 -13
arch/x86/kvm/mmu/tdp_mmu.c
··· 502 502 struct tdp_iter *iter, 503 503 u64 new_spte) 504 504 { 505 + WARN_ON_ONCE(iter->yielded); 506 + 505 507 lockdep_assert_held_read(&kvm->mmu_lock); 506 508 507 509 /* ··· 577 575 u64 new_spte, bool record_acc_track, 578 576 bool record_dirty_log) 579 577 { 578 + WARN_ON_ONCE(iter->yielded); 579 + 580 580 lockdep_assert_held_write(&kvm->mmu_lock); 581 581 582 582 /* ··· 644 640 * If this function should yield and flush is set, it will perform a remote 645 641 * TLB flush before yielding. 646 642 * 647 - * If this function yields, it will also reset the tdp_iter's walk over the 648 - * paging structure and the calling function should skip to the next 649 - * iteration to allow the iterator to continue its traversal from the 650 - * paging structure root. 643 + * If this function yields, iter->yielded is set and the caller must skip to 644 + * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk 645 + * over the paging structures to allow the iterator to continue its traversal 646 + * from the paging structure root. 651 647 * 652 - * Return true if this function yielded and the iterator's traversal was reset. 653 - * Return false if a yield was not needed. 648 + * Returns true if this function yielded. 654 649 */ 655 - static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, 656 - struct tdp_iter *iter, bool flush, 657 - bool shared) 650 + static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, 651 + struct tdp_iter *iter, 652 + bool flush, bool shared) 658 653 { 654 + WARN_ON(iter->yielded); 655 + 659 656 /* Ensure forward progress has been made before yielding. */ 660 657 if (iter->next_last_level_gfn == iter->yielded_gfn) 661 658 return false; ··· 676 671 677 672 WARN_ON(iter->gfn > iter->next_last_level_gfn); 678 673 679 - tdp_iter_restart(iter); 680 - 681 - return true; 674 + iter->yielded = true; 682 675 } 683 676 684 - return false; 677 + return iter->yielded; 685 678 } 686 679 687 680 /*
+12 -9
arch/x86/kvm/svm/svm.c
··· 1585 1585 to_svm(vcpu)->vmcb->save.rflags = rflags; 1586 1586 } 1587 1587 1588 + static bool svm_get_if_flag(struct kvm_vcpu *vcpu) 1589 + { 1590 + struct vmcb *vmcb = to_svm(vcpu)->vmcb; 1591 + 1592 + return sev_es_guest(vcpu->kvm) 1593 + ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK 1594 + : kvm_get_rflags(vcpu) & X86_EFLAGS_IF; 1595 + } 1596 + 1588 1597 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 1589 1598 { 1590 1599 switch (reg) { ··· 3577 3568 if (!gif_set(svm)) 3578 3569 return true; 3579 3570 3580 - if (sev_es_guest(vcpu->kvm)) { 3581 - /* 3582 - * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask 3583 - * bit to determine the state of the IF flag. 3584 - */ 3585 - if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK)) 3586 - return true; 3587 - } else if (is_guest_mode(vcpu)) { 3571 + if (is_guest_mode(vcpu)) { 3588 3572 /* As long as interrupts are being delivered... */ 3589 3573 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) 3590 3574 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) ··· 3588 3586 if (nested_exit_on_intr(svm)) 3589 3587 return false; 3590 3588 } else { 3591 - if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) 3589 + if (!svm_get_if_flag(vcpu)) 3592 3590 return true; 3593 3591 } 3594 3592 ··· 4623 4621 .cache_reg = svm_cache_reg, 4624 4622 .get_rflags = svm_get_rflags, 4625 4623 .set_rflags = svm_set_rflags, 4624 + .get_if_flag = svm_get_if_flag, 4626 4625 4627 4626 .tlb_flush_all = svm_flush_tlb, 4628 4627 .tlb_flush_current = svm_flush_tlb,
+32 -13
arch/x86/kvm/vmx/vmx.c
··· 1363 1363 vmx->emulation_required = vmx_emulation_required(vcpu); 1364 1364 } 1365 1365 1366 + static bool vmx_get_if_flag(struct kvm_vcpu *vcpu) 1367 + { 1368 + return vmx_get_rflags(vcpu) & X86_EFLAGS_IF; 1369 + } 1370 + 1366 1371 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) 1367 1372 { 1368 1373 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); ··· 3964 3959 if (pi_test_and_set_on(&vmx->pi_desc)) 3965 3960 return 0; 3966 3961 3967 - if (vcpu != kvm_get_running_vcpu() && 3968 - !kvm_vcpu_trigger_posted_interrupt(vcpu, false)) 3962 + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) 3969 3963 kvm_vcpu_kick(vcpu); 3970 3964 3971 3965 return 0; ··· 5881 5877 vmx_flush_pml_buffer(vcpu); 5882 5878 5883 5879 /* 5884 - * We should never reach this point with a pending nested VM-Enter, and 5885 - * more specifically emulation of L2 due to invalid guest state (see 5886 - * below) should never happen as that means we incorrectly allowed a 5887 - * nested VM-Enter with an invalid vmcs12. 5880 + * KVM should never reach this point with a pending nested VM-Enter. 5881 + * More specifically, short-circuiting VM-Entry to emulate L2 due to 5882 + * invalid guest state should never happen as that means KVM knowingly 5883 + * allowed a nested VM-Enter with an invalid vmcs12. More below. 5888 5884 */ 5889 5885 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm)) 5890 5886 return -EIO; 5891 - 5892 - /* If guest state is invalid, start emulating */ 5893 - if (vmx->emulation_required) 5894 - return handle_invalid_guest_state(vcpu); 5895 5887 5896 5888 if (is_guest_mode(vcpu)) { 5897 5889 /* ··· 5910 5910 */ 5911 5911 nested_mark_vmcs12_pages_dirty(vcpu); 5912 5912 5913 + /* 5914 + * Synthesize a triple fault if L2 state is invalid. In normal 5915 + * operation, nested VM-Enter rejects any attempt to enter L2 5916 + * with invalid state. However, those checks are skipped if 5917 + * state is being stuffed via RSM or KVM_SET_NESTED_STATE. 
If 5918 + * L2 state is invalid, it means either L1 modified SMRAM state 5919 + * or userspace provided bad state. Synthesize TRIPLE_FAULT as 5920 + * doing so is architecturally allowed in the RSM case, and is 5921 + * the least awful solution for the userspace case without 5922 + * risking false positives. 5923 + */ 5924 + if (vmx->emulation_required) { 5925 + nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); 5926 + return 1; 5927 + } 5928 + 5913 5929 if (nested_vmx_reflect_vmexit(vcpu)) 5914 5930 return 1; 5915 5931 } 5932 + 5933 + /* If guest state is invalid, start emulating. L2 is handled above. */ 5934 + if (vmx->emulation_required) 5935 + return handle_invalid_guest_state(vcpu); 5916 5936 5917 5937 if (exit_reason.failed_vmentry) { 5918 5938 dump_vmcs(vcpu); ··· 6628 6608 * consistency check VM-Exit due to invalid guest state and bail. 6629 6609 */ 6630 6610 if (unlikely(vmx->emulation_required)) { 6631 - 6632 - /* We don't emulate invalid state of a nested guest */ 6633 - vmx->fail = is_guest_mode(vcpu); 6611 + vmx->fail = 0; 6634 6612 6635 6613 vmx->exit_reason.full = EXIT_REASON_INVALID_STATE; 6636 6614 vmx->exit_reason.failed_vmentry = 1; ··· 7597 7579 .cache_reg = vmx_cache_reg, 7598 7580 .get_rflags = vmx_get_rflags, 7599 7581 .set_rflags = vmx_set_rflags, 7582 + .get_if_flag = vmx_get_if_flag, 7600 7583 7601 7584 .tlb_flush_all = vmx_flush_tlb_all, 7602 7585 .tlb_flush_current = vmx_flush_tlb_current,
+3 -10
arch/x86/kvm/x86.c
··· 1331 1331 MSR_IA32_UMWAIT_CONTROL, 1332 1332 1333 1333 MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, 1334 - MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3, 1334 + MSR_ARCH_PERFMON_FIXED_CTR0 + 2, 1335 1335 MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, 1336 1336 MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 1337 1337 MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, ··· 3413 3413 3414 3414 if (!msr_info->host_initiated) 3415 3415 return 1; 3416 - if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent)) 3416 + if (kvm_get_msr_feature(&msr_ent)) 3417 3417 return 1; 3418 3418 if (data & ~msr_ent.data) 3419 3419 return 1; ··· 9001 9001 { 9002 9002 struct kvm_run *kvm_run = vcpu->run; 9003 9003 9004 - /* 9005 - * if_flag is obsolete and useless, so do not bother 9006 - * setting it for SEV-ES guests. Userspace can just 9007 - * use kvm_run->ready_for_interrupt_injection. 9008 - */ 9009 - kvm_run->if_flag = !vcpu->arch.guest_state_protected 9010 - && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; 9011 - 9004 + kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); 9012 9005 kvm_run->cr8 = kvm_get_cr8(vcpu); 9013 9006 kvm_run->apic_base = kvm_get_apic_base(vcpu); 9014 9007
+8 -1
block/blk-iocost.c
··· 2311 2311 hwm = current_hweight_max(iocg); 2312 2312 new_hwi = hweight_after_donation(iocg, old_hwi, hwm, 2313 2313 usage, &now); 2314 - if (new_hwi < hwm) { 2314 + /* 2315 + * Donation calculation assumes hweight_after_donation 2316 + * to be positive, a condition that a donor w/ hwa < 2 2317 + * can't meet. Don't bother with donation if hwa is 2318 + * below 2. It's not gonna make a meaningful difference 2319 + * anyway. 2320 + */ 2321 + if (new_hwi < hwm && hwa >= 2) { 2315 2322 iocg->hweight_donating = hwa; 2316 2323 iocg->hweight_after_donation = new_hwi; 2317 2324 list_add(&iocg->surplus_list, &surpluses);
+1 -1
drivers/android/binder_alloc.c
··· 671 671 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); 672 672 673 673 if (buffer->async_transaction) { 674 - alloc->free_async_space += size + sizeof(struct binder_buffer); 674 + alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); 675 675 676 676 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 677 677 "%d: binder_free_buf size %zd async free %zd\n",
+13 -2
drivers/ata/libata-scsi.c
··· 2859 2859 goto invalid_fld; 2860 2860 } 2861 2861 2862 - if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0) 2863 - tf->protocol = ATA_PROT_NCQ_NODATA; 2862 + if ((cdb[2 + cdb_offset] & 0x3) == 0) { 2863 + /* 2864 + * When T_LENGTH is zero (No data is transferred), dir should 2865 + * be DMA_NONE. 2866 + */ 2867 + if (scmd->sc_data_direction != DMA_NONE) { 2868 + fp = 2 + cdb_offset; 2869 + goto invalid_fld; 2870 + } 2871 + 2872 + if (ata_is_ncq(tf->protocol)) 2873 + tf->protocol = ATA_PROT_NCQ_NODATA; 2874 + } 2864 2875 2865 2876 /* enable LBA */ 2866 2877 tf->flags |= ATA_TFLAG_LBA;
+4 -1
drivers/auxdisplay/charlcd.c
··· 37 37 bool must_clear; 38 38 39 39 /* contains the LCD config state */ 40 - unsigned long int flags; 40 + unsigned long flags; 41 41 42 42 /* Current escape sequence and it's length or -1 if outside */ 43 43 struct { ··· 578 578 * Since charlcd_init_display() needs to write data, we have to 579 579 * enable mark the LCD initialized just before. 580 580 */ 581 + if (WARN_ON(!lcd->ops->init_display)) 582 + return -EINVAL; 583 + 581 584 ret = lcd->ops->init_display(lcd); 582 585 if (ret) 583 586 return ret;
+1 -1
drivers/base/power/main.c
··· 1902 1902 device_block_probing(); 1903 1903 1904 1904 mutex_lock(&dpm_list_mtx); 1905 - while (!list_empty(&dpm_list)) { 1905 + while (!list_empty(&dpm_list) && !error) { 1906 1906 struct device *dev = to_device(dpm_list.next); 1907 1907 1908 1908 get_device(dev);
+12 -3
drivers/block/xen-blkfront.c
··· 1512 1512 unsigned long flags; 1513 1513 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id; 1514 1514 struct blkfront_info *info = rinfo->dev_info; 1515 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; 1515 1516 1516 - if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) 1517 + if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { 1518 + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); 1517 1519 return IRQ_HANDLED; 1520 + } 1518 1521 1519 1522 spin_lock_irqsave(&rinfo->ring_lock, flags); 1520 1523 again: ··· 1532 1529 for (i = rinfo->ring.rsp_cons; i != rp; i++) { 1533 1530 unsigned long id; 1534 1531 unsigned int op; 1532 + 1533 + eoiflag = 0; 1535 1534 1536 1535 RING_COPY_RESPONSE(&rinfo->ring, i, &bret); 1537 1536 id = bret.id; ··· 1651 1646 1652 1647 spin_unlock_irqrestore(&rinfo->ring_lock, flags); 1653 1648 1649 + xen_irq_lateeoi(irq, eoiflag); 1650 + 1654 1651 return IRQ_HANDLED; 1655 1652 1656 1653 err: 1657 1654 info->connected = BLKIF_STATE_ERROR; 1658 1655 1659 1656 spin_unlock_irqrestore(&rinfo->ring_lock, flags); 1657 + 1658 + /* No EOI in order to avoid further interrupts. */ 1660 1659 1661 1660 pr_alert("%s disabled for further use\n", info->gd->disk_name); 1662 1661 return IRQ_HANDLED; ··· 1701 1692 if (err) 1702 1693 goto fail; 1703 1694 1704 - err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0, 1705 - "blkif", rinfo); 1695 + err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt, 1696 + 0, "blkif", rinfo); 1706 1697 if (err <= 0) { 1707 1698 xenbus_dev_fatal(dev, err, 1708 1699 "bind_evtchn_to_irqhandler failed");
+4 -4
drivers/bus/sunxi-rsb.c
··· 687 687 688 688 static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb) 689 689 { 690 - /* Keep the clock and PM reference counts consistent. */ 691 - if (pm_runtime_status_suspended(rsb->dev)) 692 - pm_runtime_resume(rsb->dev); 693 690 reset_control_assert(rsb->rstc); 694 - clk_disable_unprepare(rsb->clk); 691 + 692 + /* Keep the clock and PM reference counts consistent. */ 693 + if (!pm_runtime_status_suspended(rsb->dev)) 694 + clk_disable_unprepare(rsb->clk); 695 695 } 696 696 697 697 static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
+14 -9
drivers/char/ipmi/ipmi_msghandler.c
··· 3031 3031 * with removing the device attributes while reading a device 3032 3032 * attribute. 3033 3033 */ 3034 - schedule_work(&bmc->remove_work); 3034 + queue_work(remove_work_wq, &bmc->remove_work); 3035 3035 } 3036 3036 3037 3037 /* ··· 5392 5392 if (initialized) 5393 5393 goto out; 5394 5394 5395 - init_srcu_struct(&ipmi_interfaces_srcu); 5395 + rv = init_srcu_struct(&ipmi_interfaces_srcu); 5396 + if (rv) 5397 + goto out; 5398 + 5399 + remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); 5400 + if (!remove_work_wq) { 5401 + pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); 5402 + rv = -ENOMEM; 5403 + goto out_wq; 5404 + } 5396 5405 5397 5406 timer_setup(&ipmi_timer, ipmi_timeout, 0); 5398 5407 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5399 5408 5400 5409 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 5401 5410 5402 - remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); 5403 - if (!remove_work_wq) { 5404 - pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); 5405 - rv = -ENOMEM; 5406 - goto out; 5407 - } 5408 - 5409 5411 initialized = true; 5410 5412 5413 + out_wq: 5414 + if (rv) 5415 + cleanup_srcu_struct(&ipmi_interfaces_srcu); 5411 5416 out: 5412 5417 mutex_unlock(&ipmi_interfaces_mutex); 5413 5418 return rv;
+4 -3
drivers/char/ipmi/ipmi_ssif.c
··· 1659 1659 } 1660 1660 } 1661 1661 1662 + ssif_info->client = client; 1663 + i2c_set_clientdata(client, ssif_info); 1664 + 1662 1665 rv = ssif_check_and_remove(client, ssif_info); 1663 1666 /* If rv is 0 and addr source is not SI_ACPI, continue probing */ 1664 1667 if (!rv && ssif_info->addr_source == SI_ACPI) { ··· 1681 1678 "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n", 1682 1679 ipmi_addr_src_to_str(ssif_info->addr_source), 1683 1680 client->addr, client->adapter->name, slave_addr); 1684 - 1685 - ssif_info->client = client; 1686 - i2c_set_clientdata(client, ssif_info); 1687 1681 1688 1682 /* Now check for system interface capabilities */ 1689 1683 msg[0] = IPMI_NETFN_APP_REQUEST << 2; ··· 1881 1881 1882 1882 dev_err(&ssif_info->client->dev, 1883 1883 "Unable to start IPMI SSIF: %d\n", rv); 1884 + i2c_set_clientdata(client, NULL); 1884 1885 kfree(ssif_info); 1885 1886 } 1886 1887 kfree(resp);
+7
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
··· 211 211 return adf_4xxx_fw_config[obj_num].ae_mask; 212 212 } 213 213 214 + static u32 get_vf2pf_sources(void __iomem *pmisc_addr) 215 + { 216 + /* For the moment do not report vf2pf sources */ 217 + return 0; 218 + } 219 + 214 220 void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) 215 221 { 216 222 hw_data->dev_class = &adf_4xxx_class; ··· 260 254 hw_data->set_msix_rttable = set_msix_default_rttable; 261 255 hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; 262 256 hw_data->enable_pfvf_comms = pfvf_comms_disabled; 257 + hw_data->get_vf2pf_sources = get_vf2pf_sources; 263 258 hw_data->disable_iov = adf_disable_sriov; 264 259 hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION; 265 260
+2 -2
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
··· 373 373 struct axi_dma_desc *first) 374 374 { 375 375 u32 priority = chan->chip->dw->hdata->priority[chan->id]; 376 - struct axi_dma_chan_config config; 376 + struct axi_dma_chan_config config = {}; 377 377 u32 irq_mask; 378 378 u8 lms = 0; /* Select AXI0 master for LLI fetching */ 379 379 ··· 391 391 config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC; 392 392 config.prior = priority; 393 393 config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; 394 - config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; 394 + config.hs_sel_src = DWAXIDMAC_HS_SEL_HW; 395 395 switch (chan->direction) { 396 396 case DMA_MEM_TO_DEV: 397 397 dw_axi_dma_set_byte_halfword(chan, true);
+1 -9
drivers/dma/dw-edma/dw-edma-pcie.c
··· 187 187 188 188 /* DMA configuration */ 189 189 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 190 - if (!err) { 190 + if (err) { 191 191 pci_err(pdev, "DMA mask 64 set failed\n"); 192 192 return err; 193 - } else { 194 - pci_err(pdev, "DMA mask 64 set failed\n"); 195 - 196 - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 197 - if (err) { 198 - pci_err(pdev, "DMA mask 32 set failed\n"); 199 - return err; 200 - } 201 193 } 202 194 203 195 /* Data structure allocation */
+1 -1
drivers/dma/idxd/irq.c
··· 137 137 INIT_WORK(&idxd->work, idxd_device_reinit); 138 138 queue_work(idxd->wq, &idxd->work); 139 139 } else { 140 - spin_lock(&idxd->dev_lock); 141 140 idxd->state = IDXD_DEV_HALTED; 142 141 idxd_wqs_quiesce(idxd); 143 142 idxd_wqs_unmap_portal(idxd); 143 + spin_lock(&idxd->dev_lock); 144 144 idxd_device_clear_state(idxd); 145 145 dev_err(&idxd->pdev->dev, 146 146 "idxd halted, need %s.\n",
+17 -1
drivers/dma/idxd/submit.c
··· 106 106 { 107 107 struct idxd_desc *d, *t, *found = NULL; 108 108 struct llist_node *head; 109 + LIST_HEAD(flist); 109 110 110 111 desc->completion->status = IDXD_COMP_DESC_ABORT; 111 112 /* ··· 121 120 found = desc; 122 121 continue; 123 122 } 124 - list_add_tail(&desc->list, &ie->work_list); 123 + 124 + if (d->completion->status) 125 + list_add_tail(&d->list, &flist); 126 + else 127 + list_add_tail(&d->list, &ie->work_list); 125 128 } 126 129 } 127 130 ··· 135 130 136 131 if (found) 137 132 complete_desc(found, IDXD_COMPLETE_ABORT); 133 + 134 + /* 135 + * complete_desc() will return desc to allocator and the desc can be 136 + * acquired by a different process and the desc->list can be modified. 137 + * Delete desc from list so the list trasversing does not get corrupted 138 + * by the other process. 139 + */ 140 + list_for_each_entry_safe(d, t, &flist, list) { 141 + list_del_init(&d->list); 142 + complete_desc(d, IDXD_COMPLETE_NORMAL); 143 + } 138 144 } 139 145 140 146 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+1 -1
drivers/dma/st_fdma.c
··· 874 874 MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver"); 875 875 MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>"); 876 876 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>"); 877 - MODULE_ALIAS("platform: " DRIVER_NAME); 877 + MODULE_ALIAS("platform:" DRIVER_NAME);
+105 -48
drivers/dma/ti/k3-udma.c
··· 4534 4534 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4535 4535 if (IS_ERR(rm_res)) { 4536 4536 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4537 + irq_res.sets = 1; 4537 4538 } else { 4538 4539 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4539 4540 for (i = 0; i < rm_res->sets; i++) 4540 4541 udma_mark_resource_ranges(ud, ud->tchan_map, 4541 4542 &rm_res->desc[i], "tchan"); 4543 + irq_res.sets = rm_res->sets; 4542 4544 } 4543 - irq_res.sets = rm_res->sets; 4544 4545 4545 4546 /* rchan and matching default flow ranges */ 4546 4547 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4547 4548 if (IS_ERR(rm_res)) { 4548 4549 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4550 + irq_res.sets++; 4549 4551 } else { 4550 4552 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4551 4553 for (i = 0; i < rm_res->sets; i++) 4552 4554 udma_mark_resource_ranges(ud, ud->rchan_map, 4553 4555 &rm_res->desc[i], "rchan"); 4556 + irq_res.sets += rm_res->sets; 4554 4557 } 4555 4558 4556 - irq_res.sets += rm_res->sets; 4557 4559 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4560 + if (!irq_res.desc) 4561 + return -ENOMEM; 4558 4562 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4559 - for (i = 0; i < rm_res->sets; i++) { 4560 - irq_res.desc[i].start = rm_res->desc[i].start; 4561 - irq_res.desc[i].num = rm_res->desc[i].num; 4562 - irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; 4563 - irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4563 + if (IS_ERR(rm_res)) { 4564 + irq_res.desc[0].start = 0; 4565 + irq_res.desc[0].num = ud->tchan_cnt; 4566 + i = 1; 4567 + } else { 4568 + for (i = 0; i < rm_res->sets; i++) { 4569 + irq_res.desc[i].start = rm_res->desc[i].start; 4570 + irq_res.desc[i].num = rm_res->desc[i].num; 4571 + irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; 4572 + irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4573 + } 4564 4574 } 4565 4575 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4566 - for (j = 0; j < rm_res->sets; j++, i++) { 4567 - if 
(rm_res->desc[j].num) { 4568 - irq_res.desc[i].start = rm_res->desc[j].start + 4569 - ud->soc_data->oes.udma_rchan; 4570 - irq_res.desc[i].num = rm_res->desc[j].num; 4571 - } 4572 - if (rm_res->desc[j].num_sec) { 4573 - irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4574 - ud->soc_data->oes.udma_rchan; 4575 - irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4576 + if (IS_ERR(rm_res)) { 4577 + irq_res.desc[i].start = 0; 4578 + irq_res.desc[i].num = ud->rchan_cnt; 4579 + } else { 4580 + for (j = 0; j < rm_res->sets; j++, i++) { 4581 + if (rm_res->desc[j].num) { 4582 + irq_res.desc[i].start = rm_res->desc[j].start + 4583 + ud->soc_data->oes.udma_rchan; 4584 + irq_res.desc[i].num = rm_res->desc[j].num; 4585 + } 4586 + if (rm_res->desc[j].num_sec) { 4587 + irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4588 + ud->soc_data->oes.udma_rchan; 4589 + irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4590 + } 4576 4591 } 4577 4592 } 4578 4593 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); ··· 4705 4690 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4706 4691 if (IS_ERR(rm_res)) { 4707 4692 bitmap_zero(ud->bchan_map, ud->bchan_cnt); 4693 + irq_res.sets++; 4708 4694 } else { 4709 4695 bitmap_fill(ud->bchan_map, ud->bchan_cnt); 4710 4696 for (i = 0; i < rm_res->sets; i++) 4711 4697 udma_mark_resource_ranges(ud, ud->bchan_map, 4712 4698 &rm_res->desc[i], 4713 4699 "bchan"); 4700 + irq_res.sets += rm_res->sets; 4714 4701 } 4715 - irq_res.sets += rm_res->sets; 4716 4702 } 4717 4703 4718 4704 /* tchan ranges */ ··· 4721 4705 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4722 4706 if (IS_ERR(rm_res)) { 4723 4707 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4708 + irq_res.sets += 2; 4724 4709 } else { 4725 4710 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4726 4711 for (i = 0; i < rm_res->sets; i++) 4727 4712 udma_mark_resource_ranges(ud, ud->tchan_map, 4728 4713 &rm_res->desc[i], 4729 4714 "tchan"); 4715 + irq_res.sets += rm_res->sets * 2; 4730 4716 } 
4731 - irq_res.sets += rm_res->sets * 2; 4732 4717 } 4733 4718 4734 4719 /* rchan ranges */ ··· 4737 4720 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4738 4721 if (IS_ERR(rm_res)) { 4739 4722 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4723 + irq_res.sets += 2; 4740 4724 } else { 4741 4725 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4742 4726 for (i = 0; i < rm_res->sets; i++) 4743 4727 udma_mark_resource_ranges(ud, ud->rchan_map, 4744 4728 &rm_res->desc[i], 4745 4729 "rchan"); 4730 + irq_res.sets += rm_res->sets * 2; 4746 4731 } 4747 - irq_res.sets += rm_res->sets * 2; 4748 4732 } 4749 4733 4750 4734 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4735 + if (!irq_res.desc) 4736 + return -ENOMEM; 4751 4737 if (ud->bchan_cnt) { 4752 4738 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4753 - for (i = 0; i < rm_res->sets; i++) { 4754 - irq_res.desc[i].start = rm_res->desc[i].start + 4755 - oes->bcdma_bchan_ring; 4756 - irq_res.desc[i].num = rm_res->desc[i].num; 4739 + if (IS_ERR(rm_res)) { 4740 + irq_res.desc[0].start = oes->bcdma_bchan_ring; 4741 + irq_res.desc[0].num = ud->bchan_cnt; 4742 + i = 1; 4743 + } else { 4744 + for (i = 0; i < rm_res->sets; i++) { 4745 + irq_res.desc[i].start = rm_res->desc[i].start + 4746 + oes->bcdma_bchan_ring; 4747 + irq_res.desc[i].num = rm_res->desc[i].num; 4748 + } 4757 4749 } 4758 4750 } 4759 4751 if (ud->tchan_cnt) { 4760 4752 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4761 - for (j = 0; j < rm_res->sets; j++, i += 2) { 4762 - irq_res.desc[i].start = rm_res->desc[j].start + 4763 - oes->bcdma_tchan_data; 4764 - irq_res.desc[i].num = rm_res->desc[j].num; 4753 + if (IS_ERR(rm_res)) { 4754 + irq_res.desc[i].start = oes->bcdma_tchan_data; 4755 + irq_res.desc[i].num = ud->tchan_cnt; 4756 + irq_res.desc[i + 1].start = oes->bcdma_tchan_ring; 4757 + irq_res.desc[i + 1].num = ud->tchan_cnt; 4758 + i += 2; 4759 + } else { 4760 + for (j = 0; j < rm_res->sets; j++, i += 2) { 4761 + irq_res.desc[i].start = 
rm_res->desc[j].start + 4762 + oes->bcdma_tchan_data; 4763 + irq_res.desc[i].num = rm_res->desc[j].num; 4765 4764 4766 - irq_res.desc[i + 1].start = rm_res->desc[j].start + 4767 - oes->bcdma_tchan_ring; 4768 - irq_res.desc[i + 1].num = rm_res->desc[j].num; 4765 + irq_res.desc[i + 1].start = rm_res->desc[j].start + 4766 + oes->bcdma_tchan_ring; 4767 + irq_res.desc[i + 1].num = rm_res->desc[j].num; 4768 + } 4769 4769 } 4770 4770 } 4771 4771 if (ud->rchan_cnt) { 4772 4772 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4773 - for (j = 0; j < rm_res->sets; j++, i += 2) { 4774 - irq_res.desc[i].start = rm_res->desc[j].start + 4775 - oes->bcdma_rchan_data; 4776 - irq_res.desc[i].num = rm_res->desc[j].num; 4773 + if (IS_ERR(rm_res)) { 4774 + irq_res.desc[i].start = oes->bcdma_rchan_data; 4775 + irq_res.desc[i].num = ud->rchan_cnt; 4776 + irq_res.desc[i + 1].start = oes->bcdma_rchan_ring; 4777 + irq_res.desc[i + 1].num = ud->rchan_cnt; 4778 + i += 2; 4779 + } else { 4780 + for (j = 0; j < rm_res->sets; j++, i += 2) { 4781 + irq_res.desc[i].start = rm_res->desc[j].start + 4782 + oes->bcdma_rchan_data; 4783 + irq_res.desc[i].num = rm_res->desc[j].num; 4777 4784 4778 - irq_res.desc[i + 1].start = rm_res->desc[j].start + 4779 - oes->bcdma_rchan_ring; 4780 - irq_res.desc[i + 1].num = rm_res->desc[j].num; 4785 + irq_res.desc[i + 1].start = rm_res->desc[j].start + 4786 + oes->bcdma_rchan_ring; 4787 + irq_res.desc[i + 1].num = rm_res->desc[j].num; 4788 + } 4781 4789 } 4782 4790 } 4783 4791 ··· 4900 4858 if (IS_ERR(rm_res)) { 4901 4859 /* all rflows are assigned exclusively to Linux */ 4902 4860 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); 4861 + irq_res.sets = 1; 4903 4862 } else { 4904 4863 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); 4905 4864 for (i = 0; i < rm_res->sets; i++) 4906 4865 udma_mark_resource_ranges(ud, ud->rflow_in_use, 4907 4866 &rm_res->desc[i], "rflow"); 4867 + irq_res.sets = rm_res->sets; 4908 4868 } 4909 - irq_res.sets = rm_res->sets; 4910 4869 4911 4870 /* 
tflow ranges */ 4912 4871 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4913 4872 if (IS_ERR(rm_res)) { 4914 4873 /* all tflows are assigned exclusively to Linux */ 4915 4874 bitmap_zero(ud->tflow_map, ud->tflow_cnt); 4875 + irq_res.sets++; 4916 4876 } else { 4917 4877 bitmap_fill(ud->tflow_map, ud->tflow_cnt); 4918 4878 for (i = 0; i < rm_res->sets; i++) 4919 4879 udma_mark_resource_ranges(ud, ud->tflow_map, 4920 4880 &rm_res->desc[i], "tflow"); 4881 + irq_res.sets += rm_res->sets; 4921 4882 } 4922 - irq_res.sets += rm_res->sets; 4923 4883 4924 4884 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4885 + if (!irq_res.desc) 4886 + return -ENOMEM; 4925 4887 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4926 - for (i = 0; i < rm_res->sets; i++) { 4927 - irq_res.desc[i].start = rm_res->desc[i].start + 4928 - oes->pktdma_tchan_flow; 4929 - irq_res.desc[i].num = rm_res->desc[i].num; 4888 + if (IS_ERR(rm_res)) { 4889 + irq_res.desc[0].start = oes->pktdma_tchan_flow; 4890 + irq_res.desc[0].num = ud->tflow_cnt; 4891 + i = 1; 4892 + } else { 4893 + for (i = 0; i < rm_res->sets; i++) { 4894 + irq_res.desc[i].start = rm_res->desc[i].start + 4895 + oes->pktdma_tchan_flow; 4896 + irq_res.desc[i].num = rm_res->desc[i].num; 4897 + } 4930 4898 } 4931 4899 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4932 - for (j = 0; j < rm_res->sets; j++, i++) { 4933 - irq_res.desc[i].start = rm_res->desc[j].start + 4934 - oes->pktdma_rchan_flow; 4935 - irq_res.desc[i].num = rm_res->desc[j].num; 4900 + if (IS_ERR(rm_res)) { 4901 + irq_res.desc[i].start = oes->pktdma_rchan_flow; 4902 + irq_res.desc[i].num = ud->rflow_cnt; 4903 + } else { 4904 + for (j = 0; j < rm_res->sets; j++, i++) { 4905 + irq_res.desc[i].start = rm_res->desc[j].start + 4906 + oes->pktdma_rchan_flow; 4907 + irq_res.desc[i].num = rm_res->desc[j].num; 4908 + } 4936 4909 } 4937 4910 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4938 4911 kfree(irq_res.desc);
+9 -10
drivers/gpio/gpio-dln2.c
··· 46 46 struct dln2_gpio { 47 47 struct platform_device *pdev; 48 48 struct gpio_chip gpio; 49 + struct irq_chip irqchip; 49 50 50 51 /* 51 52 * Cache pin direction to save us one transfer, since the hardware has ··· 384 383 mutex_unlock(&dln2->irq_lock); 385 384 } 386 385 387 - static struct irq_chip dln2_gpio_irqchip = { 388 - .name = "dln2-irq", 389 - .irq_mask = dln2_irq_mask, 390 - .irq_unmask = dln2_irq_unmask, 391 - .irq_set_type = dln2_irq_set_type, 392 - .irq_bus_lock = dln2_irq_bus_lock, 393 - .irq_bus_sync_unlock = dln2_irq_bus_unlock, 394 - }; 395 - 396 386 static void dln2_gpio_event(struct platform_device *pdev, u16 echo, 397 387 const void *data, int len) 398 388 { ··· 465 473 dln2->gpio.direction_output = dln2_gpio_direction_output; 466 474 dln2->gpio.set_config = dln2_gpio_set_config; 467 475 476 + dln2->irqchip.name = "dln2-irq", 477 + dln2->irqchip.irq_mask = dln2_irq_mask, 478 + dln2->irqchip.irq_unmask = dln2_irq_unmask, 479 + dln2->irqchip.irq_set_type = dln2_irq_set_type, 480 + dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock, 481 + dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock, 482 + 468 483 girq = &dln2->gpio.irq; 469 - girq->chip = &dln2_gpio_irqchip; 484 + girq->chip = &dln2->irqchip; 470 485 /* The event comes from the outside so no parent handler */ 471 486 girq->parent_handler = NULL; 472 487 girq->num_parents = 0;
+1 -5
drivers/gpio/gpio-virtio.c
··· 100 100 virtqueue_kick(vgpio->request_vq); 101 101 mutex_unlock(&vgpio->lock); 102 102 103 - if (!wait_for_completion_timeout(&line->completion, HZ)) { 104 - dev_err(dev, "GPIO operation timed out\n"); 105 - ret = -ETIMEDOUT; 106 - goto out; 107 - } 103 + wait_for_completion(&line->completion); 108 104 109 105 if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) { 110 106 dev_err(dev, "GPIO request failed: %d\n", gpio);
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 3070 3070 AMD_PG_SUPPORT_CP | 3071 3071 AMD_PG_SUPPORT_GDS | 3072 3072 AMD_PG_SUPPORT_RLC_SMU_HS)) { 3073 - WREG32(mmRLC_JUMP_TABLE_RESTORE, 3074 - adev->gfx.rlc.cp_table_gpu_addr >> 8); 3073 + WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE, 3074 + adev->gfx.rlc.cp_table_gpu_addr >> 8); 3075 3075 gfx_v9_0_init_gfx_power_gating(adev); 3076 3076 } 3077 3077 }
-1
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
··· 162 162 ENABLE_ADVANCED_DRIVER_MODEL, 1); 163 163 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 164 164 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 165 - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); 166 165 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 167 166 MTYPE, MTYPE_UC);/* XXX for emulation. */ 168 167 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
-1
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
··· 196 196 ENABLE_ADVANCED_DRIVER_MODEL, 1); 197 197 tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, 198 198 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 199 - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); 200 199 tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, 201 200 MTYPE, MTYPE_UC); /* UC, uncached */ 202 201
-1
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
··· 197 197 ENABLE_ADVANCED_DRIVER_MODEL, 1); 198 198 tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, 199 199 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 200 - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); 201 200 tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, 202 201 MTYPE, MTYPE_UC); /* UC, uncached */ 203 202
+8
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1808 1808 return 0; 1809 1809 } 1810 1810 1811 + /* 1812 + * Pair the operations did in gmc_v9_0_hw_init and thus maintain 1813 + * a correct cached state for GMC. Otherwise, the "gate" again 1814 + * operation on S3 resuming will fail due to wrong cached state. 1815 + */ 1816 + if (adev->mmhub.funcs->update_power_gating) 1817 + adev->mmhub.funcs->update_power_gating(adev, false); 1818 + 1811 1819 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); 1812 1820 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); 1813 1821
+4 -5
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
··· 145 145 ENABLE_ADVANCED_DRIVER_MODEL, 1); 146 146 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 147 147 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 148 - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); 149 148 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 150 149 MTYPE, MTYPE_UC);/* XXX for emulation. */ 151 150 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); ··· 301 302 if (amdgpu_sriov_vf(adev)) 302 303 return; 303 304 304 - if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { 305 - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); 306 - 307 - } 305 + if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB) 306 + amdgpu_dpm_set_powergating_by_smu(adev, 307 + AMD_IP_BLOCK_TYPE_GMC, 308 + enable); 308 309 } 309 310 310 311 static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
-1
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
··· 165 165 ENABLE_ADVANCED_DRIVER_MODEL, 1); 166 166 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 167 167 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 168 - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); 169 168 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 170 169 MTYPE, MTYPE_UC);/* XXX for emulation. */ 171 170 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
-1
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
··· 267 267 ENABLE_ADVANCED_DRIVER_MODEL, 1); 268 268 tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, 269 269 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 270 - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); 271 270 tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, 272 271 MTYPE, MTYPE_UC); /* UC, uncached */ 273 272
-1
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
··· 194 194 ENABLE_ADVANCED_DRIVER_MODEL, 1); 195 195 tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, 196 196 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 197 - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); 198 197 tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, 199 198 MTYPE, MTYPE_UC); /* UC, uncached */ 200 199
-2
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
··· 190 190 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, 191 191 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 192 192 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, 193 - ECO_BITS, 0); 194 - tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, 195 193 MTYPE, MTYPE_UC);/* XXX for emulation. */ 196 194 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, 197 195 ATC_EN, 1);
+5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1051 1051 return 0; 1052 1052 } 1053 1053 1054 + /* Reset DMCUB if it was previously running - before we overwrite its memory. */ 1055 + status = dmub_srv_hw_reset(dmub_srv); 1056 + if (status != DMUB_STATUS_OK) 1057 + DRM_WARN("Error resetting DMUB HW: %d\n", status); 1058 + 1054 1059 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; 1055 1060 1056 1061 fw_inst_const = dmub_fw->data +
+1
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
··· 101 101 .z10_restore = dcn31_z10_restore, 102 102 .z10_save_init = dcn31_z10_save_init, 103 103 .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, 104 + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, 104 105 .update_visual_confirm_color = dcn20_update_visual_confirm_color, 105 106 }; 106 107
+6 -1
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
··· 1328 1328 pp_dpm_powergate_vce(handle, gate); 1329 1329 break; 1330 1330 case AMD_IP_BLOCK_TYPE_GMC: 1331 - pp_dpm_powergate_mmhub(handle); 1331 + /* 1332 + * For now, this is only used on PICASSO. 1333 + * And only "gate" operation is supported. 1334 + */ 1335 + if (gate) 1336 + pp_dpm_powergate_mmhub(handle); 1332 1337 break; 1333 1338 case AMD_IP_BLOCK_TYPE_GFX: 1334 1339 ret = pp_dpm_powergate_gfx(handle, gate);
+3
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
··· 191 191 kfree(smu_table->watermarks_table); 192 192 smu_table->watermarks_table = NULL; 193 193 194 + kfree(smu_table->gpu_metrics_table); 195 + smu_table->gpu_metrics_table = NULL; 196 + 194 197 return 0; 195 198 } 196 199
+3
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 198 198 199 199 int smu_v13_0_check_fw_version(struct smu_context *smu) 200 200 { 201 + struct amdgpu_device *adev = smu->adev; 201 202 uint32_t if_version = 0xff, smu_version = 0xff; 202 203 uint16_t smu_major; 203 204 uint8_t smu_minor, smu_debug; ··· 211 210 smu_major = (smu_version >> 16) & 0xffff; 212 211 smu_minor = (smu_version >> 8) & 0xff; 213 212 smu_debug = (smu_version >> 0) & 0xff; 213 + if (smu->is_apu) 214 + adev->pm.fw_version = smu_version; 214 215 215 216 switch (smu->adev->ip_versions[MP1_HWIP][0]) { 216 217 case IP_VERSION(13, 0, 2):
+4 -1
drivers/gpu/drm/ast/ast_mode.c
··· 1121 1121 if (crtc->state) 1122 1122 crtc->funcs->atomic_destroy_state(crtc, crtc->state); 1123 1123 1124 - __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); 1124 + if (ast_state) 1125 + __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); 1126 + else 1127 + __drm_atomic_helper_crtc_reset(crtc, NULL); 1125 1128 } 1126 1129 1127 1130 static struct drm_crtc_state *
+7 -1
drivers/gpu/drm/drm_fb_helper.c
··· 1743 1743 sizes->fb_width, sizes->fb_height); 1744 1744 1745 1745 info->par = fb_helper; 1746 - snprintf(info->fix.id, sizeof(info->fix.id), "%s", 1746 + /* 1747 + * The DRM drivers fbdev emulation device name can be confusing if the 1748 + * driver name also has a "drm" suffix on it. Leading to names such as 1749 + * "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't 1750 + * be changed due user-space tools (e.g: pm-utils) matching against it. 1751 + */ 1752 + snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb", 1747 1753 fb_helper->dev->driver->name); 1748 1754 1749 1755 }
+1 -1
drivers/gpu/drm/i915/display/intel_dmc.c
··· 596 596 continue; 597 597 598 598 offset = readcount + dmc->dmc_info[id].dmc_offset * 4; 599 - if (fw->size - offset < 0) { 599 + if (offset > fw->size) { 600 600 drm_err(&dev_priv->drm, "Reading beyond the fw_size\n"); 601 601 continue; 602 602 }
+3 -3
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
··· 1662 1662 GEM_BUG_ON(intel_context_is_parent(cn)); 1663 1663 1664 1664 list_del_init(&cn->guc_id.link); 1665 - ce->guc_id = cn->guc_id; 1665 + ce->guc_id.id = cn->guc_id.id; 1666 1666 1667 - spin_lock(&ce->guc_state.lock); 1667 + spin_lock(&cn->guc_state.lock); 1668 1668 clr_context_registered(cn); 1669 - spin_unlock(&ce->guc_state.lock); 1669 + spin_unlock(&cn->guc_state.lock); 1670 1670 1671 1671 set_context_guc_id_invalid(cn); 1672 1672
+7 -5
drivers/gpu/drm/mediatek/mtk_hdmi.c
··· 1224 1224 return MODE_BAD; 1225 1225 } 1226 1226 1227 - if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode)) 1228 - return MODE_BAD; 1227 + if (hdmi->conf) { 1228 + if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode)) 1229 + return MODE_BAD; 1229 1230 1230 - if (hdmi->conf->max_mode_clock && 1231 - mode->clock > hdmi->conf->max_mode_clock) 1232 - return MODE_CLOCK_HIGH; 1231 + if (hdmi->conf->max_mode_clock && 1232 + mode->clock > hdmi->conf->max_mode_clock) 1233 + return MODE_CLOCK_HIGH; 1234 + } 1233 1235 1234 1236 if (mode->clock < 27000) 1235 1237 return MODE_CLOCK_LOW;
+1 -1
drivers/gpu/drm/tiny/simpledrm.c
··· 458 458 { 459 459 struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) }; 460 460 461 - mode.clock = 60 /* Hz */ * mode.hdisplay * mode.vdisplay; 461 + mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */; 462 462 drm_mode_set_name(&mode); 463 463 464 464 return mode;
+15
drivers/hid/hid-holtek-mouse.c
··· 65 65 static int holtek_mouse_probe(struct hid_device *hdev, 66 66 const struct hid_device_id *id) 67 67 { 68 + int ret; 69 + 68 70 if (!hid_is_usb(hdev)) 69 71 return -EINVAL; 72 + 73 + ret = hid_parse(hdev); 74 + if (ret) { 75 + hid_err(hdev, "hid parse failed: %d\n", ret); 76 + return ret; 77 + } 78 + 79 + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 80 + if (ret) { 81 + hid_err(hdev, "hw start failed: %d\n", ret); 82 + return ret; 83 + } 84 + 70 85 return 0; 71 86 } 72 87
+3
drivers/hid/hid-vivaldi.c
··· 57 57 int ret; 58 58 59 59 drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL); 60 + if (!drvdata) 61 + return -ENOMEM; 62 + 60 63 hid_set_drvdata(hdev, drvdata); 61 64 62 65 ret = hid_parse(hdev);
+62 -44
drivers/hwmon/lm90.c
··· 35 35 * explicitly as max6659, or if its address is not 0x4c. 36 36 * These chips lack the remote temperature offset feature. 37 37 * 38 - * This driver also supports the MAX6654 chip made by Maxim. This chip can 39 - * be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is 40 - * otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available 41 - * by setting the configuration register accordingly, and is done during 42 - * initialization. Extended precision is only available at conversion rates 43 - * of 1 Hz and slower. Note that extended precision is not enabled by 44 - * default, as this driver initializes all chips to 2 Hz by design. 38 + * This driver also supports the MAX6654 chip made by Maxim. This chip can be 39 + * at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is similar 40 + * to MAX6657/MAX6658/MAX6659, but does not support critical temperature 41 + * limits. Extended range is available by setting the configuration register 42 + * accordingly, and is done during initialization. Extended precision is only 43 + * available at conversion rates of 1 Hz and slower. Note that extended 44 + * precision is not enabled by default, as this driver initializes all chips 45 + * to 2 Hz by design. 45 46 * 46 47 * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and 47 48 * MAX6692 chips made by Maxim. 
These are again similar to the LM86, ··· 189 188 #define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */ 190 189 #define LM90_HAVE_EXTENDED_TEMP (1 << 8) /* extended temperature support*/ 191 190 #define LM90_PAUSE_FOR_CONFIG (1 << 9) /* Pause conversion for config */ 191 + #define LM90_HAVE_CRIT (1 << 10)/* Chip supports CRIT/OVERT register */ 192 + #define LM90_HAVE_CRIT_ALRM_SWP (1 << 11)/* critical alarm bits swapped */ 192 193 193 194 /* LM90 status */ 194 195 #define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */ ··· 200 197 #define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */ 201 198 #define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */ 202 199 #define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */ 200 + #define LM90_STATUS_BUSY (1 << 7) /* conversion is ongoing */ 203 201 204 202 #define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */ 205 203 #define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */ ··· 358 354 static const struct lm90_params lm90_params[] = { 359 355 [adm1032] = { 360 356 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT 361 - | LM90_HAVE_BROKEN_ALERT, 357 + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT, 362 358 .alert_alarms = 0x7c, 363 359 .max_convrate = 10, 364 360 }, 365 361 [adt7461] = { 366 362 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT 367 - | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP, 363 + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP 364 + | LM90_HAVE_CRIT, 368 365 .alert_alarms = 0x7c, 369 366 .max_convrate = 10, 370 367 }, 371 368 [g781] = { 372 369 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT 373 - | LM90_HAVE_BROKEN_ALERT, 370 + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT, 374 371 .alert_alarms = 0x7c, 375 372 .max_convrate = 8, 376 373 }, 377 374 [lm86] = { 378 - .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT, 375 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT 376 + | LM90_HAVE_CRIT, 379 377 
.alert_alarms = 0x7b, 380 378 .max_convrate = 9, 381 379 }, 382 380 [lm90] = { 383 - .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT, 381 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT 382 + | LM90_HAVE_CRIT, 384 383 .alert_alarms = 0x7b, 385 384 .max_convrate = 9, 386 385 }, 387 386 [lm99] = { 388 - .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT, 387 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT 388 + | LM90_HAVE_CRIT, 389 389 .alert_alarms = 0x7b, 390 390 .max_convrate = 9, 391 391 }, 392 392 [max6646] = { 393 + .flags = LM90_HAVE_CRIT, 393 394 .alert_alarms = 0x7c, 394 395 .max_convrate = 6, 395 396 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, ··· 405 396 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, 406 397 }, 407 398 [max6657] = { 408 - .flags = LM90_PAUSE_FOR_CONFIG, 399 + .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT, 409 400 .alert_alarms = 0x7c, 410 401 .max_convrate = 8, 411 402 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, 412 403 }, 413 404 [max6659] = { 414 - .flags = LM90_HAVE_EMERGENCY, 405 + .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT, 415 406 .alert_alarms = 0x7c, 416 407 .max_convrate = 8, 417 408 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, 418 409 }, 419 410 [max6680] = { 420 - .flags = LM90_HAVE_OFFSET, 411 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT 412 + | LM90_HAVE_CRIT_ALRM_SWP, 421 413 .alert_alarms = 0x7c, 422 414 .max_convrate = 7, 423 415 }, 424 416 [max6696] = { 425 417 .flags = LM90_HAVE_EMERGENCY 426 - | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3, 418 + | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT, 427 419 .alert_alarms = 0x1c7c, 428 420 .max_convrate = 6, 429 421 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, 430 422 }, 431 423 [w83l771] = { 432 - .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT, 424 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT, 433 425 .alert_alarms = 0x7c, 434 426 .max_convrate = 8, 435 427 }, 436 428 [sa56004] = { 437 - .flags = LM90_HAVE_OFFSET 
| LM90_HAVE_REM_LIMIT_EXT, 429 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT, 438 430 .alert_alarms = 0x7b, 439 431 .max_convrate = 9, 440 432 .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL, 441 433 }, 442 434 [tmp451] = { 443 435 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT 444 - | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP, 436 + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT, 445 437 .alert_alarms = 0x7c, 446 438 .max_convrate = 9, 447 439 .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL, 448 440 }, 449 441 [tmp461] = { 450 442 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT 451 - | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP, 443 + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT, 452 444 .alert_alarms = 0x7c, 453 445 .max_convrate = 9, 454 446 .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL, ··· 678 668 struct i2c_client *client = data->client; 679 669 int val; 680 670 681 - val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT); 682 - if (val < 0) 683 - return val; 684 - data->temp8[LOCAL_CRIT] = val; 671 + if (data->flags & LM90_HAVE_CRIT) { 672 + val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT); 673 + if (val < 0) 674 + return val; 675 + data->temp8[LOCAL_CRIT] = val; 685 676 686 - val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT); 687 - if (val < 0) 688 - return val; 689 - data->temp8[REMOTE_CRIT] = val; 677 + val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT); 678 + if (val < 0) 679 + return val; 680 + data->temp8[REMOTE_CRIT] = val; 690 681 691 - val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST); 692 - if (val < 0) 693 - return val; 694 - data->temp_hyst = val; 682 + val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST); 683 + if (val < 0) 684 + return val; 685 + data->temp_hyst = val; 686 + } 695 687 696 688 val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH); 697 689 if (val < 0) ··· 821 809 val = lm90_read_reg(client, LM90_REG_R_STATUS); 822 810 if (val < 0) 823 811 return val; 824 - 
data->alarms = val; /* lower 8 bit of alarms */ 812 + data->alarms = val & ~LM90_STATUS_BUSY; 825 813 826 814 if (data->kind == max6696) { 827 815 val = lm90_select_remote_channel(data, 1); ··· 1172 1160 else 1173 1161 temp = temp_from_s8(data->temp8[LOCAL_CRIT]); 1174 1162 1175 - /* prevent integer underflow */ 1176 - val = max(val, -128000l); 1163 + /* prevent integer overflow/underflow */ 1164 + val = clamp_val(val, -128000l, 255000l); 1177 1165 1178 1166 data->temp_hyst = hyst_to_reg(temp - val); 1179 1167 err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST, ··· 1204 1192 static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 }; 1205 1193 static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 }; 1206 1194 static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 }; 1195 + static const u8 lm90_crit_alarm_bits_swapped[3] = { 1, 0, 9 }; 1207 1196 static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 }; 1208 1197 static const u8 lm90_fault_bits[3] = { 0, 2, 10 }; 1209 1198 ··· 1230 1217 *val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1; 1231 1218 break; 1232 1219 case hwmon_temp_crit_alarm: 1233 - *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1; 1220 + if (data->flags & LM90_HAVE_CRIT_ALRM_SWP) 1221 + *val = (data->alarms >> lm90_crit_alarm_bits_swapped[channel]) & 1; 1222 + else 1223 + *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1; 1234 1224 break; 1235 1225 case hwmon_temp_emergency_alarm: 1236 1226 *val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1; ··· 1481 1465 if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0) 1482 1466 return -ENODEV; 1483 1467 1484 - if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) { 1468 + if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) { 1485 1469 config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2); 1486 1470 if (config2 < 0) 1487 1471 return -ENODEV; 1488 - } else 1489 - config2 = 0; /* Make compiler happy */ 1472 + } 1490 1473 1491 1474 if ((address == 
0x4C || address == 0x4D) 1492 1475 && man_id == 0x01) { /* National Semiconductor */ ··· 1918 1903 info->config = data->channel_config; 1919 1904 1920 1905 data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | 1921 - HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM | 1922 - HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM; 1906 + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM; 1923 1907 data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | 1924 - HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM | 1925 - HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT; 1908 + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_FAULT; 1909 + 1910 + if (data->flags & LM90_HAVE_CRIT) { 1911 + data->channel_config[0] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST; 1912 + data->channel_config[1] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST; 1913 + } 1926 1914 1927 1915 if (data->flags & LM90_HAVE_OFFSET) 1928 1916 data->channel_config[1] |= HWMON_T_OFFSET;
+57 -7
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 1594 1594 { 1595 1595 struct hns_roce_cmq_desc desc; 1596 1596 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data; 1597 + u32 clock_cycles_of_1us; 1597 1598 1598 1599 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM, 1599 1600 false); 1600 1601 1601 - hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8); 1602 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) 1603 + clock_cycles_of_1us = HNS_ROCE_1NS_CFG; 1604 + else 1605 + clock_cycles_of_1us = HNS_ROCE_1US_CFG; 1606 + 1607 + hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us); 1602 1608 hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT); 1603 1609 1604 1610 return hns_roce_cmq_send(hr_dev, &desc, 1); ··· 4808 4802 return ret; 4809 4803 } 4810 4804 4805 + static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) 4806 + { 4807 + #define QP_ACK_TIMEOUT_MAX_HIP08 20 4808 + #define QP_ACK_TIMEOUT_OFFSET 10 4809 + #define QP_ACK_TIMEOUT_MAX 31 4810 + 4811 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 4812 + if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) { 4813 + ibdev_warn(&hr_dev->ib_dev, 4814 + "Local ACK timeout shall be 0 to 20.\n"); 4815 + return false; 4816 + } 4817 + *timeout += QP_ACK_TIMEOUT_OFFSET; 4818 + } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { 4819 + if (*timeout > QP_ACK_TIMEOUT_MAX) { 4820 + ibdev_warn(&hr_dev->ib_dev, 4821 + "Local ACK timeout shall be 0 to 31.\n"); 4822 + return false; 4823 + } 4824 + } 4825 + 4826 + return true; 4827 + } 4828 + 4811 4829 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, 4812 4830 const struct ib_qp_attr *attr, 4813 4831 int attr_mask, ··· 4841 4811 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4842 4812 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4843 4813 int ret = 0; 4814 + u8 timeout; 4844 4815 4845 4816 if (attr_mask & IB_QP_AV) { 4846 4817 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context, ··· 4851 4820 } 4852 
4821 4853 4822 if (attr_mask & IB_QP_TIMEOUT) { 4854 - if (attr->timeout < 31) { 4855 - hr_reg_write(context, QPC_AT, attr->timeout); 4823 + timeout = attr->timeout; 4824 + if (check_qp_timeout_cfg_range(hr_dev, &timeout)) { 4825 + hr_reg_write(context, QPC_AT, timeout); 4856 4826 hr_reg_clear(qpc_mask, QPC_AT); 4857 - } else { 4858 - ibdev_warn(&hr_dev->ib_dev, 4859 - "Local ACK timeout shall be 0 to 30.\n"); 4860 4827 } 4861 4828 } 4862 4829 ··· 4911 4882 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask); 4912 4883 4913 4884 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 4914 - hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer); 4885 + hr_reg_write(context, QPC_MIN_RNR_TIME, 4886 + hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ? 4887 + HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer); 4915 4888 hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME); 4916 4889 } 4917 4890 ··· 5530 5499 5531 5500 hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count); 5532 5501 hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT); 5502 + 5503 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 5504 + if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) { 5505 + dev_info(hr_dev->dev, 5506 + "cq_period(%u) reached the upper limit, adjusted to 65.\n", 5507 + cq_period); 5508 + cq_period = HNS_ROCE_MAX_CQ_PERIOD; 5509 + } 5510 + cq_period *= HNS_ROCE_CLOCK_ADJUST; 5511 + } 5533 5512 hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period); 5534 5513 hr_reg_clear(cqc_mask, CQC_CQ_PERIOD); 5535 5514 ··· 5934 5893 to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift)); 5935 5894 hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX); 5936 5895 hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt); 5896 + 5897 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 5898 + if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) { 5899 + dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n", 5900 + eq->eq_period); 5901 + eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD; 5902 + } 5903 + 
eq->eq_period *= HNS_ROCE_CLOCK_ADJUST; 5904 + } 5937 5905 5938 5906 hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period); 5939 5907 hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
+8
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
··· 1444 1444 struct list_head node; /* all dips are on a list */ 1445 1445 }; 1446 1446 1447 + /* only for RNR timeout issue of HIP08 */ 1448 + #define HNS_ROCE_CLOCK_ADJUST 1000 1449 + #define HNS_ROCE_MAX_CQ_PERIOD 65 1450 + #define HNS_ROCE_MAX_EQ_PERIOD 65 1451 + #define HNS_ROCE_RNR_TIMER_10NS 1 1452 + #define HNS_ROCE_1US_CFG 999 1453 + #define HNS_ROCE_1NS_CFG 0 1454 + 1447 1455 #define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0 1448 1456 #define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0 1449 1457 #define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
+1 -1
drivers/infiniband/hw/hns/hns_roce_srq.c
··· 259 259 260 260 static void free_srq_wrid(struct hns_roce_srq *srq) 261 261 { 262 - kfree(srq->wrid); 262 + kvfree(srq->wrid); 263 263 srq->wrid = NULL; 264 264 } 265 265
+1 -1
drivers/infiniband/hw/qib/qib_user_sdma.c
··· 941 941 &addrlimit) || 942 942 addrlimit > type_max(typeof(pkt->addrlimit))) { 943 943 ret = -EINVAL; 944 - goto free_pbc; 944 + goto free_pkt; 945 945 } 946 946 pkt->addrlimit = addrlimit; 947 947
+12 -9
drivers/input/misc/iqs626a.c
··· 456 456 unsigned int suspend_mode; 457 457 }; 458 458 459 - static int iqs626_parse_events(struct iqs626_private *iqs626, 460 - const struct fwnode_handle *ch_node, 461 - enum iqs626_ch_id ch_id) 459 + static noinline_for_stack int 460 + iqs626_parse_events(struct iqs626_private *iqs626, 461 + const struct fwnode_handle *ch_node, 462 + enum iqs626_ch_id ch_id) 462 463 { 463 464 struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg; 464 465 struct i2c_client *client = iqs626->client; ··· 605 604 return 0; 606 605 } 607 606 608 - static int iqs626_parse_ati_target(struct iqs626_private *iqs626, 609 - const struct fwnode_handle *ch_node, 610 - enum iqs626_ch_id ch_id) 607 + static noinline_for_stack int 608 + iqs626_parse_ati_target(struct iqs626_private *iqs626, 609 + const struct fwnode_handle *ch_node, 610 + enum iqs626_ch_id ch_id) 611 611 { 612 612 struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg; 613 613 struct i2c_client *client = iqs626->client; ··· 887 885 return 0; 888 886 } 889 887 890 - static int iqs626_parse_channel(struct iqs626_private *iqs626, 891 - const struct fwnode_handle *ch_node, 892 - enum iqs626_ch_id ch_id) 888 + static noinline_for_stack int 889 + iqs626_parse_channel(struct iqs626_private *iqs626, 890 + const struct fwnode_handle *ch_node, 891 + enum iqs626_ch_id ch_id) 893 892 { 894 893 struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg; 895 894 struct i2c_client *client = iqs626->client;
+7 -1
drivers/input/mouse/elantech.c
··· 1588 1588 */ 1589 1589 static int elantech_change_report_id(struct psmouse *psmouse) 1590 1590 { 1591 - unsigned char param[2] = { 0x10, 0x03 }; 1591 + /* 1592 + * NOTE: the code is expecting to receive param[] as an array of 3 1593 + * items (see __ps2_command()), even if in this case only 2 are 1594 + * actually needed. Make sure the array size is 3 to avoid potential 1595 + * stack out-of-bound accesses. 1596 + */ 1597 + unsigned char param[3] = { 0x10, 0x03 }; 1592 1598 1593 1599 if (elantech_write_reg_params(psmouse, 0x7, param) || 1594 1600 elantech_read_reg_params(psmouse, 0x7, param) ||
+21
drivers/input/serio/i8042-x86ia64io.h
··· 995 995 { } 996 996 }; 997 997 998 + static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = { 999 + { 1000 + /* ASUS ZenBook UX425UA */ 1001 + .matches = { 1002 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 1003 + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"), 1004 + }, 1005 + }, 1006 + { 1007 + /* ASUS ZenBook UM325UA */ 1008 + .matches = { 1009 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 1010 + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"), 1011 + }, 1012 + }, 1013 + { } 1014 + }; 1015 + 998 1016 #endif /* CONFIG_X86 */ 999 1017 1000 1018 #ifdef CONFIG_PNP ··· 1332 1314 1333 1315 if (dmi_check_system(i8042_dmi_kbdreset_table)) 1334 1316 i8042_kbdreset = true; 1317 + 1318 + if (dmi_check_system(i8042_dmi_probe_defer_table)) 1319 + i8042_probe_defer = true; 1335 1320 1336 1321 /* 1337 1322 * A20 was already enabled during early kernel init. But some buggy
+35 -19
drivers/input/serio/i8042.c
··· 45 45 module_param_named(unlock, i8042_unlock, bool, 0); 46 46 MODULE_PARM_DESC(unlock, "Ignore keyboard lock."); 47 47 48 + static bool i8042_probe_defer; 49 + module_param_named(probe_defer, i8042_probe_defer, bool, 0); 50 + MODULE_PARM_DESC(probe_defer, "Allow deferred probing."); 51 + 48 52 enum i8042_controller_reset_mode { 49 53 I8042_RESET_NEVER, 50 54 I8042_RESET_ALWAYS, ··· 715 711 * LCS/Telegraphics. 716 712 */ 717 713 718 - static int __init i8042_check_mux(void) 714 + static int i8042_check_mux(void) 719 715 { 720 716 unsigned char mux_version; 721 717 ··· 744 740 /* 745 741 * The following is used to test AUX IRQ delivery. 746 742 */ 747 - static struct completion i8042_aux_irq_delivered __initdata; 748 - static bool i8042_irq_being_tested __initdata; 743 + static struct completion i8042_aux_irq_delivered; 744 + static bool i8042_irq_being_tested; 749 745 750 - static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id) 746 + static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id) 751 747 { 752 748 unsigned long flags; 753 749 unsigned char str, data; ··· 774 770 * verifies success by readinng CTR. Used when testing for presence of AUX 775 771 * port. 776 772 */ 777 - static int __init i8042_toggle_aux(bool on) 773 + static int i8042_toggle_aux(bool on) 778 774 { 779 775 unsigned char param; 780 776 int i; ··· 802 798 * the presence of an AUX interface. 803 799 */ 804 800 805 - static int __init i8042_check_aux(void) 801 + static int i8042_check_aux(void) 806 802 { 807 803 int retval = -1; 808 804 bool irq_registered = false; ··· 1009 1005 1010 1006 if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) { 1011 1007 pr_err("Can't read CTR while initializing i8042\n"); 1012 - return -EIO; 1008 + return i8042_probe_defer ? 
-EPROBE_DEFER : -EIO; 1013 1009 } 1014 1010 1015 1011 } while (n < 2 || ctr[0] != ctr[1]); ··· 1324 1320 i8042_controller_reset(false); 1325 1321 } 1326 1322 1327 - static int __init i8042_create_kbd_port(void) 1323 + static int i8042_create_kbd_port(void) 1328 1324 { 1329 1325 struct serio *serio; 1330 1326 struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO]; ··· 1353 1349 return 0; 1354 1350 } 1355 1351 1356 - static int __init i8042_create_aux_port(int idx) 1352 + static int i8042_create_aux_port(int idx) 1357 1353 { 1358 1354 struct serio *serio; 1359 1355 int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx; ··· 1390 1386 return 0; 1391 1387 } 1392 1388 1393 - static void __init i8042_free_kbd_port(void) 1389 + static void i8042_free_kbd_port(void) 1394 1390 { 1395 1391 kfree(i8042_ports[I8042_KBD_PORT_NO].serio); 1396 1392 i8042_ports[I8042_KBD_PORT_NO].serio = NULL; 1397 1393 } 1398 1394 1399 - static void __init i8042_free_aux_ports(void) 1395 + static void i8042_free_aux_ports(void) 1400 1396 { 1401 1397 int i; 1402 1398 ··· 1406 1402 } 1407 1403 } 1408 1404 1409 - static void __init i8042_register_ports(void) 1405 + static void i8042_register_ports(void) 1410 1406 { 1411 1407 int i; 1412 1408 ··· 1447 1443 i8042_aux_irq_registered = i8042_kbd_irq_registered = false; 1448 1444 } 1449 1445 1450 - static int __init i8042_setup_aux(void) 1446 + static int i8042_setup_aux(void) 1451 1447 { 1452 1448 int (*aux_enable)(void); 1453 1449 int error; ··· 1489 1485 return error; 1490 1486 } 1491 1487 1492 - static int __init i8042_setup_kbd(void) 1488 + static int i8042_setup_kbd(void) 1493 1489 { 1494 1490 int error; 1495 1491 ··· 1539 1535 return 0; 1540 1536 } 1541 1537 1542 - static int __init i8042_probe(struct platform_device *dev) 1538 + static int i8042_probe(struct platform_device *dev) 1543 1539 { 1544 1540 int error; 1545 1541 ··· 1604 1600 .pm = &i8042_pm_ops, 1605 1601 #endif 1606 1602 }, 1603 + .probe = i8042_probe, 1607 1604 
.remove = i8042_remove, 1608 1605 .shutdown = i8042_shutdown, 1609 1606 }; ··· 1615 1610 1616 1611 static int __init i8042_init(void) 1617 1612 { 1618 - struct platform_device *pdev; 1619 1613 int err; 1620 1614 1621 1615 dbg_init(); ··· 1630 1626 /* Set this before creating the dev to allow i8042_command to work right away */ 1631 1627 i8042_present = true; 1632 1628 1633 - pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0); 1634 - if (IS_ERR(pdev)) { 1635 - err = PTR_ERR(pdev); 1629 + err = platform_driver_register(&i8042_driver); 1630 + if (err) 1636 1631 goto err_platform_exit; 1632 + 1633 + i8042_platform_device = platform_device_alloc("i8042", -1); 1634 + if (!i8042_platform_device) { 1635 + err = -ENOMEM; 1636 + goto err_unregister_driver; 1637 1637 } 1638 + 1639 + err = platform_device_add(i8042_platform_device); 1640 + if (err) 1641 + goto err_free_device; 1638 1642 1639 1643 bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block); 1640 1644 panic_blink = i8042_panic_blink; 1641 1645 1642 1646 return 0; 1643 1647 1648 + err_free_device: 1649 + platform_device_put(i8042_platform_device); 1650 + err_unregister_driver: 1651 + platform_driver_unregister(&i8042_driver); 1644 1652 err_platform_exit: 1645 1653 i8042_platform_exit(); 1646 1654 return err;
+1 -1
drivers/input/touchscreen/atmel_mxt_ts.c
··· 1882 1882 if (error) { 1883 1883 dev_err(&client->dev, "Error %d parsing object table\n", error); 1884 1884 mxt_free_object_table(data); 1885 - goto err_free_mem; 1885 + return error; 1886 1886 } 1887 1887 1888 1888 data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
+45 -1
drivers/input/touchscreen/elants_i2c.c
··· 117 117 #define ELAN_POWERON_DELAY_USEC 500 118 118 #define ELAN_RESET_DELAY_MSEC 20 119 119 120 + /* FW boot code version */ 121 + #define BC_VER_H_BYTE_FOR_EKTH3900x1_I2C 0x72 122 + #define BC_VER_H_BYTE_FOR_EKTH3900x2_I2C 0x82 123 + #define BC_VER_H_BYTE_FOR_EKTH3900x3_I2C 0x92 124 + #define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C 0x6D 125 + #define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C 0x6E 126 + #define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C 0x77 127 + #define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C 0x78 128 + #define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB 0x67 129 + #define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB 0x68 130 + #define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB 0x74 131 + #define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB 0x75 132 + 120 133 enum elants_chip_id { 121 134 EKTH3500, 122 135 EKTF3624, ··· 749 736 return 0; 750 737 } 751 738 739 + static bool elants_i2c_should_check_remark_id(struct elants_data *ts) 740 + { 741 + struct i2c_client *client = ts->client; 742 + const u8 bootcode_version = ts->iap_version; 743 + bool check; 744 + 745 + /* I2C eKTH3900 and eKTH5312 are NOT support Remark ID */ 746 + if ((bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x1_I2C) || 747 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x2_I2C) || 748 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x3_I2C) || 749 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C) || 750 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C) || 751 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C) || 752 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C) || 753 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB) || 754 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB) || 755 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB) || 756 + (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB)) { 757 + dev_dbg(&client->dev, 758 + "eKTH3900/eKTH5312(0x%02x) are not support remark id\n", 759 + bootcode_version); 760 + check = false; 761 + } else if 
(bootcode_version >= 0x60) { 762 + check = true; 763 + } else { 764 + check = false; 765 + } 766 + 767 + return check; 768 + } 769 + 752 770 static int elants_i2c_do_update_firmware(struct i2c_client *client, 753 771 const struct firmware *fw, 754 772 bool force) ··· 793 749 u16 send_id; 794 750 int page, n_fw_pages; 795 751 int error; 796 - bool check_remark_id = ts->iap_version >= 0x60; 752 + bool check_remark_id = elants_i2c_should_check_remark_id(ts); 797 753 798 754 /* Recovery mode detection! */ 799 755 if (force) {
+26 -5
drivers/input/touchscreen/goodix.c
··· 102 102 { .id = "911", .data = &gt911_chip_data }, 103 103 { .id = "9271", .data = &gt911_chip_data }, 104 104 { .id = "9110", .data = &gt911_chip_data }, 105 + { .id = "9111", .data = &gt911_chip_data }, 105 106 { .id = "927", .data = &gt911_chip_data }, 106 107 { .id = "928", .data = &gt911_chip_data }, 107 108 ··· 651 650 652 651 usleep_range(6000, 10000); /* T4: > 5ms */ 653 652 654 - /* end select I2C slave addr */ 655 - error = gpiod_direction_input(ts->gpiod_rst); 656 - if (error) 657 - goto error; 653 + /* 654 + * Put the reset pin back in to input / high-impedance mode to save 655 + * power. Only do this in the non ACPI case since some ACPI boards 656 + * don't have a pull-up, so there the reset pin must stay active-high. 657 + */ 658 + if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) { 659 + error = gpiod_direction_input(ts->gpiod_rst); 660 + if (error) 661 + goto error; 662 + } 658 663 659 664 return 0; 660 665 ··· 794 787 return -EINVAL; 795 788 } 796 789 790 + /* 791 + * Normally we put the reset pin in input / high-impedance mode to save 792 + * power. But some x86/ACPI boards don't have a pull-up, so for the ACPI 793 + * case, leave the pin as is. This results in the pin not being touched 794 + * at all on x86/ACPI boards, except when needed for error-recover. 795 + */ 796 + ts->gpiod_rst_flags = GPIOD_ASIS; 797 + 797 798 return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping); 798 799 } 799 800 #else ··· 826 811 if (!ts->client) 827 812 return -EINVAL; 828 813 dev = &ts->client->dev; 814 + 815 + /* 816 + * By default we request the reset pin as input, leaving it in 817 + * high-impedance when not resetting the controller to save power. 
818 + */ 819 + ts->gpiod_rst_flags = GPIOD_IN; 829 820 830 821 ts->avdd28 = devm_regulator_get(dev, "AVDD28"); 831 822 if (IS_ERR(ts->avdd28)) { ··· 870 849 ts->gpiod_int = gpiod; 871 850 872 851 /* Get the reset line GPIO pin number */ 873 - gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_IN); 852 + gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags); 874 853 if (IS_ERR(gpiod)) { 875 854 error = PTR_ERR(gpiod); 876 855 if (error != -EPROBE_DEFER)
+1
drivers/input/touchscreen/goodix.h
··· 87 87 struct gpio_desc *gpiod_rst; 88 88 int gpio_count; 89 89 int gpio_int_idx; 90 + enum gpiod_flags gpiod_rst_flags; 90 91 char id[GOODIX_ID_MAX_LEN + 1]; 91 92 char cfg_name[64]; 92 93 u16 version;
+1 -1
drivers/input/touchscreen/goodix_fwupload.c
··· 207 207 208 208 error = goodix_reset_no_int_sync(ts); 209 209 if (error) 210 - return error; 210 + goto release; 211 211 212 212 error = goodix_enter_upload_mode(ts->client); 213 213 if (error)
+3 -3
drivers/isdn/mISDN/core.c
··· 381 381 err = mISDN_inittimer(&debug); 382 382 if (err) 383 383 goto error2; 384 - err = l1_init(&debug); 384 + err = Isdnl1_Init(&debug); 385 385 if (err) 386 386 goto error3; 387 387 err = Isdnl2_Init(&debug); ··· 395 395 error5: 396 396 Isdnl2_cleanup(); 397 397 error4: 398 - l1_cleanup(); 398 + Isdnl1_cleanup(); 399 399 error3: 400 400 mISDN_timer_cleanup(); 401 401 error2: ··· 408 408 { 409 409 misdn_sock_cleanup(); 410 410 Isdnl2_cleanup(); 411 - l1_cleanup(); 411 + Isdnl1_cleanup(); 412 412 mISDN_timer_cleanup(); 413 413 class_unregister(&mISDN_class); 414 414
+2 -2
drivers/isdn/mISDN/core.h
··· 60 60 extern int mISDN_inittimer(u_int *); 61 61 extern void mISDN_timer_cleanup(void); 62 62 63 - extern int l1_init(u_int *); 64 - extern void l1_cleanup(void); 63 + extern int Isdnl1_Init(u_int *); 64 + extern void Isdnl1_cleanup(void); 65 65 extern int Isdnl2_Init(u_int *); 66 66 extern void Isdnl2_cleanup(void); 67 67
+2 -2
drivers/isdn/mISDN/layer1.c
··· 398 398 EXPORT_SYMBOL(create_l1); 399 399 400 400 int 401 - l1_init(u_int *deb) 401 + Isdnl1_Init(u_int *deb) 402 402 { 403 403 debug = deb; 404 404 l1fsm_s.state_count = L1S_STATE_COUNT; ··· 409 409 } 410 410 411 411 void 412 - l1_cleanup(void) 412 + Isdnl1_cleanup(void) 413 413 { 414 414 mISDN_FsmFree(&l1fsm_s); 415 415 }
+2 -1
drivers/md/bcache/super.c
··· 1139 1139 static void cached_dev_detach_finish(struct work_struct *w) 1140 1140 { 1141 1141 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 1142 + struct cache_set *c = dc->disk.c; 1142 1143 1143 1144 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); 1144 1145 BUG_ON(refcount_read(&dc->count)); ··· 1157 1156 1158 1157 bcache_device_detach(&dc->disk); 1159 1158 list_move(&dc->list, &uncached_devices); 1160 - calc_cached_dev_sectors(dc->disk.c); 1159 + calc_cached_dev_sectors(c); 1161 1160 1162 1161 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); 1163 1162 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
+6 -1
drivers/mmc/core/core.c
··· 2264 2264 _mmc_detect_change(host, 0, false); 2265 2265 } 2266 2266 2267 - void mmc_stop_host(struct mmc_host *host) 2267 + void __mmc_stop_host(struct mmc_host *host) 2268 2268 { 2269 2269 if (host->slot.cd_irq >= 0) { 2270 2270 mmc_gpio_set_cd_wake(host, false); ··· 2273 2273 2274 2274 host->rescan_disable = 1; 2275 2275 cancel_delayed_work_sync(&host->detect); 2276 + } 2277 + 2278 + void mmc_stop_host(struct mmc_host *host) 2279 + { 2280 + __mmc_stop_host(host); 2276 2281 2277 2282 /* clear pm flags now and let card drivers set them as needed */ 2278 2283 host->pm_flags = 0;
+1
drivers/mmc/core/core.h
··· 70 70 71 71 void mmc_rescan(struct work_struct *work); 72 72 void mmc_start_host(struct mmc_host *host); 73 + void __mmc_stop_host(struct mmc_host *host); 73 74 void mmc_stop_host(struct mmc_host *host); 74 75 75 76 void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+9
drivers/mmc/core/host.c
··· 80 80 kfree(host); 81 81 } 82 82 83 + static int mmc_host_classdev_shutdown(struct device *dev) 84 + { 85 + struct mmc_host *host = cls_dev_to_mmc_host(dev); 86 + 87 + __mmc_stop_host(host); 88 + return 0; 89 + } 90 + 83 91 static struct class mmc_host_class = { 84 92 .name = "mmc_host", 85 93 .dev_release = mmc_host_classdev_release, 94 + .shutdown_pre = mmc_host_classdev_shutdown, 86 95 .pm = MMC_HOST_CLASS_DEV_PM_OPS, 87 96 }; 88 97
+16
drivers/mmc/host/meson-mx-sdhc-mmc.c
··· 135 135 struct mmc_command *cmd) 136 136 { 137 137 struct meson_mx_sdhc_host *host = mmc_priv(mmc); 138 + bool manual_stop = false; 138 139 u32 ictl, send; 139 140 int pack_len; 140 141 ··· 173 172 else 174 173 /* software flush: */ 175 174 ictl |= MESON_SDHC_ICTL_DATA_XFER_OK; 175 + 176 + /* 177 + * Mimic the logic from the vendor driver where (only) 178 + * SD_IO_RW_EXTENDED commands with more than one block set the 179 + * MESON_SDHC_MISC_MANUAL_STOP bit. This fixes the firmware 180 + * download in the brcmfmac driver for a BCM43362/1 card. 181 + * Without this sdio_memcpy_toio() (with a size of 219557 182 + * bytes) times out if MESON_SDHC_MISC_MANUAL_STOP is not set. 183 + */ 184 + manual_stop = cmd->data->blocks > 1 && 185 + cmd->opcode == SD_IO_RW_EXTENDED; 176 186 } else { 177 187 pack_len = 0; 178 188 179 189 ictl |= MESON_SDHC_ICTL_RESP_OK; 180 190 } 191 + 192 + regmap_update_bits(host->regmap, MESON_SDHC_MISC, 193 + MESON_SDHC_MISC_MANUAL_STOP, 194 + manual_stop ? MESON_SDHC_MISC_MANUAL_STOP : 0); 181 195 182 196 if (cmd->opcode == MMC_STOP_TRANSMISSION) 183 197 send |= MESON_SDHC_SEND_DATA_STOP;
+2
drivers/mmc/host/mmci_stm32_sdmmc.c
··· 441 441 return -EINVAL; 442 442 } 443 443 444 + writel_relaxed(0, dlyb->base + DLYB_CR); 445 + 444 446 phase = end_of_len - max_len / 2; 445 447 sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false); 446 448
+26 -17
drivers/mmc/host/sdhci-tegra.c
··· 356 356 } 357 357 } 358 358 359 - static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc, 360 - struct mmc_ios *ios) 361 - { 362 - struct sdhci_host *host = mmc_priv(mmc); 363 - u32 val; 364 - 365 - val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); 366 - 367 - if (ios->enhanced_strobe) 368 - val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; 369 - else 370 - val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; 371 - 372 - sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); 373 - 374 - } 375 - 376 359 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) 377 360 { 378 361 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ··· 774 791 tegra_sdhci_pad_autocalib(host); 775 792 tegra_host->pad_calib_required = false; 776 793 } 794 + } 795 + 796 + static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc, 797 + struct mmc_ios *ios) 798 + { 799 + struct sdhci_host *host = mmc_priv(mmc); 800 + u32 val; 801 + 802 + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); 803 + 804 + if (ios->enhanced_strobe) { 805 + val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; 806 + /* 807 + * When CMD13 is sent from mmc_select_hs400es() after 808 + * switching to HS400ES mode, the bus is operating at 809 + * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR. 810 + * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI 811 + * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host 812 + * controller CAR clock and the interface clock are rate matched. 813 + */ 814 + tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR); 815 + } else { 816 + val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; 817 + } 818 + 819 + sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); 777 820 } 778 821 779 822 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
+8
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
··· 366 366 if (!buff->is_eop) { 367 367 buff_ = buff; 368 368 do { 369 + if (buff_->next >= self->size) { 370 + err = -EIO; 371 + goto err_exit; 372 + } 369 373 next_ = buff_->next, 370 374 buff_ = &self->buff_ring[next_]; 371 375 is_rsc_completed = ··· 393 389 (buff->is_lro && buff->is_cso_err)) { 394 390 buff_ = buff; 395 391 do { 392 + if (buff_->next >= self->size) { 393 + err = -EIO; 394 + goto err_exit; 395 + } 396 396 next_ = buff_->next, 397 397 buff_ = &self->buff_ring[next_]; 398 398
+8 -15
drivers/net/ethernet/atheros/ag71xx.c
··· 1862 1862 ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac"); 1863 1863 if (IS_ERR(ag->mac_reset)) { 1864 1864 netif_err(ag, probe, ndev, "missing mac reset\n"); 1865 - err = PTR_ERR(ag->mac_reset); 1866 - goto err_free; 1865 + return PTR_ERR(ag->mac_reset); 1867 1866 } 1868 1867 1869 1868 ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 1870 - if (!ag->mac_base) { 1871 - err = -ENOMEM; 1872 - goto err_free; 1873 - } 1869 + if (!ag->mac_base) 1870 + return -ENOMEM; 1874 1871 1875 1872 ndev->irq = platform_get_irq(pdev, 0); 1876 1873 err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt, ··· 1875 1878 if (err) { 1876 1879 netif_err(ag, probe, ndev, "unable to request IRQ %d\n", 1877 1880 ndev->irq); 1878 - goto err_free; 1881 + return err; 1879 1882 } 1880 1883 1881 1884 ndev->netdev_ops = &ag71xx_netdev_ops; ··· 1903 1906 ag->stop_desc = dmam_alloc_coherent(&pdev->dev, 1904 1907 sizeof(struct ag71xx_desc), 1905 1908 &ag->stop_desc_dma, GFP_KERNEL); 1906 - if (!ag->stop_desc) { 1907 - err = -ENOMEM; 1908 - goto err_free; 1909 - } 1909 + if (!ag->stop_desc) 1910 + return -ENOMEM; 1910 1911 1911 1912 ag->stop_desc->data = 0; 1912 1913 ag->stop_desc->ctrl = 0; ··· 1919 1924 err = of_get_phy_mode(np, &ag->phy_if_mode); 1920 1925 if (err) { 1921 1926 netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); 1922 - goto err_free; 1927 + return err; 1923 1928 } 1924 1929 1925 1930 netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); ··· 1927 1932 err = clk_prepare_enable(ag->clk_eth); 1928 1933 if (err) { 1929 1934 netif_err(ag, probe, ndev, "Failed to enable eth clk.\n"); 1930 - goto err_free; 1935 + return err; 1931 1936 } 1932 1937 1933 1938 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0); ··· 1963 1968 ag71xx_mdio_remove(ag); 1964 1969 err_put_clk: 1965 1970 clk_disable_unprepare(ag->clk_eth); 1966 - err_free: 1967 - free_netdev(ndev); 1968 1971 return err; 1969 1972 } 1970 1973
+7 -5
drivers/net/ethernet/freescale/fman/fman_port.c
··· 1805 1805 fman = dev_get_drvdata(&fm_pdev->dev); 1806 1806 if (!fman) { 1807 1807 err = -EINVAL; 1808 - goto return_err; 1808 + goto put_device; 1809 1809 } 1810 1810 1811 1811 err = of_property_read_u32(port_node, "cell-index", &val); ··· 1813 1813 dev_err(port->dev, "%s: reading cell-index for %pOF failed\n", 1814 1814 __func__, port_node); 1815 1815 err = -EINVAL; 1816 - goto return_err; 1816 + goto put_device; 1817 1817 } 1818 1818 port_id = (u8)val; 1819 1819 port->dts_params.id = port_id; ··· 1847 1847 } else { 1848 1848 dev_err(port->dev, "%s: Illegal port type\n", __func__); 1849 1849 err = -EINVAL; 1850 - goto return_err; 1850 + goto put_device; 1851 1851 } 1852 1852 1853 1853 port->dts_params.type = port_type; ··· 1861 1861 dev_err(port->dev, "%s: incorrect qman-channel-id\n", 1862 1862 __func__); 1863 1863 err = -EINVAL; 1864 - goto return_err; 1864 + goto put_device; 1865 1865 } 1866 1866 port->dts_params.qman_channel_id = qman_channel_id; 1867 1867 } ··· 1871 1871 dev_err(port->dev, "%s: of_address_to_resource() failed\n", 1872 1872 __func__); 1873 1873 err = -ENOMEM; 1874 - goto return_err; 1874 + goto put_device; 1875 1875 } 1876 1876 1877 1877 port->dts_params.fman = fman; ··· 1896 1896 1897 1897 return 0; 1898 1898 1899 + put_device: 1900 + put_device(&fm_pdev->dev); 1899 1901 return_err: 1900 1902 of_node_put(port_node); 1901 1903 free_port:
+6
drivers/net/ethernet/intel/igc/igc_main.c
··· 5480 5480 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5481 5481 } 5482 5482 5483 + if (icr & IGC_ICR_TS) 5484 + igc_tsync_interrupt(adapter); 5485 + 5483 5486 napi_schedule(&q_vector->napi); 5484 5487 5485 5488 return IRQ_HANDLED; ··· 5525 5522 if (!test_bit(__IGC_DOWN, &adapter->state)) 5526 5523 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5527 5524 } 5525 + 5526 + if (icr & IGC_ICR_TS) 5527 + igc_tsync_interrupt(adapter); 5528 5528 5529 5529 napi_schedule(&q_vector->napi); 5530 5530
+14 -1
drivers/net/ethernet/intel/igc/igc_ptp.c
··· 764 764 */ 765 765 static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) 766 766 { 767 - return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false; 767 + if (!IS_ENABLED(CONFIG_X86_TSC)) 768 + return false; 769 + 770 + /* FIXME: it was noticed that enabling support for PCIe PTM in 771 + * some i225-V models could cause lockups when bringing the 772 + * interface up/down. There should be no downsides to 773 + * disabling crosstimestamping support for i225-V, as it 774 + * doesn't have any PTP support. That way we gain some time 775 + * while root causing the issue. 776 + */ 777 + if (adapter->pdev->device == IGC_DEV_ID_I225_V) 778 + return false; 779 + 780 + return pcie_ptm_enabled(adapter->pdev); 768 781 } 769 782 770 783 static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
+1 -1
drivers/net/ethernet/lantiq_xrx200.c
··· 224 224 skb->protocol = eth_type_trans(skb, net_dev); 225 225 netif_receive_skb(skb); 226 226 net_dev->stats.rx_packets++; 227 - net_dev->stats.rx_bytes += len - ETH_FCS_LEN; 227 + net_dev->stats.rx_bytes += len; 228 228 229 229 return 0; 230 230 }
+2 -3
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 782 782 DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); 783 783 int ix; 784 784 int cpu; 785 + /* Sync between icosq recovery and XSK enable/disable. */ 786 + struct mutex icosq_recovery_lock; 785 787 }; 786 788 787 789 struct mlx5e_ptp; ··· 1023 1021 void mlx5e_destroy_rq(struct mlx5e_rq *rq); 1024 1022 1025 1023 struct mlx5e_sq_param; 1026 - int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, 1027 - struct mlx5e_sq_param *param, struct mlx5e_icosq *sq); 1028 - void mlx5e_close_icosq(struct mlx5e_icosq *sq); 1029 1024 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, 1030 1025 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, 1031 1026 struct mlx5e_xdpsq *sq, bool is_redirect);
+2
drivers/net/ethernet/mellanox/mlx5/core/en/health.h
··· 30 30 void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq); 31 31 void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq); 32 32 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq); 33 + void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c); 34 + void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c); 33 35 34 36 #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 35 37
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
··· 66 66 67 67 static inline void 68 68 mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, 69 - struct sk_buff *skb) {} 69 + struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); } 70 70 71 71 #endif /* CONFIG_MLX5_CLS_ACT */ 72 72
+34 -1
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
··· 62 62 63 63 static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) 64 64 { 65 + struct mlx5e_rq *xskrq = NULL; 65 66 struct mlx5_core_dev *mdev; 66 67 struct mlx5e_icosq *icosq; 67 68 struct net_device *dev; ··· 71 70 int err; 72 71 73 72 icosq = ctx; 73 + 74 + mutex_lock(&icosq->channel->icosq_recovery_lock); 75 + 76 + /* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */ 74 77 rq = &icosq->channel->rq; 78 + if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state)) 79 + xskrq = &icosq->channel->xskrq; 75 80 mdev = icosq->channel->mdev; 76 81 dev = icosq->channel->netdev; 77 82 err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state); ··· 91 84 goto out; 92 85 93 86 mlx5e_deactivate_rq(rq); 87 + if (xskrq) 88 + mlx5e_deactivate_rq(xskrq); 89 + 94 90 err = mlx5e_wait_for_icosq_flush(icosq); 95 91 if (err) 96 92 goto out; ··· 107 97 goto out; 108 98 109 99 mlx5e_reset_icosq_cc_pc(icosq); 100 + 110 101 mlx5e_free_rx_in_progress_descs(rq); 102 + if (xskrq) 103 + mlx5e_free_rx_in_progress_descs(xskrq); 104 + 111 105 clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); 112 106 mlx5e_activate_icosq(icosq); 113 - mlx5e_activate_rq(rq); 114 107 108 + mlx5e_activate_rq(rq); 115 109 rq->stats->recover++; 110 + 111 + if (xskrq) { 112 + mlx5e_activate_rq(xskrq); 113 + xskrq->stats->recover++; 114 + } 115 + 116 + mutex_unlock(&icosq->channel->icosq_recovery_lock); 117 + 116 118 return 0; 117 119 out: 118 120 clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); 121 + mutex_unlock(&icosq->channel->icosq_recovery_lock); 119 122 return err; 120 123 } 121 124 ··· 727 704 snprintf(err_str, sizeof(err_str), "ERR CQE on ICOSQ: 0x%x", icosq->sqn); 728 705 729 706 mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); 707 + } 708 + 709 + void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c) 710 + { 711 + mutex_lock(&c->icosq_recovery_lock); 712 + } 713 + 714 + void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c) 
715 + { 716 + mutex_unlock(&c->icosq_recovery_lock); 730 717 } 731 718 732 719 static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
+9 -1
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
··· 466 466 return mlx5e_health_fmsg_named_obj_nest_end(fmsg); 467 467 } 468 468 469 + static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, 470 + void *ctx) 471 + { 472 + struct mlx5e_tx_timeout_ctx *to_ctx = ctx; 473 + 474 + return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq); 475 + } 476 + 469 477 static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, 470 478 struct devlink_fmsg *fmsg) 471 479 { ··· 569 561 to_ctx.sq = sq; 570 562 err_ctx.ctx = &to_ctx; 571 563 err_ctx.recover = mlx5e_tx_reporter_timeout_recover; 572 - err_ctx.dump = mlx5e_tx_reporter_dump_sq; 564 + err_ctx.dump = mlx5e_tx_reporter_timeout_dump; 573 565 snprintf(err_str, sizeof(err_str), 574 566 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u", 575 567 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+15 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
··· 4 4 #include "setup.h" 5 5 #include "en/params.h" 6 6 #include "en/txrx.h" 7 + #include "en/health.h" 7 8 8 9 /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may 9 10 * change unexpectedly, and mlx5e has a minimum valid stride size for striding ··· 171 170 172 171 void mlx5e_activate_xsk(struct mlx5e_channel *c) 173 172 { 173 + /* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid 174 + * activating XSKRQ in the middle of recovery. 175 + */ 176 + mlx5e_reporter_icosq_suspend_recovery(c); 174 177 set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); 178 + mlx5e_reporter_icosq_resume_recovery(c); 179 + 175 180 /* TX queue is created active. */ 176 181 177 182 spin_lock_bh(&c->async_icosq_lock); ··· 187 180 188 181 void mlx5e_deactivate_xsk(struct mlx5e_channel *c) 189 182 { 190 - mlx5e_deactivate_rq(&c->xskrq); 183 + /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the 184 + * middle of recovery. Suspend the recovery to avoid it. 185 + */ 186 + mlx5e_reporter_icosq_suspend_recovery(c); 187 + clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); 188 + mlx5e_reporter_icosq_resume_recovery(c); 189 + synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */ 190 + 191 191 /* TX queue is disabled on close. */ 192 192 }
+32 -16
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1087 1087 void mlx5e_close_rq(struct mlx5e_rq *rq) 1088 1088 { 1089 1089 cancel_work_sync(&rq->dim.work); 1090 - if (rq->icosq) 1091 - cancel_work_sync(&rq->icosq->recover_work); 1092 1090 cancel_work_sync(&rq->recover_work); 1093 1091 mlx5e_destroy_rq(rq); 1094 1092 mlx5e_free_rx_descs(rq); ··· 1214 1216 mlx5e_reporter_icosq_cqe_err(sq); 1215 1217 } 1216 1218 1219 + static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work) 1220 + { 1221 + struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq, 1222 + recover_work); 1223 + 1224 + /* Not implemented yet. */ 1225 + 1226 + netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n"); 1227 + } 1228 + 1217 1229 static int mlx5e_alloc_icosq(struct mlx5e_channel *c, 1218 1230 struct mlx5e_sq_param *param, 1219 - struct mlx5e_icosq *sq) 1231 + struct mlx5e_icosq *sq, 1232 + work_func_t recover_work_func) 1220 1233 { 1221 1234 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); 1222 1235 struct mlx5_core_dev *mdev = c->mdev; ··· 1248 1239 if (err) 1249 1240 goto err_sq_wq_destroy; 1250 1241 1251 - INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work); 1242 + INIT_WORK(&sq->recover_work, recover_work_func); 1252 1243 1253 1244 return 0; 1254 1245 ··· 1584 1575 mlx5e_reporter_tx_err_cqe(sq); 1585 1576 } 1586 1577 1587 - int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, 1588 - struct mlx5e_sq_param *param, struct mlx5e_icosq *sq) 1578 + static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, 1579 + struct mlx5e_sq_param *param, struct mlx5e_icosq *sq, 1580 + work_func_t recover_work_func) 1589 1581 { 1590 1582 struct mlx5e_create_sq_param csp = {}; 1591 1583 int err; 1592 1584 1593 - err = mlx5e_alloc_icosq(c, param, sq); 1585 + err = mlx5e_alloc_icosq(c, param, sq, recover_work_func); 1594 1586 if (err) 1595 1587 return err; 1596 1588 ··· 1630 1620 synchronize_net(); /* Sync with NAPI. 
*/ 1631 1621 } 1632 1622 1633 - void mlx5e_close_icosq(struct mlx5e_icosq *sq) 1623 + static void mlx5e_close_icosq(struct mlx5e_icosq *sq) 1634 1624 { 1635 1625 struct mlx5e_channel *c = sq->channel; 1636 1626 ··· 2094 2084 2095 2085 spin_lock_init(&c->async_icosq_lock); 2096 2086 2097 - err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq); 2087 + err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq, 2088 + mlx5e_async_icosq_err_cqe_work); 2098 2089 if (err) 2099 2090 goto err_close_xdpsq_cq; 2100 2091 2101 - err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); 2092 + mutex_init(&c->icosq_recovery_lock); 2093 + 2094 + err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq, 2095 + mlx5e_icosq_err_cqe_work); 2102 2096 if (err) 2103 2097 goto err_close_async_icosq; 2104 2098 ··· 2170 2156 mlx5e_close_xdpsq(&c->xdpsq); 2171 2157 if (c->xdp) 2172 2158 mlx5e_close_xdpsq(&c->rq_xdpsq); 2159 + /* The same ICOSQ is used for UMRs for both RQ and XSKRQ. 
*/ 2160 + cancel_work_sync(&c->icosq.recover_work); 2173 2161 mlx5e_close_rq(&c->rq); 2174 2162 mlx5e_close_sqs(c); 2175 2163 mlx5e_close_icosq(&c->icosq); 2164 + mutex_destroy(&c->icosq_recovery_lock); 2176 2165 mlx5e_close_icosq(&c->async_icosq); 2177 2166 if (c->xdp) 2178 2167 mlx5e_close_cq(&c->rq_xdpsq.cq); ··· 3769 3752 3770 3753 static int mlx5e_handle_feature(struct net_device *netdev, 3771 3754 netdev_features_t *features, 3772 - netdev_features_t wanted_features, 3773 3755 netdev_features_t feature, 3774 3756 mlx5e_feature_handler feature_handler) 3775 3757 { 3776 - netdev_features_t changes = wanted_features ^ netdev->features; 3777 - bool enable = !!(wanted_features & feature); 3758 + netdev_features_t changes = *features ^ netdev->features; 3759 + bool enable = !!(*features & feature); 3778 3760 int err; 3779 3761 3780 3762 if (!(changes & feature)) ··· 3781 3765 3782 3766 err = feature_handler(netdev, enable); 3783 3767 if (err) { 3768 + MLX5E_SET_FEATURE(features, feature, !enable); 3784 3769 netdev_err(netdev, "%s feature %pNF failed, err %d\n", 3785 3770 enable ? "Enable" : "Disable", &feature, err); 3786 3771 return err; 3787 3772 } 3788 3773 3789 - MLX5E_SET_FEATURE(features, feature, enable); 3790 3774 return 0; 3791 3775 } 3792 3776 3793 3777 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) 3794 3778 { 3795 - netdev_features_t oper_features = netdev->features; 3779 + netdev_features_t oper_features = features; 3796 3780 int err = 0; 3797 3781 3798 3782 #define MLX5E_HANDLE_FEATURE(feature, handler) \ 3799 - mlx5e_handle_feature(netdev, &oper_features, features, feature, handler) 3783 + mlx5e_handle_feature(netdev, &oper_features, feature, handler) 3800 3784 3801 3785 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); 3802 3786 err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+8 -11
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1187 1187 if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) 1188 1188 goto offload_rule_0; 1189 1189 1190 - if (flow_flag_test(flow, CT)) { 1191 - mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr); 1192 - return; 1193 - } 1194 - 1195 - if (flow_flag_test(flow, SAMPLE)) { 1196 - mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); 1197 - return; 1198 - } 1199 - 1200 1190 if (attr->esw_attr->split_count) 1201 1191 mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); 1202 1192 1193 + if (flow_flag_test(flow, CT)) 1194 + mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr); 1195 + else if (flow_flag_test(flow, SAMPLE)) 1196 + mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); 1197 + else 1203 1198 offload_rule_0: 1204 - mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); 1199 + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); 1205 1200 } 1206 1201 1207 1202 struct mlx5_flow_handle * ··· 1430 1435 metadata); 1431 1436 if (err) 1432 1437 goto err_out; 1438 + 1439 + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 1433 1440 } 1434 1441 } 1435 1442
+3
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
··· 121 121 122 122 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains) 123 123 { 124 + if (!mlx5_chains_prios_supported(chains)) 125 + return 1; 126 + 124 127 if (mlx5_chains_ignore_flow_level_supported(chains)) 125 128 return UINT_MAX; 126 129
+6 -5
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1853 1853 1854 1854 int mlx5_recover_device(struct mlx5_core_dev *dev) 1855 1855 { 1856 - int ret = -EIO; 1856 + if (!mlx5_core_is_sf(dev)) { 1857 + mlx5_pci_disable_device(dev); 1858 + if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED) 1859 + return -EIO; 1860 + } 1857 1861 1858 - mlx5_pci_disable_device(dev); 1859 - if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) 1860 - ret = mlx5_load_one(dev); 1861 - return ret; 1862 + return mlx5_load_one(dev); 1862 1863 } 1863 1864 1864 1865 static struct pci_driver mlx5_core_driver = {
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
··· 356 356 new_irq = irq_pool_create_irq(pool, affinity); 357 357 if (IS_ERR(new_irq)) { 358 358 if (!least_loaded_irq) { 359 - mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n", 360 - cpumask_first(affinity)); 359 + mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n", 360 + PTR_ERR(new_irq)); 361 361 mutex_unlock(&pool->lock); 362 362 return new_irq; 363 363 } ··· 398 398 cpumask_copy(irq->mask, affinity); 399 399 if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max && 400 400 cpumask_empty(irq->mask)) 401 - cpumask_set_cpu(0, irq->mask); 401 + cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask); 402 402 irq_set_affinity_hint(irq->irqn, irq->mask); 403 403 unlock: 404 404 mutex_unlock(&pool->lock);
+4 -5
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
··· 2 2 /* Copyright (c) 2019 Mellanox Technologies. */ 3 3 4 4 #include <linux/mlx5/eswitch.h> 5 + #include <linux/err.h> 5 6 #include "dr_types.h" 6 7 7 8 #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \ ··· 73 72 } 74 73 75 74 dmn->uar = mlx5_get_uars_page(dmn->mdev); 76 - if (!dmn->uar) { 75 + if (IS_ERR(dmn->uar)) { 77 76 mlx5dr_err(dmn, "Couldn't allocate UAR\n"); 78 - ret = -ENOMEM; 77 + ret = PTR_ERR(dmn->uar); 79 78 goto clean_pd; 80 79 } 81 80 ··· 164 163 165 164 static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn) 166 165 { 167 - return dr_domain_query_vport(dmn, 168 - dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0, 169 - false, 166 + return dr_domain_query_vport(dmn, 0, false, 170 167 &dmn->info.caps.vports.esw_manager_caps); 171 168 } 172 169
+1 -1
drivers/net/ethernet/pensando/ionic/ionic_lif.c
··· 3135 3135 return -EINVAL; 3136 3136 } 3137 3137 3138 - lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); 3138 + lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL); 3139 3139 if (!lif->dbid_inuse) { 3140 3140 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); 3141 3141 return -ENOMEM;
+2 -2
drivers/net/phy/fixed_phy.c
··· 239 239 /* Check if we have a GPIO associated with this fixed phy */ 240 240 if (!gpiod) { 241 241 gpiod = fixed_phy_get_gpiod(np); 242 - if (IS_ERR(gpiod)) 243 - return ERR_CAST(gpiod); 242 + if (!gpiod) 243 + return ERR_PTR(-EINVAL); 244 244 } 245 245 246 246 /* Get the next available PHY address, up to PHY_MAX_ADDR */
+2 -2
drivers/net/usb/pegasus.c
··· 493 493 goto goon; 494 494 495 495 rx_status = buf[count - 2]; 496 - if (rx_status & 0x1e) { 496 + if (rx_status & 0x1c) { 497 497 netif_dbg(pegasus, rx_err, net, 498 498 "RX packet error %x\n", rx_status); 499 499 net->stats.rx_errors++; 500 - if (rx_status & 0x06) /* long or runt */ 500 + if (rx_status & 0x04) /* runt */ 501 501 net->stats.rx_length_errors++; 502 502 if (rx_status & 0x08) 503 503 net->stats.rx_crc_errors++;
+1
drivers/net/xen-netback/common.h
··· 203 203 unsigned int rx_queue_max; 204 204 unsigned int rx_queue_len; 205 205 unsigned long last_rx_time; 206 + unsigned int rx_slots_needed; 206 207 bool stalled; 207 208 208 209 struct xenvif_copy_state rx_copy;
+49 -28
drivers/net/xen-netback/rx.c
··· 33 33 #include <xen/xen.h> 34 34 #include <xen/events.h> 35 35 36 + /* 37 + * Update the needed ring page slots for the first SKB queued. 38 + * Note that any call sequence outside the RX thread calling this function 39 + * needs to wake up the RX thread via a call of xenvif_kick_thread() 40 + * afterwards in order to avoid a race with putting the thread to sleep. 41 + */ 42 + static void xenvif_update_needed_slots(struct xenvif_queue *queue, 43 + const struct sk_buff *skb) 44 + { 45 + unsigned int needed = 0; 46 + 47 + if (skb) { 48 + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); 49 + if (skb_is_gso(skb)) 50 + needed++; 51 + if (skb->sw_hash) 52 + needed++; 53 + } 54 + 55 + WRITE_ONCE(queue->rx_slots_needed, needed); 56 + } 57 + 36 58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) 37 59 { 38 60 RING_IDX prod, cons; 39 - struct sk_buff *skb; 40 - int needed; 41 - unsigned long flags; 61 + unsigned int needed; 42 62 43 - spin_lock_irqsave(&queue->rx_queue.lock, flags); 44 - 45 - skb = skb_peek(&queue->rx_queue); 46 - if (!skb) { 47 - spin_unlock_irqrestore(&queue->rx_queue.lock, flags); 63 + needed = READ_ONCE(queue->rx_slots_needed); 64 + if (!needed) 48 65 return false; 49 - } 50 - 51 - needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); 52 - if (skb_is_gso(skb)) 53 - needed++; 54 - if (skb->sw_hash) 55 - needed++; 56 - 57 - spin_unlock_irqrestore(&queue->rx_queue.lock, flags); 58 66 59 67 do { 60 68 prod = queue->rx.sring->req_prod; ··· 88 80 89 81 spin_lock_irqsave(&queue->rx_queue.lock, flags); 90 82 91 - __skb_queue_tail(&queue->rx_queue, skb); 92 - 93 - queue->rx_queue_len += skb->len; 94 - if (queue->rx_queue_len > queue->rx_queue_max) { 83 + if (queue->rx_queue_len >= queue->rx_queue_max) { 95 84 struct net_device *dev = queue->vif->dev; 96 85 97 86 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); 87 + kfree_skb(skb); 88 + queue->vif->dev->stats.rx_dropped++; 89 + } else { 90 + if (skb_queue_empty(&queue->rx_queue)) 91 + 
xenvif_update_needed_slots(queue, skb); 92 + 93 + __skb_queue_tail(&queue->rx_queue, skb); 94 + 95 + queue->rx_queue_len += skb->len; 98 96 } 99 97 100 98 spin_unlock_irqrestore(&queue->rx_queue.lock, flags); ··· 114 100 115 101 skb = __skb_dequeue(&queue->rx_queue); 116 102 if (skb) { 103 + xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue)); 104 + 117 105 queue->rx_queue_len -= skb->len; 118 106 if (queue->rx_queue_len < queue->rx_queue_max) { 119 107 struct netdev_queue *txq; ··· 150 134 break; 151 135 xenvif_rx_dequeue(queue); 152 136 kfree_skb(skb); 137 + queue->vif->dev->stats.rx_dropped++; 153 138 } 154 139 } 155 140 ··· 504 487 xenvif_rx_copy_flush(queue); 505 488 } 506 489 507 - static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) 490 + static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue) 508 491 { 509 492 RING_IDX prod, cons; 510 493 511 494 prod = queue->rx.sring->req_prod; 512 495 cons = queue->rx.req_cons; 513 496 497 + return prod - cons; 498 + } 499 + 500 + static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue) 501 + { 502 + unsigned int needed = READ_ONCE(queue->rx_slots_needed); 503 + 514 504 return !queue->stalled && 515 - prod - cons < 1 && 505 + xenvif_rx_queue_slots(queue) < needed && 516 506 time_after(jiffies, 517 507 queue->last_rx_time + queue->vif->stall_timeout); 518 508 } 519 509 520 510 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) 521 511 { 522 - RING_IDX prod, cons; 512 + unsigned int needed = READ_ONCE(queue->rx_slots_needed); 523 513 524 - prod = queue->rx.sring->req_prod; 525 - cons = queue->rx.req_cons; 526 - 527 - return queue->stalled && prod - cons >= 1; 514 + return queue->stalled && xenvif_rx_queue_slots(queue) >= needed; 528 515 } 529 516 530 517 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
+95 -32
drivers/net/xen-netfront.c
··· 148 148 grant_ref_t gref_rx_head; 149 149 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; 150 150 151 + unsigned int rx_rsp_unconsumed; 152 + spinlock_t rx_cons_lock; 153 + 151 154 struct page_pool *page_pool; 152 155 struct xdp_rxq_info xdp_rxq; 153 156 }; ··· 379 376 return 0; 380 377 } 381 378 382 - static void xennet_tx_buf_gc(struct netfront_queue *queue) 379 + static bool xennet_tx_buf_gc(struct netfront_queue *queue) 383 380 { 384 381 RING_IDX cons, prod; 385 382 unsigned short id; 386 383 struct sk_buff *skb; 387 384 bool more_to_do; 385 + bool work_done = false; 388 386 const struct device *dev = &queue->info->netdev->dev; 389 387 390 388 BUG_ON(!netif_carrier_ok(queue->info->netdev)); ··· 401 397 402 398 for (cons = queue->tx.rsp_cons; cons != prod; cons++) { 403 399 struct xen_netif_tx_response txrsp; 400 + 401 + work_done = true; 404 402 405 403 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); 406 404 if (txrsp.status == XEN_NETIF_RSP_NULL) ··· 447 441 448 442 xennet_maybe_wake_tx(queue); 449 443 450 - return; 444 + return work_done; 451 445 452 446 err: 453 447 queue->info->broken = true; 454 448 dev_alert(dev, "Disabled for further use\n"); 449 + 450 + return work_done; 455 451 } 456 452 457 453 struct xennet_gnttab_make_txreq { ··· 842 834 return 0; 843 835 } 844 836 837 + static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) 838 + { 839 + unsigned long flags; 840 + 841 + spin_lock_irqsave(&queue->rx_cons_lock, flags); 842 + queue->rx.rsp_cons = val; 843 + queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); 844 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags); 845 + } 846 + 845 847 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, 846 848 grant_ref_t ref) 847 849 { ··· 903 885 xennet_move_rx_slot(queue, skb, ref); 904 886 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); 905 887 906 - queue->rx.rsp_cons = cons; 888 + xennet_set_rx_rsp_cons(queue, cons); 907 889 return err; 
908 890 } 909 891 ··· 1057 1039 } 1058 1040 1059 1041 if (unlikely(err)) 1060 - queue->rx.rsp_cons = cons + slots; 1042 + xennet_set_rx_rsp_cons(queue, cons + slots); 1061 1043 1062 1044 return err; 1063 1045 } ··· 1111 1093 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 1112 1094 } 1113 1095 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { 1114 - queue->rx.rsp_cons = ++cons + skb_queue_len(list); 1096 + xennet_set_rx_rsp_cons(queue, 1097 + ++cons + skb_queue_len(list)); 1115 1098 kfree_skb(nskb); 1116 1099 return -ENOENT; 1117 1100 } ··· 1125 1106 kfree_skb(nskb); 1126 1107 } 1127 1108 1128 - queue->rx.rsp_cons = cons; 1109 + xennet_set_rx_rsp_cons(queue, cons); 1129 1110 1130 1111 return 0; 1131 1112 } ··· 1248 1229 1249 1230 if (unlikely(xennet_set_skb_gso(skb, gso))) { 1250 1231 __skb_queue_head(&tmpq, skb); 1251 - queue->rx.rsp_cons += skb_queue_len(&tmpq); 1232 + xennet_set_rx_rsp_cons(queue, 1233 + queue->rx.rsp_cons + 1234 + skb_queue_len(&tmpq)); 1252 1235 goto err; 1253 1236 } 1254 1237 } ··· 1274 1253 1275 1254 __skb_queue_tail(&rxq, skb); 1276 1255 1277 - i = ++queue->rx.rsp_cons; 1256 + i = queue->rx.rsp_cons + 1; 1257 + xennet_set_rx_rsp_cons(queue, i); 1278 1258 work_done++; 1279 1259 } 1280 1260 if (need_xdp_flush) ··· 1439 1417 return 0; 1440 1418 } 1441 1419 1442 - static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) 1420 + static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) 1443 1421 { 1444 - struct netfront_queue *queue = dev_id; 1445 1422 unsigned long flags; 1446 1423 1447 - if (queue->info->broken) 1448 - return IRQ_HANDLED; 1424 + if (unlikely(queue->info->broken)) 1425 + return false; 1449 1426 1450 1427 spin_lock_irqsave(&queue->tx_lock, flags); 1451 - xennet_tx_buf_gc(queue); 1428 + if (xennet_tx_buf_gc(queue)) 1429 + *eoi = 0; 1452 1430 spin_unlock_irqrestore(&queue->tx_lock, flags); 1431 + 1432 + return true; 1433 + } 1434 + 1435 + static irqreturn_t xennet_tx_interrupt(int irq, void 
*dev_id) 1436 + { 1437 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; 1438 + 1439 + if (likely(xennet_handle_tx(dev_id, &eoiflag))) 1440 + xen_irq_lateeoi(irq, eoiflag); 1453 1441 1454 1442 return IRQ_HANDLED; 1455 1443 } 1456 1444 1445 + static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) 1446 + { 1447 + unsigned int work_queued; 1448 + unsigned long flags; 1449 + 1450 + if (unlikely(queue->info->broken)) 1451 + return false; 1452 + 1453 + spin_lock_irqsave(&queue->rx_cons_lock, flags); 1454 + work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); 1455 + if (work_queued > queue->rx_rsp_unconsumed) { 1456 + queue->rx_rsp_unconsumed = work_queued; 1457 + *eoi = 0; 1458 + } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { 1459 + const struct device *dev = &queue->info->netdev->dev; 1460 + 1461 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags); 1462 + dev_alert(dev, "RX producer index going backwards\n"); 1463 + dev_alert(dev, "Disabled for further use\n"); 1464 + queue->info->broken = true; 1465 + return false; 1466 + } 1467 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags); 1468 + 1469 + if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) 1470 + napi_schedule(&queue->napi); 1471 + 1472 + return true; 1473 + } 1474 + 1457 1475 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) 1458 1476 { 1459 - struct netfront_queue *queue = dev_id; 1460 - struct net_device *dev = queue->info->netdev; 1477 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; 1461 1478 1462 - if (queue->info->broken) 1463 - return IRQ_HANDLED; 1464 - 1465 - if (likely(netif_carrier_ok(dev) && 1466 - RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) 1467 - napi_schedule(&queue->napi); 1479 + if (likely(xennet_handle_rx(dev_id, &eoiflag))) 1480 + xen_irq_lateeoi(irq, eoiflag); 1468 1481 1469 1482 return IRQ_HANDLED; 1470 1483 } 1471 1484 1472 1485 static irqreturn_t xennet_interrupt(int irq, void *dev_id) 1473 1486 { 1474 - 
xennet_tx_interrupt(irq, dev_id); 1475 - xennet_rx_interrupt(irq, dev_id); 1487 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; 1488 + 1489 + if (xennet_handle_tx(dev_id, &eoiflag) && 1490 + xennet_handle_rx(dev_id, &eoiflag)) 1491 + xen_irq_lateeoi(irq, eoiflag); 1492 + 1476 1493 return IRQ_HANDLED; 1477 1494 } 1478 1495 ··· 1829 1768 if (err < 0) 1830 1769 goto fail; 1831 1770 1832 - err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1833 - xennet_interrupt, 1834 - 0, queue->info->netdev->name, queue); 1771 + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, 1772 + xennet_interrupt, 0, 1773 + queue->info->netdev->name, 1774 + queue); 1835 1775 if (err < 0) 1836 1776 goto bind_fail; 1837 1777 queue->rx_evtchn = queue->tx_evtchn; ··· 1860 1798 1861 1799 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), 1862 1800 "%s-tx", queue->name); 1863 - err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1864 - xennet_tx_interrupt, 1865 - 0, queue->tx_irq_name, queue); 1801 + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, 1802 + xennet_tx_interrupt, 0, 1803 + queue->tx_irq_name, queue); 1866 1804 if (err < 0) 1867 1805 goto bind_tx_fail; 1868 1806 queue->tx_irq = err; 1869 1807 1870 1808 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), 1871 1809 "%s-rx", queue->name); 1872 - err = bind_evtchn_to_irqhandler(queue->rx_evtchn, 1873 - xennet_rx_interrupt, 1874 - 0, queue->rx_irq_name, queue); 1810 + err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, 1811 + xennet_rx_interrupt, 0, 1812 + queue->rx_irq_name, queue); 1875 1813 if (err < 0) 1876 1814 goto bind_rx_fail; 1877 1815 queue->rx_irq = err; ··· 1973 1911 1974 1912 spin_lock_init(&queue->tx_lock); 1975 1913 spin_lock_init(&queue->rx_lock); 1914 + spin_lock_init(&queue->rx_cons_lock); 1976 1915 1977 1916 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); 1978 1917
+20 -9
drivers/nfc/st21nfca/i2c.c
··· 524 524 phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); 525 525 if (IS_ERR(phy->gpiod_ena)) { 526 526 nfc_err(dev, "Unable to get ENABLE GPIO\n"); 527 - return PTR_ERR(phy->gpiod_ena); 527 + r = PTR_ERR(phy->gpiod_ena); 528 + goto out_free; 528 529 } 529 530 530 531 phy->se_status.is_ese_present = ··· 536 535 r = st21nfca_hci_platform_init(phy); 537 536 if (r < 0) { 538 537 nfc_err(&client->dev, "Unable to reboot st21nfca\n"); 539 - return r; 538 + goto out_free; 540 539 } 541 540 542 541 r = devm_request_threaded_irq(&client->dev, client->irq, NULL, ··· 545 544 ST21NFCA_HCI_DRIVER_NAME, phy); 546 545 if (r < 0) { 547 546 nfc_err(&client->dev, "Unable to register IRQ handler\n"); 548 - return r; 547 + goto out_free; 549 548 } 550 549 551 - return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, 552 - ST21NFCA_FRAME_HEADROOM, 553 - ST21NFCA_FRAME_TAILROOM, 554 - ST21NFCA_HCI_LLC_MAX_PAYLOAD, 555 - &phy->hdev, 556 - &phy->se_status); 550 + r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, 551 + ST21NFCA_FRAME_HEADROOM, 552 + ST21NFCA_FRAME_TAILROOM, 553 + ST21NFCA_HCI_LLC_MAX_PAYLOAD, 554 + &phy->hdev, 555 + &phy->se_status); 556 + if (r) 557 + goto out_free; 558 + 559 + return 0; 560 + 561 + out_free: 562 + kfree_skb(phy->pending_skb); 563 + return r; 557 564 } 558 565 559 566 static int st21nfca_hci_i2c_remove(struct i2c_client *client) ··· 572 563 573 564 if (phy->powered) 574 565 st21nfca_hci_i2c_disable(phy); 566 + if (phy->pending_skb) 567 + kfree_skb(phy->pending_skb); 575 568 576 569 return 0; 577 570 }
+11 -4
drivers/pci/msi.c
··· 722 722 goto out_disable; 723 723 } 724 724 725 - /* Ensure that all table entries are masked. */ 726 - msix_mask_all(base, tsize); 727 - 728 725 ret = msix_setup_entries(dev, base, entries, nvec, affd); 729 726 if (ret) 730 727 goto out_disable; ··· 748 751 /* Set MSI-X enabled bits and unmask the function */ 749 752 pci_intx_for_msi(dev, 0); 750 753 dev->msix_enabled = 1; 754 + 755 + /* 756 + * Ensure that all table entries are masked to prevent 757 + * stale entries from firing in a crash kernel. 758 + * 759 + * Done late to deal with a broken Marvell NVME device 760 + * which takes the MSI-X mask bits into account even 761 + * when MSI-X is disabled, which prevents MSI delivery. 762 + */ 763 + msix_mask_all(base, tsize); 751 764 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); 752 765 753 766 pcibios_free_irq(dev); ··· 784 777 free_msi_irqs(dev); 785 778 786 779 out_disable: 787 - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 780 + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0); 788 781 789 782 return ret; 790 783 }
+16 -13
drivers/pinctrl/bcm/pinctrl-bcm2835.c
··· 1244 1244 raw_spin_lock_init(&pc->irq_lock[i]); 1245 1245 } 1246 1246 1247 + pc->pctl_desc = *pdata->pctl_desc; 1248 + pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc); 1249 + if (IS_ERR(pc->pctl_dev)) { 1250 + gpiochip_remove(&pc->gpio_chip); 1251 + return PTR_ERR(pc->pctl_dev); 1252 + } 1253 + 1254 + pc->gpio_range = *pdata->gpio_range; 1255 + pc->gpio_range.base = pc->gpio_chip.base; 1256 + pc->gpio_range.gc = &pc->gpio_chip; 1257 + pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range); 1258 + 1247 1259 girq = &pc->gpio_chip.irq; 1248 1260 girq->chip = &bcm2835_gpio_irq_chip; 1249 1261 girq->parent_handler = bcm2835_gpio_irq_handler; ··· 1263 1251 girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS, 1264 1252 sizeof(*girq->parents), 1265 1253 GFP_KERNEL); 1266 - if (!girq->parents) 1254 + if (!girq->parents) { 1255 + pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); 1267 1256 return -ENOMEM; 1257 + } 1268 1258 1269 1259 if (is_7211) { 1270 1260 pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS, ··· 1321 1307 err = gpiochip_add_data(&pc->gpio_chip, pc); 1322 1308 if (err) { 1323 1309 dev_err(dev, "could not add GPIO chip\n"); 1310 + pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); 1324 1311 return err; 1325 1312 } 1326 - 1327 - pc->pctl_desc = *pdata->pctl_desc; 1328 - pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc); 1329 - if (IS_ERR(pc->pctl_dev)) { 1330 - gpiochip_remove(&pc->gpio_chip); 1331 - return PTR_ERR(pc->pctl_dev); 1332 - } 1333 - 1334 - pc->gpio_range = *pdata->gpio_range; 1335 - pc->gpio_range.base = pc->gpio_chip.base; 1336 - pc->gpio_range.gc = &pc->gpio_chip; 1337 - pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range); 1338 1313 1339 1314 return 0; 1340 1315 }
+6 -2
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
··· 285 285 desc = (const struct mtk_pin_desc *)hw->soc->pins; 286 286 *gpio_chip = &hw->chip; 287 287 288 - /* Be greedy to guess first gpio_n is equal to eint_n */ 289 - if (desc[eint_n].eint.eint_n == eint_n) 288 + /* 289 + * Be greedy to guess first gpio_n is equal to eint_n. 290 + * Only eint virtual eint number is greater than gpio number. 291 + */ 292 + if (hw->soc->npins > eint_n && 293 + desc[eint_n].eint.eint_n == eint_n) 290 294 *gpio_n = eint_n; 291 295 else 292 296 *gpio_n = mtk_xt_find_eint_num(hw, eint_n);
+4 -4
drivers/pinctrl/stm32/pinctrl-stm32.c
··· 1251 1251 bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK; 1252 1252 bank->gpio_chip.base = args.args[1]; 1253 1253 1254 - npins = args.args[2]; 1255 - while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 1256 - ++i, &args)) 1257 - npins += args.args[2]; 1254 + /* get the last defined gpio line (offset + nb of pins) */ 1255 + npins = args.args[0] + args.args[2]; 1256 + while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, ++i, &args)) 1257 + npins = max(npins, (int)(args.args[0] + args.args[2])); 1258 1258 } else { 1259 1259 bank_nr = pctl->nbanks; 1260 1260 bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
+2 -2
drivers/platform/mellanox/mlxbf-pmc.c
··· 1374 1374 pmc->block[i].counters = info[2]; 1375 1375 pmc->block[i].type = info[3]; 1376 1376 1377 - if (IS_ERR(pmc->block[i].mmio_base)) 1378 - return PTR_ERR(pmc->block[i].mmio_base); 1377 + if (!pmc->block[i].mmio_base) 1378 + return -ENOMEM; 1379 1379 1380 1380 ret = mlxbf_pmc_create_groups(dev, i); 1381 1381 if (ret)
+1 -1
drivers/platform/x86/Makefile
··· 68 68 obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o 69 69 70 70 # Intel 71 - obj-$(CONFIG_X86_PLATFORM_DRIVERS_INTEL) += intel/ 71 + obj-y += intel/ 72 72 73 73 # MSI 74 74 obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
+2 -1
drivers/platform/x86/amd-pmc.c
··· 508 508 } 509 509 510 510 static const struct dev_pm_ops amd_pmc_pm_ops = { 511 - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(amd_pmc_suspend, amd_pmc_resume) 511 + .suspend_noirq = amd_pmc_suspend, 512 + .resume_noirq = amd_pmc_resume, 512 513 }; 513 514 514 515 static const struct pci_device_id pmc_pci_ids[] = {
+1 -1
drivers/platform/x86/apple-gmux.c
··· 625 625 } 626 626 627 627 gmux_data->iostart = res->start; 628 - gmux_data->iolen = res->end - res->start; 628 + gmux_data->iolen = resource_size(res); 629 629 630 630 if (gmux_data->iolen < GMUX_MIN_IO_LEN) { 631 631 pr_err("gmux I/O region too small (%lu < %u)\n",
-15
drivers/platform/x86/intel/Kconfig
··· 3 3 # Intel x86 Platform Specific Drivers 4 4 # 5 5 6 - menuconfig X86_PLATFORM_DRIVERS_INTEL 7 - bool "Intel x86 Platform Specific Device Drivers" 8 - default y 9 - help 10 - Say Y here to get to see options for device drivers for 11 - various Intel x86 platforms, including vendor-specific 12 - drivers. This option alone does not add any kernel code. 13 - 14 - If you say N, all options in this submenu will be skipped 15 - and disabled. 16 - 17 - if X86_PLATFORM_DRIVERS_INTEL 18 - 19 6 source "drivers/platform/x86/intel/atomisp2/Kconfig" 20 7 source "drivers/platform/x86/intel/int1092/Kconfig" 21 8 source "drivers/platform/x86/intel/int33fe/Kconfig" ··· 170 183 171 184 To compile this driver as a module, choose M here: the module 172 185 will be called intel-uncore-frequency. 173 - 174 - endif # X86_PLATFORM_DRIVERS_INTEL
+1 -1
drivers/platform/x86/intel/pmc/pltdrv.c
··· 65 65 66 66 retval = platform_device_register(pmc_core_device); 67 67 if (retval) 68 - kfree(pmc_core_device); 68 + platform_device_put(pmc_core_device); 69 69 70 70 return retval; 71 71 }
+30 -28
drivers/platform/x86/system76_acpi.c
··· 35 35 union acpi_object *nfan; 36 36 union acpi_object *ntmp; 37 37 struct input_dev *input; 38 + bool has_open_ec; 38 39 }; 39 40 40 41 static const struct acpi_device_id device_ids[] = { ··· 280 279 281 280 static void system76_battery_init(void) 282 281 { 283 - acpi_handle handle; 284 - 285 - handle = ec_get_handle(); 286 - if (handle && acpi_has_method(handle, "GBCT")) 287 - battery_hook_register(&system76_battery_hook); 282 + battery_hook_register(&system76_battery_hook); 288 283 } 289 284 290 285 static void system76_battery_exit(void) 291 286 { 292 - acpi_handle handle; 293 - 294 - handle = ec_get_handle(); 295 - if (handle && acpi_has_method(handle, "GBCT")) 296 - battery_hook_unregister(&system76_battery_hook); 287 + battery_hook_unregister(&system76_battery_hook); 297 288 } 298 289 299 290 // Get the airplane mode LED brightness ··· 666 673 acpi_dev->driver_data = data; 667 674 data->acpi_dev = acpi_dev; 668 675 676 + // Some models do not run open EC firmware. Check for an ACPI method 677 + // that only exists on open EC to guard functionality specific to it. 
678 + data->has_open_ec = acpi_has_method(acpi_device_handle(data->acpi_dev), "NFAN"); 679 + 669 680 err = system76_get(data, "INIT"); 670 681 if (err) 671 682 return err; ··· 715 718 if (err) 716 719 goto error; 717 720 718 - err = system76_get_object(data, "NFAN", &data->nfan); 719 - if (err) 720 - goto error; 721 + if (data->has_open_ec) { 722 + err = system76_get_object(data, "NFAN", &data->nfan); 723 + if (err) 724 + goto error; 721 725 722 - err = system76_get_object(data, "NTMP", &data->ntmp); 723 - if (err) 724 - goto error; 726 + err = system76_get_object(data, "NTMP", &data->ntmp); 727 + if (err) 728 + goto error; 725 729 726 - data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev, 727 - "system76_acpi", data, &thermal_chip_info, NULL); 728 - err = PTR_ERR_OR_ZERO(data->therm); 729 - if (err) 730 - goto error; 730 + data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev, 731 + "system76_acpi", data, &thermal_chip_info, NULL); 732 + err = PTR_ERR_OR_ZERO(data->therm); 733 + if (err) 734 + goto error; 731 735 732 - system76_battery_init(); 736 + system76_battery_init(); 737 + } 733 738 734 739 return 0; 735 740 736 741 error: 737 - kfree(data->ntmp); 738 - kfree(data->nfan); 742 + if (data->has_open_ec) { 743 + kfree(data->ntmp); 744 + kfree(data->nfan); 745 + } 739 746 return err; 740 747 } 741 748 ··· 750 749 751 750 data = acpi_driver_data(acpi_dev); 752 751 753 - system76_battery_exit(); 752 + if (data->has_open_ec) { 753 + system76_battery_exit(); 754 + kfree(data->nfan); 755 + kfree(data->ntmp); 756 + } 754 757 755 758 devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led); 756 759 devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led); 757 - 758 - kfree(data->nfan); 759 - kfree(data->ntmp); 760 760 761 761 system76_get(data, "FINI"); 762 762
+21 -17
drivers/scsi/pm8001/pm80xx_hwi.c
··· 3053 3053 struct smp_completion_resp *psmpPayload; 3054 3054 struct task_status_struct *ts; 3055 3055 struct pm8001_device *pm8001_dev; 3056 - char *pdma_respaddr = NULL; 3057 3056 3058 3057 psmpPayload = (struct smp_completion_resp *)(piomb + 4); 3059 3058 status = le32_to_cpu(psmpPayload->status); ··· 3079 3080 if (pm8001_dev) 3080 3081 atomic_dec(&pm8001_dev->running_req); 3081 3082 if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { 3083 + struct scatterlist *sg_resp = &t->smp_task.smp_resp; 3084 + u8 *payload; 3085 + void *to; 3086 + 3082 3087 pm8001_dbg(pm8001_ha, IO, 3083 3088 "DIRECT RESPONSE Length:%d\n", 3084 3089 param); 3085 - pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64 3086 - ((u64)sg_dma_address 3087 - (&t->smp_task.smp_resp)))); 3090 + to = kmap_atomic(sg_page(sg_resp)); 3091 + payload = to + sg_resp->offset; 3088 3092 for (i = 0; i < param; i++) { 3089 - *(pdma_respaddr+i) = psmpPayload->_r_a[i]; 3093 + *(payload + i) = psmpPayload->_r_a[i]; 3090 3094 pm8001_dbg(pm8001_ha, IO, 3091 3095 "SMP Byte%d DMA data 0x%x psmp 0x%x\n", 3092 - i, *(pdma_respaddr + i), 3096 + i, *(payload + i), 3093 3097 psmpPayload->_r_a[i]); 3094 3098 } 3099 + kunmap_atomic(to); 3095 3100 } 3096 3101 break; 3097 3102 case IO_ABORTED: ··· 4239 4236 struct sas_task *task = ccb->task; 4240 4237 struct domain_device *dev = task->dev; 4241 4238 struct pm8001_device *pm8001_dev = dev->lldd_dev; 4242 - struct scatterlist *sg_req, *sg_resp; 4239 + struct scatterlist *sg_req, *sg_resp, *smp_req; 4243 4240 u32 req_len, resp_len; 4244 4241 struct smp_req smp_cmd; 4245 4242 u32 opc; 4246 4243 struct inbound_queue_table *circularQ; 4247 - char *preq_dma_addr = NULL; 4248 - __le64 tmp_addr; 4249 4244 u32 i, length; 4245 + u8 *payload; 4246 + u8 *to; 4250 4247 4251 4248 memset(&smp_cmd, 0, sizeof(smp_cmd)); 4252 4249 /* ··· 4283 4280 pm8001_ha->smp_exp_mode = SMP_INDIRECT; 4284 4281 4285 4282 4286 - tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); 4287 - 
preq_dma_addr = (char *)phys_to_virt(tmp_addr); 4283 + smp_req = &task->smp_task.smp_req; 4284 + to = kmap_atomic(sg_page(smp_req)); 4285 + payload = to + smp_req->offset; 4288 4286 4289 4287 /* INDIRECT MODE command settings. Use DMA */ 4290 4288 if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { ··· 4293 4289 /* for SPCv indirect mode. Place the top 4 bytes of 4294 4290 * SMP Request header here. */ 4295 4291 for (i = 0; i < 4; i++) 4296 - smp_cmd.smp_req16[i] = *(preq_dma_addr + i); 4292 + smp_cmd.smp_req16[i] = *(payload + i); 4297 4293 /* exclude top 4 bytes for SMP req header */ 4298 4294 smp_cmd.long_smp_req.long_req_addr = 4299 4295 cpu_to_le64((u64)sg_dma_address ··· 4324 4320 pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n"); 4325 4321 for (i = 0; i < length; i++) 4326 4322 if (i < 16) { 4327 - smp_cmd.smp_req16[i] = *(preq_dma_addr+i); 4323 + smp_cmd.smp_req16[i] = *(payload + i); 4328 4324 pm8001_dbg(pm8001_ha, IO, 4329 4325 "Byte[%d]:%x (DMA data:%x)\n", 4330 4326 i, smp_cmd.smp_req16[i], 4331 - *(preq_dma_addr)); 4327 + *(payload)); 4332 4328 } else { 4333 - smp_cmd.smp_req[i] = *(preq_dma_addr+i); 4329 + smp_cmd.smp_req[i] = *(payload + i); 4334 4330 pm8001_dbg(pm8001_ha, IO, 4335 4331 "Byte[%d]:%x (DMA data:%x)\n", 4336 4332 i, smp_cmd.smp_req[i], 4337 - *(preq_dma_addr)); 4333 + *(payload)); 4338 4334 } 4339 4335 } 4340 - 4336 + kunmap_atomic(to); 4341 4337 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, 4342 4338 &smp_cmd, pm8001_ha->smp_exp_mode, length); 4343 4339 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
+1 -1
drivers/spi/spi-armada-3700.c
··· 901 901 return 0; 902 902 903 903 error_clk: 904 - clk_disable_unprepare(spi->clk); 904 + clk_unprepare(spi->clk); 905 905 error: 906 906 spi_master_put(master); 907 907 out:
+2 -4
drivers/tee/optee/core.c
··· 48 48 goto err; 49 49 } 50 50 51 - for (i = 0; i < nr_pages; i++) { 52 - pages[i] = page; 53 - page++; 54 - } 51 + for (i = 0; i < nr_pages; i++) 52 + pages[i] = page + i; 55 53 56 54 shm->flags |= TEE_SHM_REGISTER; 57 55 rc = shm_register(shm->ctx, shm, pages, nr_pages,
+2
drivers/tee/optee/smc_abi.c
··· 23 23 #include "optee_private.h" 24 24 #include "optee_smc.h" 25 25 #include "optee_rpc_cmd.h" 26 + #include <linux/kmemleak.h> 26 27 #define CREATE_TRACE_POINTS 27 28 #include "optee_trace.h" 28 29 ··· 784 783 param->a4 = 0; 785 784 param->a5 = 0; 786 785 } 786 + kmemleak_not_leak(shm); 787 787 break; 788 788 case OPTEE_SMC_RPC_FUNC_FREE: 789 789 shm = reg_pair_to_ptr(param->a1, param->a2);
+66 -108
drivers/tee/tee_shm.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2015-2016, Linaro Limited 3 + * Copyright (c) 2015-2017, 2019-2021 Linaro Limited 4 4 */ 5 + #include <linux/anon_inodes.h> 5 6 #include <linux/device.h> 6 - #include <linux/dma-buf.h> 7 - #include <linux/fdtable.h> 8 7 #include <linux/idr.h> 8 + #include <linux/mm.h> 9 9 #include <linux/sched.h> 10 10 #include <linux/slab.h> 11 11 #include <linux/tee_drv.h> 12 12 #include <linux/uio.h> 13 - #include <linux/module.h> 14 13 #include "tee_private.h" 15 - 16 - MODULE_IMPORT_NS(DMA_BUF); 17 14 18 15 static void release_registered_pages(struct tee_shm *shm) 19 16 { ··· 28 31 } 29 32 } 30 33 31 - static void tee_shm_release(struct tee_shm *shm) 34 + static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) 32 35 { 33 - struct tee_device *teedev = shm->ctx->teedev; 34 - 35 - if (shm->flags & TEE_SHM_DMA_BUF) { 36 - mutex_lock(&teedev->mutex); 37 - idr_remove(&teedev->idr, shm->id); 38 - mutex_unlock(&teedev->mutex); 39 - } 40 - 41 36 if (shm->flags & TEE_SHM_POOL) { 42 37 struct tee_shm_pool_mgr *poolm; 43 38 ··· 55 66 56 67 tee_device_put(teedev); 57 68 } 58 - 59 - static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment 60 - *attach, enum dma_data_direction dir) 61 - { 62 - return NULL; 63 - } 64 - 65 - static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach, 66 - struct sg_table *table, 67 - enum dma_data_direction dir) 68 - { 69 - } 70 - 71 - static void tee_shm_op_release(struct dma_buf *dmabuf) 72 - { 73 - struct tee_shm *shm = dmabuf->priv; 74 - 75 - tee_shm_release(shm); 76 - } 77 - 78 - static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) 79 - { 80 - struct tee_shm *shm = dmabuf->priv; 81 - size_t size = vma->vm_end - vma->vm_start; 82 - 83 - /* Refuse sharing shared memory provided by application */ 84 - if (shm->flags & TEE_SHM_USER_MAPPED) 85 - return -EINVAL; 86 - 87 - return remap_pfn_range(vma, vma->vm_start, 
shm->paddr >> PAGE_SHIFT, 88 - size, vma->vm_page_prot); 89 - } 90 - 91 - static const struct dma_buf_ops tee_shm_dma_buf_ops = { 92 - .map_dma_buf = tee_shm_op_map_dma_buf, 93 - .unmap_dma_buf = tee_shm_op_unmap_dma_buf, 94 - .release = tee_shm_op_release, 95 - .mmap = tee_shm_op_mmap, 96 - }; 97 69 98 70 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) 99 71 { ··· 90 140 goto err_dev_put; 91 141 } 92 142 143 + refcount_set(&shm->refcount, 1); 93 144 shm->flags = flags | TEE_SHM_POOL; 94 145 shm->ctx = ctx; 95 146 if (flags & TEE_SHM_DMA_BUF) ··· 104 153 goto err_kfree; 105 154 } 106 155 107 - 108 156 if (flags & TEE_SHM_DMA_BUF) { 109 - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 110 - 111 157 mutex_lock(&teedev->mutex); 112 158 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); 113 159 mutex_unlock(&teedev->mutex); ··· 112 164 ret = ERR_PTR(shm->id); 113 165 goto err_pool_free; 114 166 } 115 - 116 - exp_info.ops = &tee_shm_dma_buf_ops; 117 - exp_info.size = shm->size; 118 - exp_info.flags = O_RDWR; 119 - exp_info.priv = shm; 120 - 121 - shm->dmabuf = dma_buf_export(&exp_info); 122 - if (IS_ERR(shm->dmabuf)) { 123 - ret = ERR_CAST(shm->dmabuf); 124 - goto err_rem; 125 - } 126 167 } 127 168 128 169 teedev_ctx_get(ctx); 129 170 130 171 return shm; 131 - err_rem: 132 - if (flags & TEE_SHM_DMA_BUF) { 133 - mutex_lock(&teedev->mutex); 134 - idr_remove(&teedev->idr, shm->id); 135 - mutex_unlock(&teedev->mutex); 136 - } 137 172 err_pool_free: 138 173 poolm->ops->free(poolm, shm); 139 174 err_kfree: ··· 177 246 goto err; 178 247 } 179 248 249 + refcount_set(&shm->refcount, 1); 180 250 shm->flags = flags | TEE_SHM_REGISTER; 181 251 shm->ctx = ctx; 182 252 shm->id = -1; ··· 238 306 goto err; 239 307 } 240 308 241 - if (flags & TEE_SHM_DMA_BUF) { 242 - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 243 - 244 - exp_info.ops = &tee_shm_dma_buf_ops; 245 - exp_info.size = shm->size; 246 - exp_info.flags = O_RDWR; 247 - exp_info.priv = shm; 248 - 249 - 
shm->dmabuf = dma_buf_export(&exp_info); 250 - if (IS_ERR(shm->dmabuf)) { 251 - ret = ERR_CAST(shm->dmabuf); 252 - teedev->desc->ops->shm_unregister(ctx, shm); 253 - goto err; 254 - } 255 - } 256 - 257 309 return shm; 258 310 err: 259 311 if (shm) { ··· 255 339 } 256 340 EXPORT_SYMBOL_GPL(tee_shm_register); 257 341 342 + static int tee_shm_fop_release(struct inode *inode, struct file *filp) 343 + { 344 + tee_shm_put(filp->private_data); 345 + return 0; 346 + } 347 + 348 + static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma) 349 + { 350 + struct tee_shm *shm = filp->private_data; 351 + size_t size = vma->vm_end - vma->vm_start; 352 + 353 + /* Refuse sharing shared memory provided by application */ 354 + if (shm->flags & TEE_SHM_USER_MAPPED) 355 + return -EINVAL; 356 + 357 + /* check for overflowing the buffer's size */ 358 + if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) 359 + return -EINVAL; 360 + 361 + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, 362 + size, vma->vm_page_prot); 363 + } 364 + 365 + static const struct file_operations tee_shm_fops = { 366 + .owner = THIS_MODULE, 367 + .release = tee_shm_fop_release, 368 + .mmap = tee_shm_fop_mmap, 369 + }; 370 + 258 371 /** 259 372 * tee_shm_get_fd() - Increase reference count and return file descriptor 260 373 * @shm: Shared memory handle ··· 296 351 if (!(shm->flags & TEE_SHM_DMA_BUF)) 297 352 return -EINVAL; 298 353 299 - get_dma_buf(shm->dmabuf); 300 - fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); 354 + /* matched by tee_shm_put() in tee_shm_op_release() */ 355 + refcount_inc(&shm->refcount); 356 + fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR); 301 357 if (fd < 0) 302 - dma_buf_put(shm->dmabuf); 358 + tee_shm_put(shm); 303 359 return fd; 304 360 } 305 361 ··· 310 364 */ 311 365 void tee_shm_free(struct tee_shm *shm) 312 366 { 313 - /* 314 - * dma_buf_put() decreases the dmabuf reference counter and will 315 - * call tee_shm_release() when the 
last reference is gone. 316 - * 317 - * In the case of driver private memory we call tee_shm_release 318 - * directly instead as it doesn't have a reference counter. 319 - */ 320 - if (shm->flags & TEE_SHM_DMA_BUF) 321 - dma_buf_put(shm->dmabuf); 322 - else 323 - tee_shm_release(shm); 367 + tee_shm_put(shm); 324 368 } 325 369 EXPORT_SYMBOL_GPL(tee_shm_free); 326 370 ··· 417 481 teedev = ctx->teedev; 418 482 mutex_lock(&teedev->mutex); 419 483 shm = idr_find(&teedev->idr, id); 484 + /* 485 + * If the tee_shm was found in the IDR it must have a refcount 486 + * larger than 0 due to the guarantee in tee_shm_put() below. So 487 + * it's safe to use refcount_inc(). 488 + */ 420 489 if (!shm || shm->ctx != ctx) 421 490 shm = ERR_PTR(-EINVAL); 422 - else if (shm->flags & TEE_SHM_DMA_BUF) 423 - get_dma_buf(shm->dmabuf); 491 + else 492 + refcount_inc(&shm->refcount); 424 493 mutex_unlock(&teedev->mutex); 425 494 return shm; 426 495 } ··· 437 496 */ 438 497 void tee_shm_put(struct tee_shm *shm) 439 498 { 440 - if (shm->flags & TEE_SHM_DMA_BUF) 441 - dma_buf_put(shm->dmabuf); 499 + struct tee_device *teedev = shm->ctx->teedev; 500 + bool do_release = false; 501 + 502 + mutex_lock(&teedev->mutex); 503 + if (refcount_dec_and_test(&shm->refcount)) { 504 + /* 505 + * refcount has reached 0, we must now remove it from the 506 + * IDR before releasing the mutex. This will guarantee that 507 + * the refcount_inc() in tee_shm_get_from_id() never starts 508 + * from 0. 509 + */ 510 + if (shm->flags & TEE_SHM_DMA_BUF) 511 + idr_remove(&teedev->idr, shm->id); 512 + do_release = true; 513 + } 514 + mutex_unlock(&teedev->mutex); 515 + 516 + if (do_release) 517 + tee_shm_release(teedev, shm); 442 518 } 443 519 EXPORT_SYMBOL_GPL(tee_shm_put);
+27 -3
drivers/tty/hvc/hvc_xen.c
··· 37 37 struct xenbus_device *xbdev; 38 38 struct xencons_interface *intf; 39 39 unsigned int evtchn; 40 + XENCONS_RING_IDX out_cons; 41 + unsigned int out_cons_same; 40 42 struct hvc_struct *hvc; 41 43 int irq; 42 44 int vtermno; ··· 140 138 XENCONS_RING_IDX cons, prod; 141 139 int recv = 0; 142 140 struct xencons_info *xencons = vtermno_to_xencons(vtermno); 141 + unsigned int eoiflag = 0; 142 + 143 143 if (xencons == NULL) 144 144 return -EINVAL; 145 145 intf = xencons->intf; ··· 161 157 mb(); /* read ring before consuming */ 162 158 intf->in_cons = cons; 163 159 164 - notify_daemon(xencons); 160 + /* 161 + * When to mark interrupt having been spurious: 162 + * - there was no new data to be read, and 163 + * - the backend did not consume some output bytes, and 164 + * - the previous round with no read data didn't see consumed bytes 165 + * (we might have a race with an interrupt being in flight while 166 + * updating xencons->out_cons, so account for that by allowing one 167 + * round without any visible reason) 168 + */ 169 + if (intf->out_cons != xencons->out_cons) { 170 + xencons->out_cons = intf->out_cons; 171 + xencons->out_cons_same = 0; 172 + } 173 + if (recv) { 174 + notify_daemon(xencons); 175 + } else if (xencons->out_cons_same++ > 1) { 176 + eoiflag = XEN_EOI_FLAG_SPURIOUS; 177 + } 178 + 179 + xen_irq_lateeoi(xencons->irq, eoiflag); 180 + 165 181 return recv; 166 182 } 167 183 ··· 410 386 if (ret) 411 387 return ret; 412 388 info->evtchn = evtchn; 413 - irq = bind_evtchn_to_irq(evtchn); 389 + irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn); 414 390 if (irq < 0) 415 391 return irq; 416 392 info->irq = irq; ··· 575 551 return r; 576 552 577 553 info = vtermno_to_xencons(HVC_COOKIE); 578 - info->irq = bind_evtchn_to_irq(info->evtchn); 554 + info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn); 579 555 } 580 556 if (info->irq < 0) 581 557 info->irq = 0; /* NO_IRQ */
+22 -1
drivers/tty/n_hdlc.c
··· 140 140 struct n_hdlc_buf_list rx_buf_list; 141 141 struct n_hdlc_buf_list tx_free_buf_list; 142 142 struct n_hdlc_buf_list rx_free_buf_list; 143 + struct work_struct write_work; 144 + struct tty_struct *tty_for_write_work; 143 145 }; 144 146 145 147 /* ··· 156 154 /* Local functions */ 157 155 158 156 static struct n_hdlc *n_hdlc_alloc(void); 157 + static void n_hdlc_tty_write_work(struct work_struct *work); 159 158 160 159 /* max frame size for memory allocations */ 161 160 static int maxframe = 4096; ··· 213 210 wake_up_interruptible(&tty->read_wait); 214 211 wake_up_interruptible(&tty->write_wait); 215 212 213 + cancel_work_sync(&n_hdlc->write_work); 214 + 216 215 n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list); 217 216 n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list); 218 217 n_hdlc_free_buf_list(&n_hdlc->rx_buf_list); ··· 246 241 return -ENFILE; 247 242 } 248 243 244 + INIT_WORK(&n_hdlc->write_work, n_hdlc_tty_write_work); 245 + n_hdlc->tty_for_write_work = tty; 249 246 tty->disc_data = n_hdlc; 250 247 tty->receive_room = 65536; 251 248 ··· 342 335 } /* end of n_hdlc_send_frames() */ 343 336 344 337 /** 338 + * n_hdlc_tty_write_work - Asynchronous callback for transmit wakeup 339 + * @work: pointer to work_struct 340 + * 341 + * Called when low level device driver can accept more send data. 342 + */ 343 + static void n_hdlc_tty_write_work(struct work_struct *work) 344 + { 345 + struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work); 346 + struct tty_struct *tty = n_hdlc->tty_for_write_work; 347 + 348 + n_hdlc_send_frames(n_hdlc, tty); 349 + } /* end of n_hdlc_tty_write_work() */ 350 + 351 + /** 345 352 * n_hdlc_tty_wakeup - Callback for transmit wakeup 346 353 * @tty: pointer to associated tty instance data 347 354 * ··· 365 344 { 366 345 struct n_hdlc *n_hdlc = tty->disc_data; 367 346 368 - n_hdlc_send_frames(n_hdlc, tty); 347 + schedule_work(&n_hdlc->write_work); 369 348 } /* end of n_hdlc_tty_wakeup() */ 370 349 371 350 /**
-20
drivers/tty/serial/8250/8250_fintek.c
··· 290 290 } 291 291 } 292 292 293 - static void fintek_8250_goto_highspeed(struct uart_8250_port *uart, 294 - struct fintek_8250 *pdata) 295 - { 296 - sio_write_reg(pdata, LDN, pdata->index); 297 - 298 - switch (pdata->pid) { 299 - case CHIP_ID_F81966: 300 - case CHIP_ID_F81866: /* set uart clock for high speed serial mode */ 301 - sio_write_mask_reg(pdata, F81866_UART_CLK, 302 - F81866_UART_CLK_MASK, 303 - F81866_UART_CLK_14_769MHZ); 304 - 305 - uart->port.uartclk = 921600 * 16; 306 - break; 307 - default: /* leave clock speed untouched */ 308 - break; 309 - } 310 - } 311 - 312 293 static void fintek_8250_set_termios(struct uart_port *port, 313 294 struct ktermios *termios, 314 295 struct ktermios *old) ··· 411 430 412 431 fintek_8250_set_irq_mode(pdata, level_mode); 413 432 fintek_8250_set_max_fifo(pdata); 414 - fintek_8250_goto_highspeed(uart, pdata); 415 433 416 434 fintek_8250_exit_key(addr[i]); 417 435
+12
drivers/usb/cdns3/cdnsp-gadget.c
··· 1541 1541 { 1542 1542 struct cdnsp_device *pdev = gadget_to_cdnsp(gadget); 1543 1543 struct cdns *cdns = dev_get_drvdata(pdev->dev); 1544 + unsigned long flags; 1544 1545 1545 1546 trace_cdnsp_pullup(is_on); 1547 + 1548 + /* 1549 + * Disable events handling while controller is being 1550 + * enabled/disabled. 1551 + */ 1552 + disable_irq(cdns->dev_irq); 1553 + spin_lock_irqsave(&pdev->lock, flags); 1546 1554 1547 1555 if (!is_on) { 1548 1556 cdnsp_reset_device(pdev); ··· 1558 1550 } else { 1559 1551 cdns_set_vbus(cdns); 1560 1552 } 1553 + 1554 + spin_unlock_irqrestore(&pdev->lock, flags); 1555 + enable_irq(cdns->dev_irq); 1556 + 1561 1557 return 0; 1562 1558 } 1563 1559
+10 -1
drivers/usb/cdns3/cdnsp-ring.c
··· 1029 1029 return; 1030 1030 } 1031 1031 1032 + *status = 0; 1033 + 1032 1034 cdnsp_finish_td(pdev, td, event, pep, status); 1033 1035 } 1034 1036 ··· 1525 1523 spin_lock_irqsave(&pdev->lock, flags); 1526 1524 1527 1525 if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) { 1528 - cdnsp_died(pdev); 1526 + /* 1527 + * While removing or stopping driver there may still be deferred 1528 + * not handled interrupt which should not be treated as error. 1529 + * Driver should simply ignore it. 1530 + */ 1531 + if (pdev->gadget_driver) 1532 + cdnsp_died(pdev); 1533 + 1529 1534 spin_unlock_irqrestore(&pdev->lock, flags); 1530 1535 return IRQ_HANDLED; 1531 1536 }
+2 -2
drivers/usb/cdns3/cdnsp-trace.h
··· 57 57 __entry->first_prime_det = pep->stream_info.first_prime_det; 58 58 __entry->drbls_count = pep->stream_info.drbls_count; 59 59 ), 60 - TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num %d " 60 + TP_printk("%s: SID: %08x, ep state: %x, stream: enabled: %d num %d " 61 61 "tds %d, first prime: %d drbls %d", 62 - __get_str(name), __entry->state, __entry->stream_id, 62 + __get_str(name), __entry->stream_id, __entry->state, 63 63 __entry->enabled, __entry->num_streams, __entry->td_count, 64 64 __entry->first_prime_det, __entry->drbls_count) 65 65 );
+3
drivers/usb/core/quirks.c
··· 434 434 { USB_DEVICE(0x1532, 0x0116), .driver_info = 435 435 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 436 436 437 + /* Lenovo USB-C to Ethernet Adapter RTL8153-04 */ 438 + { USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM }, 439 + 437 440 /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */ 438 441 { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM }, 439 442
+3
drivers/usb/dwc2/platform.c
··· 575 575 ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN; 576 576 ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN; 577 577 dwc2_writel(hsotg, ggpio, GGPIO); 578 + 579 + /* ID/VBUS detection startup time */ 580 + usleep_range(5000, 7000); 578 581 } 579 582 580 583 retval = dwc2_drd_init(hsotg);
+11 -4
drivers/usb/early/xhci-dbc.c
··· 14 14 #include <linux/pci_ids.h> 15 15 #include <linux/memblock.h> 16 16 #include <linux/io.h> 17 - #include <linux/iopoll.h> 18 17 #include <asm/pci-direct.h> 19 18 #include <asm/fixmap.h> 20 19 #include <linux/bcd.h> ··· 135 136 { 136 137 u32 result; 137 138 138 - return readl_poll_timeout_atomic(ptr, result, 139 - ((result & mask) == done), 140 - delay, wait); 139 + /* Can not use readl_poll_timeout_atomic() for early boot things */ 140 + do { 141 + result = readl(ptr); 142 + result &= mask; 143 + if (result == done) 144 + return 0; 145 + udelay(delay); 146 + wait -= delay; 147 + } while (wait > 0); 148 + 149 + return -ETIMEDOUT; 141 150 } 142 151 143 152 static void __init xdbc_bios_handoff(void)
+3 -3
drivers/usb/gadget/composite.c
··· 1680 1680 u8 endp; 1681 1681 1682 1682 if (w_length > USB_COMP_EP0_BUFSIZ) { 1683 - if (ctrl->bRequestType == USB_DIR_OUT) { 1684 - goto done; 1685 - } else { 1683 + if (ctrl->bRequestType & USB_DIR_IN) { 1686 1684 /* Cast away the const, we are going to overwrite on purpose. */ 1687 1685 __le16 *temp = (__le16 *)&ctrl->wLength; 1688 1686 1689 1687 *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ); 1690 1688 w_length = USB_COMP_EP0_BUFSIZ; 1689 + } else { 1690 + goto done; 1691 1691 } 1692 1692 } 1693 1693
+6 -3
drivers/usb/gadget/function/f_fs.c
··· 1773 1773 1774 1774 BUG_ON(ffs->gadget); 1775 1775 1776 - if (ffs->epfiles) 1776 + if (ffs->epfiles) { 1777 1777 ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); 1778 + ffs->epfiles = NULL; 1779 + } 1778 1780 1779 - if (ffs->ffs_eventfd) 1781 + if (ffs->ffs_eventfd) { 1780 1782 eventfd_ctx_put(ffs->ffs_eventfd); 1783 + ffs->ffs_eventfd = NULL; 1784 + } 1781 1785 1782 1786 kfree(ffs->raw_descs_data); 1783 1787 kfree(ffs->raw_strings); ··· 1794 1790 1795 1791 ffs_data_clear(ffs); 1796 1792 1797 - ffs->epfiles = NULL; 1798 1793 ffs->raw_descs_data = NULL; 1799 1794 ffs->raw_descs = NULL; 1800 1795 ffs->raw_strings = NULL;
+6 -10
drivers/usb/gadget/function/u_ether.c
··· 17 17 #include <linux/etherdevice.h> 18 18 #include <linux/ethtool.h> 19 19 #include <linux/if_vlan.h> 20 + #include <linux/etherdevice.h> 20 21 21 22 #include "u_ether.h" 22 23 ··· 864 863 { 865 864 struct eth_dev *dev; 866 865 struct usb_gadget *g; 867 - struct sockaddr sa; 868 866 int status; 869 867 870 868 if (!net->dev.parent) 871 869 return -EINVAL; 872 870 dev = netdev_priv(net); 873 871 g = dev->gadget; 872 + 873 + net->addr_assign_type = NET_ADDR_RANDOM; 874 + eth_hw_addr_set(net, dev->dev_mac); 875 + 874 876 status = register_netdev(net); 875 877 if (status < 0) { 876 878 dev_dbg(&g->dev, "register_netdev failed, %d\n", status); 877 879 return status; 878 880 } else { 879 881 INFO(dev, "HOST MAC %pM\n", dev->host_mac); 882 + INFO(dev, "MAC %pM\n", dev->dev_mac); 880 883 881 884 /* two kinds of host-initiated state changes: 882 885 * - iff DATA transfer is active, carrier is "on" ··· 888 883 */ 889 884 netif_carrier_off(net); 890 885 } 891 - sa.sa_family = net->type; 892 - memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN); 893 - rtnl_lock(); 894 - status = dev_set_mac_address(net, &sa, NULL); 895 - rtnl_unlock(); 896 - if (status) 897 - pr_warn("cannot set self ethernet address: %d\n", status); 898 - else 899 - INFO(dev, "MAC %pM\n", dev->dev_mac); 900 886 901 887 return status; 902 888 }
+3 -3
drivers/usb/gadget/legacy/dbgp.c
··· 346 346 u16 len = 0; 347 347 348 348 if (length > DBGP_REQ_LEN) { 349 - if (ctrl->bRequestType == USB_DIR_OUT) { 350 - return err; 351 - } else { 349 + if (ctrl->bRequestType & USB_DIR_IN) { 352 350 /* Cast away the const, we are going to overwrite on purpose. */ 353 351 __le16 *temp = (__le16 *)&ctrl->wLength; 354 352 355 353 *temp = cpu_to_le16(DBGP_REQ_LEN); 356 354 length = DBGP_REQ_LEN; 355 + } else { 356 + return err; 357 357 } 358 358 } 359 359
+3 -3
drivers/usb/gadget/legacy/inode.c
··· 1334 1334 u16 w_length = le16_to_cpu(ctrl->wLength); 1335 1335 1336 1336 if (w_length > RBUF_SIZE) { 1337 - if (ctrl->bRequestType == USB_DIR_OUT) { 1338 - return value; 1339 - } else { 1337 + if (ctrl->bRequestType & USB_DIR_IN) { 1340 1338 /* Cast away the const, we are going to overwrite on purpose. */ 1341 1339 __le16 *temp = (__le16 *)&ctrl->wLength; 1342 1340 1343 1341 *temp = cpu_to_le16(RBUF_SIZE); 1344 1342 w_length = RBUF_SIZE; 1343 + } else { 1344 + return value; 1345 1345 } 1346 1346 } 1347 1347
+1 -1
drivers/usb/host/xhci-mtk-sch.c
··· 781 781 782 782 ret = xhci_check_bandwidth(hcd, udev); 783 783 if (!ret) 784 - INIT_LIST_HEAD(&mtk->bw_ep_chk_list); 784 + list_del_init(&mtk->bw_ep_chk_list); 785 785 786 786 return ret; 787 787 }
+9 -2
drivers/usb/host/xhci-pci.c
··· 71 71 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e 72 72 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6 73 73 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7 74 + #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 0x161c 75 + #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8 0x161f 74 76 75 77 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 76 78 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 ··· 123 121 /* Look for vendor-specific quirks */ 124 122 if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && 125 123 (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK || 126 - pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 || 127 124 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) { 128 125 if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && 129 126 pdev->revision == 0x0) { ··· 156 155 if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && 157 156 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009) 158 157 xhci->quirks |= XHCI_BROKEN_STREAMS; 158 + 159 + if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && 160 + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100) 161 + xhci->quirks |= XHCI_TRUST_TX_LENGTH; 159 162 160 163 if (pdev->vendor == PCI_VENDOR_ID_NEC) 161 164 xhci->quirks |= XHCI_NEC_HOST; ··· 335 330 pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 || 336 331 pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 || 337 332 pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 || 338 - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6)) 333 + pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 || 334 + pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 || 335 + pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8)) 339 336 xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; 340 337 341 338 if (xhci->quirks & XHCI_RESET_ON_RESUME)
+10 -2
drivers/usb/mtu3/mtu3_gadget.c
··· 77 77 if (usb_endpoint_xfer_int(desc) || 78 78 usb_endpoint_xfer_isoc(desc)) { 79 79 interval = desc->bInterval; 80 - interval = clamp_val(interval, 1, 16) - 1; 80 + interval = clamp_val(interval, 1, 16); 81 81 if (usb_endpoint_xfer_isoc(desc) && comp_desc) 82 82 mult = comp_desc->bmAttributes; 83 83 } ··· 89 89 if (usb_endpoint_xfer_isoc(desc) || 90 90 usb_endpoint_xfer_int(desc)) { 91 91 interval = desc->bInterval; 92 - interval = clamp_val(interval, 1, 16) - 1; 92 + interval = clamp_val(interval, 1, 16); 93 93 mult = usb_endpoint_maxp_mult(desc) - 1; 94 94 } 95 + break; 96 + case USB_SPEED_FULL: 97 + if (usb_endpoint_xfer_isoc(desc)) 98 + interval = clamp_val(desc->bInterval, 1, 16); 99 + else if (usb_endpoint_xfer_int(desc)) 100 + interval = clamp_val(desc->bInterval, 1, 255); 101 + 95 102 break; 96 103 default: 97 104 break; /*others are ignored */ ··· 242 235 mreq->request.dma = DMA_ADDR_INVALID; 243 236 mreq->epnum = mep->epnum; 244 237 mreq->mep = mep; 238 + INIT_LIST_HEAD(&mreq->list); 245 239 trace_mtu3_alloc_request(mreq); 246 240 247 241 return &mreq->request;
+6 -1
drivers/usb/mtu3/mtu3_qmu.c
··· 273 273 gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP); 274 274 } 275 275 276 + /* prevent reorder, make sure GPD's HWO is set last */ 277 + mb(); 276 278 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO); 277 279 278 280 mreq->gpd = gpd; ··· 308 306 gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma)); 309 307 ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma)); 310 308 gpd->dw3_info = cpu_to_le32(ext_addr); 309 + /* prevent reorder, make sure GPD's HWO is set last */ 310 + mb(); 311 311 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO); 312 312 313 313 mreq->gpd = gpd; ··· 449 445 return; 450 446 } 451 447 mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY); 452 - 448 + /* prevent reorder, make sure GPD's HWO is set last */ 449 + mb(); 453 450 /* by pass the current GDP */ 454 451 gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO); 455 452
+4 -2
drivers/usb/serial/cp210x.c
··· 1635 1635 1636 1636 /* 2 banks of GPIO - One for the pins taken from each serial port */ 1637 1637 if (intf_num == 0) { 1638 + priv->gc.ngpio = 2; 1639 + 1638 1640 if (mode.eci == CP210X_PIN_MODE_MODEM) { 1639 1641 /* mark all GPIOs of this interface as reserved */ 1640 1642 priv->gpio_altfunc = 0xff; ··· 1647 1645 priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) & 1648 1646 CP210X_ECI_GPIO_MODE_MASK) >> 1649 1647 CP210X_ECI_GPIO_MODE_OFFSET); 1650 - priv->gc.ngpio = 2; 1651 1648 } else if (intf_num == 1) { 1649 + priv->gc.ngpio = 3; 1650 + 1652 1651 if (mode.sci == CP210X_PIN_MODE_MODEM) { 1653 1652 /* mark all GPIOs of this interface as reserved */ 1654 1653 priv->gpio_altfunc = 0xff; ··· 1660 1657 priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) & 1661 1658 CP210X_SCI_GPIO_MODE_MASK) >> 1662 1659 CP210X_SCI_GPIO_MODE_OFFSET); 1663 - priv->gc.ngpio = 3; 1664 1660 } else { 1665 1661 return -ENODEV; 1666 1662 }
+8
drivers/usb/serial/option.c
··· 1219 1219 .driver_info = NCTRL(2) | RSVD(3) }, 1220 1220 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */ 1221 1221 .driver_info = NCTRL(0) | RSVD(1) }, 1222 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */ 1223 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, 1224 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */ 1225 + .driver_info = NCTRL(0) | RSVD(1) }, 1226 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */ 1227 + .driver_info = NCTRL(2) | RSVD(3) }, 1228 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */ 1229 + .driver_info = NCTRL(0) | RSVD(1) }, 1222 1230 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), 1223 1231 .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, 1224 1232 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+13 -5
drivers/usb/typec/tcpm/tcpm.c
··· 324 324 325 325 bool attached; 326 326 bool connected; 327 + bool registered; 327 328 bool pd_supported; 328 329 enum typec_port_type port_type; 329 330 ··· 6292 6291 { 6293 6292 struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer); 6294 6293 6295 - kthread_queue_work(port->wq, &port->state_machine); 6294 + if (port->registered) 6295 + kthread_queue_work(port->wq, &port->state_machine); 6296 6296 return HRTIMER_NORESTART; 6297 6297 } 6298 6298 ··· 6301 6299 { 6302 6300 struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer); 6303 6301 6304 - kthread_queue_work(port->wq, &port->vdm_state_machine); 6302 + if (port->registered) 6303 + kthread_queue_work(port->wq, &port->vdm_state_machine); 6305 6304 return HRTIMER_NORESTART; 6306 6305 } 6307 6306 ··· 6310 6307 { 6311 6308 struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer); 6312 6309 6313 - kthread_queue_work(port->wq, &port->enable_frs); 6310 + if (port->registered) 6311 + kthread_queue_work(port->wq, &port->enable_frs); 6314 6312 return HRTIMER_NORESTART; 6315 6313 } 6316 6314 ··· 6319 6315 { 6320 6316 struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer); 6321 6317 6322 - kthread_queue_work(port->wq, &port->send_discover_work); 6318 + if (port->registered) 6319 + kthread_queue_work(port->wq, &port->send_discover_work); 6323 6320 return HRTIMER_NORESTART; 6324 6321 } 6325 6322 ··· 6408 6403 typec_port_register_altmodes(port->typec_port, 6409 6404 &tcpm_altmode_ops, port, 6410 6405 port->port_altmode, ALTMODE_DISCOVERY_MAX); 6406 + port->registered = true; 6411 6407 6412 6408 mutex_lock(&port->lock); 6413 6409 tcpm_init(port); ··· 6430 6424 { 6431 6425 int i; 6432 6426 6427 + port->registered = false; 6428 + kthread_destroy_worker(port->wq); 6429 + 6433 6430 hrtimer_cancel(&port->send_discover_timer); 6434 6431 hrtimer_cancel(&port->enable_frs_timer); 6435 6432 
hrtimer_cancel(&port->vdm_state_machine_timer); ··· 6444 6435 typec_unregister_port(port->typec_port); 6445 6436 usb_role_switch_put(port->role_sw); 6446 6437 tcpm_debugfs_exit(port); 6447 - kthread_destroy_worker(port->wq); 6448 6438 } 6449 6439 EXPORT_SYMBOL_GPL(tcpm_unregister_port); 6450 6440
+3 -1
drivers/usb/typec/ucsi/ucsi.c
··· 1150 1150 ret = 0; 1151 1151 } 1152 1152 1153 - if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) == UCSI_CONSTAT_PWR_OPMODE_PD) { 1153 + if (con->partner && 1154 + UCSI_CONSTAT_PWR_OPMODE(con->status.flags) == 1155 + UCSI_CONSTAT_PWR_OPMODE_PD) { 1154 1156 ucsi_get_src_pdos(con); 1155 1157 ucsi_check_altmodes(con); 1156 1158 }
+3 -2
drivers/virt/nitro_enclaves/ne_misc_dev.c
··· 886 886 goto put_pages; 887 887 } 888 888 889 - gup_rc = get_user_pages(mem_region.userspace_addr + memory_size, 1, FOLL_GET, 890 - ne_mem_region->pages + i, NULL); 889 + gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1, 890 + ne_mem_region->pages + i, FOLL_GET); 891 + 891 892 if (gup_rc < 0) { 892 893 rc = gup_rc; 893 894
+6
drivers/xen/events/events_base.c
··· 1251 1251 } 1252 1252 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); 1253 1253 1254 + int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn) 1255 + { 1256 + return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL); 1257 + } 1258 + EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi); 1259 + 1254 1260 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) 1255 1261 { 1256 1262 struct evtchn_bind_ipi bind_ipi;
+9 -8
fs/btrfs/ctree.c
··· 463 463 BUG_ON(ret < 0); 464 464 rcu_assign_pointer(root->node, cow); 465 465 466 - btrfs_free_tree_block(trans, root, buf, parent_start, 467 - last_ref); 466 + btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 467 + parent_start, last_ref); 468 468 free_extent_buffer(buf); 469 469 add_root_to_dirty_list(root); 470 470 } else { ··· 485 485 return ret; 486 486 } 487 487 } 488 - btrfs_free_tree_block(trans, root, buf, parent_start, 489 - last_ref); 488 + btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 489 + parent_start, last_ref); 490 490 } 491 491 if (unlock_orig) 492 492 btrfs_tree_unlock(buf); ··· 927 927 free_extent_buffer(mid); 928 928 929 929 root_sub_used(root, mid->len); 930 - btrfs_free_tree_block(trans, root, mid, 0, 1); 930 + btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 931 931 /* once for the root ptr */ 932 932 free_extent_buffer_stale(mid); 933 933 return 0; ··· 986 986 btrfs_tree_unlock(right); 987 987 del_ptr(root, path, level + 1, pslot + 1); 988 988 root_sub_used(root, right->len); 989 - btrfs_free_tree_block(trans, root, right, 0, 1); 989 + btrfs_free_tree_block(trans, btrfs_root_id(root), right, 990 + 0, 1); 990 991 free_extent_buffer_stale(right); 991 992 right = NULL; 992 993 } else { ··· 1032 1031 btrfs_tree_unlock(mid); 1033 1032 del_ptr(root, path, level + 1, pslot); 1034 1033 root_sub_used(root, mid->len); 1035 - btrfs_free_tree_block(trans, root, mid, 0, 1); 1034 + btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 1036 1035 free_extent_buffer_stale(mid); 1037 1036 mid = NULL; 1038 1037 } else { ··· 4033 4032 root_sub_used(root, leaf->len); 4034 4033 4035 4034 atomic_inc(&leaf->refs); 4036 - btrfs_free_tree_block(trans, root, leaf, 0, 1); 4035 + btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); 4037 4036 free_extent_buffer_stale(leaf); 4038 4037 } 4039 4038 /*
+6 -1
fs/btrfs/ctree.h
··· 2257 2257 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; 2258 2258 } 2259 2259 2260 + static inline u64 btrfs_root_id(const struct btrfs_root *root) 2261 + { 2262 + return root->root_key.objectid; 2263 + } 2264 + 2260 2265 /* struct btrfs_root_backup */ 2261 2266 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, 2262 2267 tree_root, 64); ··· 2724 2719 u64 empty_size, 2725 2720 enum btrfs_lock_nesting nest); 2726 2721 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 2727 - struct btrfs_root *root, 2722 + u64 root_id, 2728 2723 struct extent_buffer *buf, 2729 2724 u64 parent, int last_ref); 2730 2725 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+8
fs/btrfs/disk-io.c
··· 1732 1732 } 1733 1733 return root; 1734 1734 fail: 1735 + /* 1736 + * If our caller provided us an anonymous device, then it's his 1737 + * responsability to free it in case we fail. So we have to set our 1738 + * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root() 1739 + * and once again by our caller. 1740 + */ 1741 + if (anon_dev) 1742 + root->anon_dev = 0; 1735 1743 btrfs_put_root(root); 1736 1744 return ERR_PTR(ret); 1737 1745 }
+7 -6
fs/btrfs/extent-tree.c
··· 3275 3275 } 3276 3276 3277 3277 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3278 - struct btrfs_root *root, 3278 + u64 root_id, 3279 3279 struct extent_buffer *buf, 3280 3280 u64 parent, int last_ref) 3281 3281 { 3282 - struct btrfs_fs_info *fs_info = root->fs_info; 3282 + struct btrfs_fs_info *fs_info = trans->fs_info; 3283 3283 struct btrfs_ref generic_ref = { 0 }; 3284 3284 int ret; 3285 3285 3286 3286 btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, 3287 3287 buf->start, buf->len, parent); 3288 3288 btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 3289 - root->root_key.objectid, 0, false); 3289 + root_id, 0, false); 3290 3290 3291 - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 3291 + if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3292 3292 btrfs_ref_tree_mod(fs_info, &generic_ref); 3293 3293 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL); 3294 3294 BUG_ON(ret); /* -ENOMEM */ ··· 3298 3298 struct btrfs_block_group *cache; 3299 3299 bool must_pin = false; 3300 3300 3301 - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 3301 + if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3302 3302 ret = check_ref_cleanup(trans, buf->start); 3303 3303 if (!ret) { 3304 3304 btrfs_redirty_list_add(trans->transaction, buf); ··· 5472 5472 goto owner_mismatch; 5473 5473 } 5474 5474 5475 - btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); 5475 + btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, 5476 + wc->refs[level] == 1); 5476 5477 out: 5477 5478 wc->refs[level] = 0; 5478 5479 wc->flags[level] = 0;
+8
fs/btrfs/extent_io.c
··· 6611 6611 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) 6612 6612 return 0; 6613 6613 6614 + /* 6615 + * We could have had EXTENT_BUFFER_UPTODATE cleared by the write 6616 + * operation, which could potentially still be in flight. In this case 6617 + * we simply want to return an error. 6618 + */ 6619 + if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) 6620 + return -EIO; 6621 + 6614 6622 if (eb->fs_info->sectorsize < PAGE_SIZE) 6615 6623 return read_extent_buffer_subpage(eb, wait, mirror_num); 6616 6624
+2 -2
fs/btrfs/free-space-tree.c
··· 1256 1256 btrfs_tree_lock(free_space_root->node); 1257 1257 btrfs_clean_tree_block(free_space_root->node); 1258 1258 btrfs_tree_unlock(free_space_root->node); 1259 - btrfs_free_tree_block(trans, free_space_root, free_space_root->node, 1260 - 0, 1); 1259 + btrfs_free_tree_block(trans, btrfs_root_id(free_space_root), 1260 + free_space_root->node, 0, 1); 1261 1261 1262 1262 btrfs_put_root(free_space_root); 1263 1263
+6 -4
fs/btrfs/ioctl.c
··· 617 617 * Since we don't abort the transaction in this case, free the 618 618 * tree block so that we don't leak space and leave the 619 619 * filesystem in an inconsistent state (an extent item in the 620 - * extent tree without backreferences). Also no need to have 621 - * the tree block locked since it is not in any tree at this 622 - * point, so no other task can find it and use it. 620 + * extent tree with a backreference for a root that does not 621 + * exists). 623 622 */ 624 - btrfs_free_tree_block(trans, root, leaf, 0, 1); 623 + btrfs_tree_lock(leaf); 624 + btrfs_clean_tree_block(leaf); 625 + btrfs_tree_unlock(leaf); 626 + btrfs_free_tree_block(trans, objectid, leaf, 0, 1); 625 627 free_extent_buffer(leaf); 626 628 goto fail; 627 629 }
+2 -1
fs/btrfs/qgroup.c
··· 1219 1219 btrfs_tree_lock(quota_root->node); 1220 1220 btrfs_clean_tree_block(quota_root->node); 1221 1221 btrfs_tree_unlock(quota_root->node); 1222 - btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1); 1222 + btrfs_free_tree_block(trans, btrfs_root_id(quota_root), 1223 + quota_root->node, 0, 1); 1223 1224 1224 1225 btrfs_put_root(quota_root); 1225 1226
+2
fs/btrfs/tree-log.c
··· 1181 1181 parent_objectid, victim_name, 1182 1182 victim_name_len); 1183 1183 if (ret < 0) { 1184 + kfree(victim_name); 1184 1185 return ret; 1185 1186 } else if (!ret) { 1186 1187 ret = -ENOENT; ··· 3978 3977 goto done; 3979 3978 } 3980 3979 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 3980 + ctx->last_dir_item_offset = min_key.offset; 3981 3981 ret = overwrite_item(trans, log, dst_path, 3982 3982 path->nodes[0], path->slots[0], 3983 3983 &min_key);
+4 -2
fs/btrfs/volumes.c
··· 1370 1370 1371 1371 bytenr_orig = btrfs_sb_offset(0); 1372 1372 ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr); 1373 - if (ret) 1374 - return ERR_PTR(ret); 1373 + if (ret) { 1374 + device = ERR_PTR(ret); 1375 + goto error_bdev_put; 1376 + } 1375 1377 1376 1378 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig); 1377 1379 if (IS_ERR(disk_super)) {
+7
fs/cifs/connect.c
··· 3064 3064 (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx))) 3065 3065 cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx); 3066 3066 3067 + /* 3068 + * The cookie is initialized from volume info returned above. 3069 + * Inside cifs_fscache_get_super_cookie it checks 3070 + * that we do not get super cookie twice. 3071 + */ 3072 + cifs_fscache_get_super_cookie(tcon); 3073 + 3067 3074 out: 3068 3075 mnt_ctx->server = server; 3069 3076 mnt_ctx->ses = ses;
+37 -1
fs/cifs/fs_context.c
··· 435 435 } 436 436 437 437 /* 438 + * Remove duplicate path delimiters. Windows is supposed to do that 439 + * but there are some bugs that prevent rename from working if there are 440 + * multiple delimiters. 441 + * 442 + * Returns a sanitized duplicate of @path. The caller is responsible for 443 + * cleaning up the original. 444 + */ 445 + #define IS_DELIM(c) ((c) == '/' || (c) == '\\') 446 + static char *sanitize_path(char *path) 447 + { 448 + char *cursor1 = path, *cursor2 = path; 449 + 450 + /* skip all prepended delimiters */ 451 + while (IS_DELIM(*cursor1)) 452 + cursor1++; 453 + 454 + /* copy the first letter */ 455 + *cursor2 = *cursor1; 456 + 457 + /* copy the remainder... */ 458 + while (*(cursor1++)) { 459 + /* ... skipping all duplicated delimiters */ 460 + if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2)) 461 + continue; 462 + *(++cursor2) = *cursor1; 463 + } 464 + 465 + /* if the last character is a delimiter, skip it */ 466 + if (IS_DELIM(*(cursor2 - 1))) 467 + cursor2--; 468 + 469 + *(cursor2) = '\0'; 470 + return kstrdup(path, GFP_KERNEL); 471 + } 472 + 473 + /* 438 474 * Parse a devname into substrings and populate the ctx->UNC and ctx->prepath 439 475 * fields with the result. Returns 0 on success and an error otherwise 440 476 * (e.g. ENOMEM or EINVAL) ··· 529 493 if (!*pos) 530 494 return 0; 531 495 532 - ctx->prepath = kstrdup(pos, GFP_KERNEL); 496 + ctx->prepath = sanitize_path(pos); 533 497 if (!ctx->prepath) 534 498 return -ENOMEM; 535 499
-13
fs/cifs/inode.c
··· 1356 1356 goto out; 1357 1357 } 1358 1358 1359 - #ifdef CONFIG_CIFS_FSCACHE 1360 - /* populate tcon->resource_id */ 1361 - tcon->resource_id = CIFS_I(inode)->uniqueid; 1362 - #endif 1363 - 1364 1359 if (rc && tcon->pipe) { 1365 1360 cifs_dbg(FYI, "ipc connection - fake read inode\n"); 1366 1361 spin_lock(&inode->i_lock); ··· 1370 1375 iget_failed(inode); 1371 1376 inode = ERR_PTR(rc); 1372 1377 } 1373 - 1374 - /* 1375 - * The cookie is initialized from volume info returned above. 1376 - * Inside cifs_fscache_get_super_cookie it checks 1377 - * that we do not get super cookie twice. 1378 - */ 1379 - cifs_fscache_get_super_cookie(tcon); 1380 - 1381 1378 out: 1382 1379 kfree(path); 1383 1380 free_xid(xid);
+2
fs/io-wq.c
··· 395 395 if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) { 396 396 atomic_inc(&acct->nr_running); 397 397 atomic_inc(&wqe->wq->worker_refs); 398 + raw_spin_unlock(&wqe->lock); 398 399 io_queue_worker_create(worker, acct, create_worker_cb); 400 + raw_spin_lock(&wqe->lock); 399 401 } 400 402 } 401 403
+7 -3
fs/io_uring.c
··· 2891 2891 req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT; 2892 2892 2893 2893 kiocb->ki_pos = READ_ONCE(sqe->off); 2894 - if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) { 2895 - req->flags |= REQ_F_CUR_POS; 2896 - kiocb->ki_pos = file->f_pos; 2894 + if (kiocb->ki_pos == -1) { 2895 + if (!(file->f_mode & FMODE_STREAM)) { 2896 + req->flags |= REQ_F_CUR_POS; 2897 + kiocb->ki_pos = file->f_pos; 2898 + } else { 2899 + kiocb->ki_pos = 0; 2900 + } 2897 2901 } 2898 2902 kiocb->ki_flags = iocb_flags(file); 2899 2903 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
+1 -1
fs/ksmbd/ndr.c
··· 148 148 static int ndr_read_int32(struct ndr *n, __u32 *value) 149 149 { 150 150 if (n->offset + sizeof(__u32) > n->length) 151 - return 0; 151 + return -EINVAL; 152 152 153 153 if (value) 154 154 *value = le32_to_cpu(*(__le32 *)ndr_get_field(n));
-3
fs/ksmbd/smb2ops.c
··· 271 271 if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES) 272 272 conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING; 273 273 274 - if (conn->cipher_type) 275 - conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; 276 - 277 274 if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) 278 275 conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL; 279 276
+25 -4
fs/ksmbd/smb2pdu.c
··· 915 915 } 916 916 } 917 917 918 + /** 919 + * smb3_encryption_negotiated() - checks if server and client agreed on enabling encryption 920 + * @conn: smb connection 921 + * 922 + * Return: true if connection should be encrypted, else false 923 + */ 924 + static bool smb3_encryption_negotiated(struct ksmbd_conn *conn) 925 + { 926 + if (!conn->ops->generate_encryptionkey) 927 + return false; 928 + 929 + /* 930 + * SMB 3.0 and 3.0.2 dialects use the SMB2_GLOBAL_CAP_ENCRYPTION flag. 931 + * SMB 3.1.1 uses the cipher_type field. 932 + */ 933 + return (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) || 934 + conn->cipher_type; 935 + } 936 + 918 937 static void decode_compress_ctxt(struct ksmbd_conn *conn, 919 938 struct smb2_compression_capabilities_context *pneg_ctxt) 920 939 { ··· 1488 1469 (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED)) 1489 1470 sess->sign = true; 1490 1471 1491 - if (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION && 1492 - conn->ops->generate_encryptionkey && 1472 + if (smb3_encryption_negotiated(conn) && 1493 1473 !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) { 1494 1474 rc = conn->ops->generate_encryptionkey(sess); 1495 1475 if (rc) { ··· 1577 1559 (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED)) 1578 1560 sess->sign = true; 1579 1561 1580 - if ((conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) && 1581 - conn->ops->generate_encryptionkey) { 1562 + if (smb3_encryption_negotiated(conn)) { 1582 1563 retval = conn->ops->generate_encryptionkey(sess); 1583 1564 if (retval) { 1584 1565 ksmbd_debug(SMB, ··· 2979 2962 &pntsd_size, &fattr); 2980 2963 posix_acl_release(fattr.cf_acls); 2981 2964 posix_acl_release(fattr.cf_dacls); 2965 + if (rc) { 2966 + kfree(pntsd); 2967 + goto err_out; 2968 + } 2982 2969 2983 2970 rc = ksmbd_vfs_set_sd_xattr(conn, 2984 2971 user_ns,
+4 -7
fs/nfsd/nfs3proc.c
··· 438 438 439 439 static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp, 440 440 struct nfsd3_readdirres *resp, 441 - int count) 441 + u32 count) 442 442 { 443 443 struct xdr_buf *buf = &resp->dirlist; 444 444 struct xdr_stream *xdr = &resp->xdr; 445 445 446 - count = min_t(u32, count, svc_max_payload(rqstp)); 446 + count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp)); 447 447 448 448 memset(buf, 0, sizeof(*buf)); 449 449 450 450 /* Reserve room for the NULL ptr & eof flag (-2 words) */ 451 451 buf->buflen = count - XDR_UNIT * 2; 452 452 buf->pages = rqstp->rq_next_page; 453 - while (count > 0) { 454 - rqstp->rq_next_page++; 455 - count -= PAGE_SIZE; 456 - } 453 + rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT; 457 454 458 455 /* This is xdr_init_encode(), but it assumes that 459 456 * the head kvec has already been consumed. */ ··· 459 462 xdr->page_ptr = buf->pages; 460 463 xdr->iov = NULL; 461 464 xdr->p = page_address(*buf->pages); 462 - xdr->end = xdr->p + (PAGE_SIZE >> 2); 465 + xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE); 463 466 xdr->rqst = NULL; 464 467 } 465 468
+4 -4
fs/nfsd/nfsproc.c
··· 556 556 557 557 static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp, 558 558 struct nfsd_readdirres *resp, 559 - int count) 559 + u32 count) 560 560 { 561 561 struct xdr_buf *buf = &resp->dirlist; 562 562 struct xdr_stream *xdr = &resp->xdr; 563 563 564 - count = min_t(u32, count, PAGE_SIZE); 564 + count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp)); 565 565 566 566 memset(buf, 0, sizeof(*buf)); 567 567 568 568 /* Reserve room for the NULL ptr & eof flag (-2 words) */ 569 - buf->buflen = count - sizeof(__be32) * 2; 569 + buf->buflen = count - XDR_UNIT * 2; 570 570 buf->pages = rqstp->rq_next_page; 571 571 rqstp->rq_next_page++; 572 572 ··· 577 577 xdr->page_ptr = buf->pages; 578 578 xdr->iov = NULL; 579 579 xdr->p = page_address(*buf->pages); 580 - xdr->end = xdr->p + (PAGE_SIZE >> 2); 580 + xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE); 581 581 xdr->rqst = NULL; 582 582 } 583 583
+1
fs/zonefs/super.c
··· 1787 1787 MODULE_AUTHOR("Damien Le Moal"); 1788 1788 MODULE_DESCRIPTION("Zone file system for zoned block devices"); 1789 1789 MODULE_LICENSE("GPL"); 1790 + MODULE_ALIAS_FS("zonefs"); 1790 1791 module_init(zonefs_init); 1791 1792 module_exit(zonefs_exit);
+2 -2
include/linux/compiler.h
··· 121 121 asm volatile(__stringify_label(c) ":\n\t" \ 122 122 ".pushsection .discard.reachable\n\t" \ 123 123 ".long " __stringify_label(c) "b - .\n\t" \ 124 - ".popsection\n\t"); \ 124 + ".popsection\n\t" : : "i" (c)); \ 125 125 }) 126 126 #define annotate_reachable() __annotate_reachable(__COUNTER__) 127 127 ··· 129 129 asm volatile(__stringify_label(c) ":\n\t" \ 130 130 ".pushsection .discard.unreachable\n\t" \ 131 131 ".long " __stringify_label(c) "b - .\n\t" \ 132 - ".popsection\n\t"); \ 132 + ".popsection\n\t" : : "i" (c)); \ 133 133 }) 134 134 #define annotate_unreachable() __annotate_unreachable(__COUNTER__) 135 135
+6
include/linux/efi.h
··· 1283 1283 } 1284 1284 #endif 1285 1285 1286 + #ifdef CONFIG_SYSFB 1287 + extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 1288 + #else 1289 + static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { } 1290 + #endif 1291 + 1286 1292 #endif /* _LINUX_EFI_H */
+1 -1
include/linux/gfp.h
··· 624 624 625 625 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1); 626 626 void free_pages_exact(void *virt, size_t size); 627 - __meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(1); 627 + __meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2); 628 628 629 629 #define __get_free_page(gfp_mask) \ 630 630 __get_free_pages((gfp_mask), 0)
+2 -2
include/linux/instrumentation.h
··· 11 11 asm volatile(__stringify(c) ": nop\n\t" \ 12 12 ".pushsection .discard.instr_begin\n\t" \ 13 13 ".long " __stringify(c) "b - .\n\t" \ 14 - ".popsection\n\t"); \ 14 + ".popsection\n\t" : : "i" (c)); \ 15 15 }) 16 16 #define instrumentation_begin() __instrumentation_begin(__COUNTER__) 17 17 ··· 50 50 asm volatile(__stringify(c) ": nop\n\t" \ 51 51 ".pushsection .discard.instr_end\n\t" \ 52 52 ".long " __stringify(c) "b - .\n\t" \ 53 - ".popsection\n\t"); \ 53 + ".popsection\n\t" : : "i" (c)); \ 54 54 }) 55 55 #define instrumentation_end() __instrumentation_end(__COUNTER__) 56 56 #else
+2 -2
include/linux/memblock.h
··· 405 405 phys_addr_t end, int nid, bool exact_nid); 406 406 phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); 407 407 408 - static inline phys_addr_t memblock_phys_alloc(phys_addr_t size, 409 - phys_addr_t align) 408 + static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size, 409 + phys_addr_t align) 410 410 { 411 411 return memblock_phys_alloc_range(size, align, 0, 412 412 MEMBLOCK_ALLOC_ACCESSIBLE);
-1
include/linux/pagemap.h
··· 285 285 286 286 static inline bool page_cache_add_speculative(struct page *page, int count) 287 287 { 288 - VM_BUG_ON_PAGE(PageTail(page), page); 289 288 return folio_ref_try_add_rcu((struct folio *)page, count); 290 289 } 291 290
+2 -2
include/linux/tee_drv.h
··· 195 195 * @offset: offset of buffer in user space 196 196 * @pages: locked pages from userspace 197 197 * @num_pages: number of locked pages 198 - * @dmabuf: dmabuf used to for exporting to user space 198 + * @refcount: reference counter 199 199 * @flags: defined by TEE_SHM_* in tee_drv.h 200 200 * @id: unique id of a shared memory object on this device, shared 201 201 * with user space ··· 214 214 unsigned int offset; 215 215 struct page **pages; 216 216 size_t num_pages; 217 - struct dma_buf *dmabuf; 217 + refcount_t refcount; 218 218 u32 flags; 219 219 int id; 220 220 u64 sec_world_id;
+3 -3
include/net/sctp/sctp.h
··· 105 105 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); 106 106 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); 107 107 108 + typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *); 108 109 void sctp_transport_walk_start(struct rhashtable_iter *iter); 109 110 void sctp_transport_walk_stop(struct rhashtable_iter *iter); 110 111 struct sctp_transport *sctp_transport_get_next(struct net *net, ··· 116 115 struct net *net, 117 116 const union sctp_addr *laddr, 118 117 const union sctp_addr *paddr, void *p); 119 - int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), 120 - int (*cb_done)(struct sctp_transport *, void *), 121 - struct net *net, int *pos, void *p); 118 + int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done, 119 + struct net *net, int *pos, void *p); 122 120 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p); 123 121 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, 124 122 struct sctp_info *info);
+2 -1
include/net/sctp/structs.h
··· 1354 1354 reconf_enable:1; 1355 1355 1356 1356 __u8 strreset_enable; 1357 + struct rcu_head rcu; 1357 1358 }; 1358 1359 1359 1360 /* Recover the outter endpoint structure. */ ··· 1370 1369 struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t); 1371 1370 void sctp_endpoint_free(struct sctp_endpoint *); 1372 1371 void sctp_endpoint_put(struct sctp_endpoint *); 1373 - void sctp_endpoint_hold(struct sctp_endpoint *); 1372 + int sctp_endpoint_hold(struct sctp_endpoint *ep); 1374 1373 void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *); 1375 1374 struct sctp_association *sctp_endpoint_lookup_assoc( 1376 1375 const struct sctp_endpoint *ep,
+1
include/uapi/linux/byteorder/big_endian.h
··· 9 9 #define __BIG_ENDIAN_BITFIELD 10 10 #endif 11 11 12 + #include <linux/stddef.h> 12 13 #include <linux/types.h> 13 14 #include <linux/swab.h> 14 15
+1
include/uapi/linux/byteorder/little_endian.h
··· 9 9 #define __LITTLE_ENDIAN_BITFIELD 10 10 #endif 11 11 12 + #include <linux/stddef.h> 12 13 #include <linux/types.h> 13 14 #include <linux/swab.h> 14 15
+3 -3
include/uapi/linux/nfc.h
··· 263 263 #define NFC_SE_ENABLED 0x1 264 264 265 265 struct sockaddr_nfc { 266 - sa_family_t sa_family; 266 + __kernel_sa_family_t sa_family; 267 267 __u32 dev_idx; 268 268 __u32 target_idx; 269 269 __u32 nfc_protocol; ··· 271 271 272 272 #define NFC_LLCP_MAX_SERVICE_NAME 63 273 273 struct sockaddr_nfc_llcp { 274 - sa_family_t sa_family; 274 + __kernel_sa_family_t sa_family; 275 275 __u32 dev_idx; 276 276 __u32 target_idx; 277 277 __u32 nfc_protocol; 278 278 __u8 dsap; /* Destination SAP, if known */ 279 279 __u8 ssap; /* Source SAP to be bound to */ 280 280 char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */; 281 - size_t service_name_len; 281 + __kernel_size_t service_name_len; 282 282 }; 283 283 284 284 /* NFC socket protocols */
+1
include/xen/events.h
··· 17 17 unsigned xen_evtchn_nr_channels(void); 18 18 19 19 int bind_evtchn_to_irq(evtchn_port_t evtchn); 20 + int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn); 20 21 int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, 21 22 irq_handler_t handler, 22 23 unsigned long irqflags, const char *devname,
+11
kernel/crash_core.c
··· 6 6 7 7 #include <linux/buildid.h> 8 8 #include <linux/crash_core.h> 9 + #include <linux/init.h> 9 10 #include <linux/utsname.h> 10 11 #include <linux/vmalloc.h> 11 12 ··· 295 294 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, 296 295 "crashkernel=", suffix_tbl[SUFFIX_LOW]); 297 296 } 297 + 298 + /* 299 + * Add a dummy early_param handler to mark crashkernel= as a known command line 300 + * parameter and suppress incorrect warnings in init/main.c. 301 + */ 302 + static int __init parse_crashkernel_dummy(char *arg) 303 + { 304 + return 0; 305 + } 306 + early_param("crashkernel", parse_crashkernel_dummy); 298 307 299 308 Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, 300 309 void *data, size_t data_len)
+1 -1
kernel/locking/rtmutex.c
··· 1380 1380 * - the VCPU on which owner runs is preempted 1381 1381 */ 1382 1382 if (!owner->on_cpu || need_resched() || 1383 - rt_mutex_waiter_is_top_waiter(lock, waiter) || 1383 + !rt_mutex_waiter_is_top_waiter(lock, waiter) || 1384 1384 vcpu_is_preempted(task_cpu(owner))) { 1385 1385 res = false; 1386 1386 break;
+9
kernel/signal.c
··· 4185 4185 ss_mode != 0)) 4186 4186 return -EINVAL; 4187 4187 4188 + /* 4189 + * Return before taking any locks if no actual 4190 + * sigaltstack changes were requested. 4191 + */ 4192 + if (t->sas_ss_sp == (unsigned long)ss_sp && 4193 + t->sas_ss_size == ss_size && 4194 + t->sas_ss_flags == ss_flags) 4195 + return 0; 4196 + 4188 4197 sigaltstack_lock(); 4189 4198 if (ss_mode == SS_DISABLE) { 4190 4199 ss_size = 0;
+1 -2
kernel/time/timekeeping.c
··· 1306 1306 timekeeping_forward_now(tk); 1307 1307 1308 1308 xt = tk_xtime(tk); 1309 - ts_delta.tv_sec = ts->tv_sec - xt.tv_sec; 1310 - ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec; 1309 + ts_delta = timespec64_sub(*ts, xt); 1311 1310 1312 1311 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) { 1313 1312 ret = -EINVAL;
+9 -6
kernel/ucount.c
··· 264 264 long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v) 265 265 { 266 266 struct ucounts *iter; 267 + long max = LONG_MAX; 267 268 long ret = 0; 268 269 269 270 for (iter = ucounts; iter; iter = iter->ns->ucounts) { 270 - long max = READ_ONCE(iter->ns->ucount_max[type]); 271 271 long new = atomic_long_add_return(v, &iter->ucount[type]); 272 272 if (new < 0 || new > max) 273 273 ret = LONG_MAX; 274 274 else if (iter == ucounts) 275 275 ret = new; 276 + max = READ_ONCE(iter->ns->ucount_max[type]); 276 277 } 277 278 return ret; 278 279 } ··· 313 312 { 314 313 /* Caller must hold a reference to ucounts */ 315 314 struct ucounts *iter; 315 + long max = LONG_MAX; 316 316 long dec, ret = 0; 317 317 318 318 for (iter = ucounts; iter; iter = iter->ns->ucounts) { 319 - long max = READ_ONCE(iter->ns->ucount_max[type]); 320 319 long new = atomic_long_add_return(1, &iter->ucount[type]); 321 320 if (new < 0 || new > max) 322 321 goto unwind; 323 322 if (iter == ucounts) 324 323 ret = new; 324 + max = READ_ONCE(iter->ns->ucount_max[type]); 325 325 /* 326 326 * Grab an extra ucount reference for the caller when 327 327 * the rlimit count was previously 0. ··· 341 339 return 0; 342 340 } 343 341 344 - bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max) 342 + bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long rlimit) 345 343 { 346 344 struct ucounts *iter; 347 - if (get_ucounts_value(ucounts, type) > max) 348 - return true; 345 + long max = rlimit; 346 + if (rlimit > LONG_MAX) 347 + max = LONG_MAX; 349 348 for (iter = ucounts; iter; iter = iter->ns->ucounts) { 350 - max = READ_ONCE(iter->ns->ucount_max[type]); 351 349 if (get_ucounts_value(iter, type) > max) 352 350 return true; 351 + max = READ_ONCE(iter->ns->ucount_max[type]); 353 352 } 354 353 return false; 355 354 }
+2
mm/damon/dbgfs.c
··· 650 650 if (!targetid_is_pid(ctx)) 651 651 return; 652 652 653 + mutex_lock(&ctx->kdamond_lock); 653 654 damon_for_each_target_safe(t, next, ctx) { 654 655 put_pid((struct pid *)t->id); 655 656 damon_destroy_target(t); 656 657 } 658 + mutex_unlock(&ctx->kdamond_lock); 657 659 } 658 660 659 661 static struct damon_ctx *dbgfs_new_ctx(void)
+1
mm/kfence/core.c
··· 683 683 .open = open_objects, 684 684 .read = seq_read, 685 685 .llseek = seq_lseek, 686 + .release = seq_release, 686 687 }; 687 688 688 689 static int __init kfence_debugfs_init(void)
+5 -9
mm/memory-failure.c
··· 1470 1470 if (!(flags & MF_COUNT_INCREASED)) { 1471 1471 res = get_hwpoison_page(p, flags); 1472 1472 if (!res) { 1473 - /* 1474 - * Check "filter hit" and "race with other subpage." 1475 - */ 1476 1473 lock_page(head); 1477 - if (PageHWPoison(head)) { 1478 - if ((hwpoison_filter(p) && TestClearPageHWPoison(p)) 1479 - || (p != head && TestSetPageHWPoison(head))) { 1474 + if (hwpoison_filter(p)) { 1475 + if (TestClearPageHWPoison(head)) 1480 1476 num_poisoned_pages_dec(); 1481 - unlock_page(head); 1482 - return 0; 1483 - } 1477 + unlock_page(head); 1478 + return 0; 1484 1479 } 1485 1480 unlock_page(head); 1486 1481 res = MF_FAILED; ··· 2234 2239 } else if (ret == 0) { 2235 2240 if (soft_offline_free_page(page) && try_again) { 2236 2241 try_again = false; 2242 + flags &= ~MF_COUNT_INCREASED; 2237 2243 goto retry; 2238 2244 } 2239 2245 }
+1 -2
mm/mempolicy.c
··· 2140 2140 * memory with both reclaim and compact as well. 2141 2141 */ 2142 2142 if (!page && (gfp & __GFP_DIRECT_RECLAIM)) 2143 - page = __alloc_pages_node(hpage_node, 2144 - gfp, order); 2143 + page = __alloc_pages(gfp, order, hpage_node, nmask); 2145 2144 2146 2145 goto out; 2147 2146 }
+32
net/bridge/br_multicast.c
··· 4522 4522 } 4523 4523 #endif 4524 4524 4525 + void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, 4526 + unsigned long val) 4527 + { 4528 + unsigned long intvl_jiffies = clock_t_to_jiffies(val); 4529 + 4530 + if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) { 4531 + br_info(brmctx->br, 4532 + "trying to set multicast query interval below minimum, setting to %lu (%ums)\n", 4533 + jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN), 4534 + jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN)); 4535 + intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN; 4536 + } 4537 + 4538 + brmctx->multicast_query_interval = intvl_jiffies; 4539 + } 4540 + 4541 + void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, 4542 + unsigned long val) 4543 + { 4544 + unsigned long intvl_jiffies = clock_t_to_jiffies(val); 4545 + 4546 + if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) { 4547 + br_info(brmctx->br, 4548 + "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n", 4549 + jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN), 4550 + jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN)); 4551 + intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN; 4552 + } 4553 + 4554 + brmctx->multicast_startup_query_interval = intvl_jiffies; 4555 + } 4556 + 4525 4557 /** 4526 4558 * br_multicast_list_adjacent - Returns snooped multicast addresses 4527 4559 * @dev: The bridge port adjacent to which to retrieve addresses
+2 -2
net/bridge/br_netlink.c
··· 1357 1357 if (data[IFLA_BR_MCAST_QUERY_INTVL]) { 1358 1358 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]); 1359 1359 1360 - br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val); 1360 + br_multicast_set_query_intvl(&br->multicast_ctx, val); 1361 1361 } 1362 1362 1363 1363 if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) { ··· 1369 1369 if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) { 1370 1370 u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]); 1371 1371 1372 - br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val); 1372 + br_multicast_set_startup_query_intvl(&br->multicast_ctx, val); 1373 1373 } 1374 1374 1375 1375 if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
+9 -3
net/bridge/br_private.h
··· 28 28 #define BR_MAX_PORTS (1<<BR_PORT_BITS) 29 29 30 30 #define BR_MULTICAST_DEFAULT_HASH_MAX 4096 31 + #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000) 32 + #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN 31 33 32 34 #define BR_HWDOM_MAX BITS_PER_LONG 33 35 ··· 966 964 int nest_attr); 967 965 size_t br_multicast_querier_state_size(void); 968 966 size_t br_rports_size(const struct net_bridge_mcast *brmctx); 967 + void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, 968 + unsigned long val); 969 + void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, 970 + unsigned long val); 969 971 970 972 static inline bool br_group_is_l2(const struct br_ip *group) 971 973 { ··· 1154 1148 static inline bool 1155 1149 br_multicast_ctx_vlan_global_disabled(const struct net_bridge_mcast *brmctx) 1156 1150 { 1157 - return br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && 1158 - br_multicast_ctx_is_vlan(brmctx) && 1159 - !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED); 1151 + return br_multicast_ctx_is_vlan(brmctx) && 1152 + (!br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) || 1153 + !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)); 1160 1154 } 1161 1155 1162 1156 static inline bool
+2 -2
net/bridge/br_sysfs_br.c
··· 657 657 static int set_query_interval(struct net_bridge *br, unsigned long val, 658 658 struct netlink_ext_ack *extack) 659 659 { 660 - br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val); 660 + br_multicast_set_query_intvl(&br->multicast_ctx, val); 661 661 return 0; 662 662 } 663 663 ··· 705 705 static int set_startup_query_interval(struct net_bridge *br, unsigned long val, 706 706 struct netlink_ext_ack *extack) 707 707 { 708 - br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val); 708 + br_multicast_set_startup_query_intvl(&br->multicast_ctx, val); 709 709 return 0; 710 710 } 711 711
+2 -2
net/bridge/br_vlan_options.c
··· 521 521 u64 val; 522 522 523 523 val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]); 524 - v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val); 524 + br_multicast_set_query_intvl(&v->br_mcast_ctx, val); 525 525 *changed = true; 526 526 } 527 527 if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) { ··· 535 535 u64 val; 536 536 537 537 val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]); 538 - v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val); 538 + br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val); 539 539 *changed = true; 540 540 } 541 541 if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
+4 -6
net/ipv4/af_inet.c
··· 1985 1985 1986 1986 ip_init(); 1987 1987 1988 + /* Initialise per-cpu ipv4 mibs */ 1989 + if (init_ipv4_mibs()) 1990 + panic("%s: Cannot init ipv4 mibs\n", __func__); 1991 + 1988 1992 /* Setup TCP slab cache for open requests. */ 1989 1993 tcp_init(); 1990 1994 ··· 2019 2015 2020 2016 if (init_inet_pernet_ops()) 2021 2017 pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__); 2022 - /* 2023 - * Initialise per-cpu ipv4 mibs 2024 - */ 2025 - 2026 - if (init_ipv4_mibs()) 2027 - pr_crit("%s: Cannot init ipv4 mibs\n", __func__); 2028 2018 2029 2019 ipv4_proc_init(); 2030 2020
+1 -1
net/ipv4/udp.c
··· 3075 3075 { 3076 3076 seq_setwidth(seq, 127); 3077 3077 if (v == SEQ_START_TOKEN) 3078 - seq_puts(seq, " sl local_address rem_address st tx_queue " 3078 + seq_puts(seq, " sl local_address rem_address st tx_queue " 3079 3079 "rx_queue tr tm->when retrnsmt uid timeout " 3080 3080 "inode ref pointer drops"); 3081 3081 else {
+2
net/ipv6/ip6_vti.c
··· 808 808 struct net *net = dev_net(dev); 809 809 struct vti6_net *ip6n = net_generic(net, vti6_net_id); 810 810 811 + memset(&p1, 0, sizeof(p1)); 812 + 811 813 switch (cmd) { 812 814 case SIOCGETTUNNEL: 813 815 if (dev == ip6n->fb_tnl_dev) {
+3
net/ipv6/raw.c
··· 1020 1020 struct raw6_sock *rp = raw6_sk(sk); 1021 1021 int val; 1022 1022 1023 + if (optlen < sizeof(val)) 1024 + return -EINVAL; 1025 + 1023 1026 if (copy_from_sockptr(&val, optval, sizeof(val))) 1024 1027 return -EFAULT; 1025 1028
+1 -1
net/ipv6/udp.c
··· 1204 1204 kfree_skb(skb); 1205 1205 return -EINVAL; 1206 1206 } 1207 - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { 1207 + if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { 1208 1208 kfree_skb(skb); 1209 1209 return -EINVAL; 1210 1210 }
+5 -1
net/ncsi/ncsi-netlink.c
··· 112 112 pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR); 113 113 if (!pnest) 114 114 return -ENOMEM; 115 - nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id); 115 + rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id); 116 + if (rc) { 117 + nla_nest_cancel(skb, pnest); 118 + return rc; 119 + } 116 120 if ((0x1 << np->id) == ndp->package_whitelist) 117 121 nla_put_flag(skb, NCSI_PKG_ATTR_FORCED); 118 122 cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
+6 -6
net/sctp/diag.c
··· 290 290 return err; 291 291 } 292 292 293 - static int sctp_sock_dump(struct sctp_transport *tsp, void *p) 293 + static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p) 294 294 { 295 - struct sctp_endpoint *ep = tsp->asoc->ep; 296 295 struct sctp_comm_param *commp = p; 297 296 struct sock *sk = ep->base.sk; 298 297 struct sk_buff *skb = commp->skb; ··· 301 302 int err = 0; 302 303 303 304 lock_sock(sk); 305 + if (ep != tsp->asoc->ep) 306 + goto release; 304 307 list_for_each_entry(assoc, &ep->asocs, asocs) { 305 308 if (cb->args[4] < cb->args[1]) 306 309 goto next; ··· 345 344 return err; 346 345 } 347 346 348 - static int sctp_sock_filter(struct sctp_transport *tsp, void *p) 347 + static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p) 349 348 { 350 - struct sctp_endpoint *ep = tsp->asoc->ep; 351 349 struct sctp_comm_param *commp = p; 352 350 struct sock *sk = ep->base.sk; 353 351 const struct inet_diag_req_v2 *r = commp->r; ··· 505 505 if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE))) 506 506 goto done; 507 507 508 - sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump, 509 - net, &pos, &commp); 508 + sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump, 509 + net, &pos, &commp); 510 510 cb->args[2] = pos; 511 511 512 512 done:
+15 -8
net/sctp/endpointola.c
··· 184 184 } 185 185 186 186 /* Final destructor for endpoint. */ 187 + static void sctp_endpoint_destroy_rcu(struct rcu_head *head) 188 + { 189 + struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu); 190 + struct sock *sk = ep->base.sk; 191 + 192 + sctp_sk(sk)->ep = NULL; 193 + sock_put(sk); 194 + 195 + kfree(ep); 196 + SCTP_DBG_OBJCNT_DEC(ep); 197 + } 198 + 187 199 static void sctp_endpoint_destroy(struct sctp_endpoint *ep) 188 200 { 189 201 struct sock *sk; ··· 225 213 if (sctp_sk(sk)->bind_hash) 226 214 sctp_put_port(sk); 227 215 228 - sctp_sk(sk)->ep = NULL; 229 - /* Give up our hold on the sock */ 230 - sock_put(sk); 231 - 232 - kfree(ep); 233 - SCTP_DBG_OBJCNT_DEC(ep); 216 + call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu); 234 217 } 235 218 236 219 /* Hold a reference to an endpoint. */ 237 - void sctp_endpoint_hold(struct sctp_endpoint *ep) 220 + int sctp_endpoint_hold(struct sctp_endpoint *ep) 238 221 { 239 - refcount_inc(&ep->base.refcnt); 222 + return refcount_inc_not_zero(&ep->base.refcnt); 240 223 } 241 224 242 225 /* Release a reference to an endpoint and clean up if there are
+15 -8
net/sctp/socket.c
··· 5333 5333 } 5334 5334 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); 5335 5335 5336 - int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), 5337 - int (*cb_done)(struct sctp_transport *, void *), 5338 - struct net *net, int *pos, void *p) { 5336 + int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done, 5337 + struct net *net, int *pos, void *p) 5338 + { 5339 5339 struct rhashtable_iter hti; 5340 5340 struct sctp_transport *tsp; 5341 + struct sctp_endpoint *ep; 5341 5342 int ret; 5342 5343 5343 5344 again: ··· 5347 5346 5348 5347 tsp = sctp_transport_get_idx(net, &hti, *pos + 1); 5349 5348 for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { 5350 - ret = cb(tsp, p); 5351 - if (ret) 5352 - break; 5349 + ep = tsp->asoc->ep; 5350 + if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */ 5351 + ret = cb(ep, tsp, p); 5352 + if (ret) 5353 + break; 5354 + sctp_endpoint_put(ep); 5355 + } 5353 5356 (*pos)++; 5354 5357 sctp_transport_put(tsp); 5355 5358 } 5356 5359 sctp_transport_walk_stop(&hti); 5357 5360 5358 5361 if (ret) { 5359 - if (cb_done && !cb_done(tsp, p)) { 5362 + if (cb_done && !cb_done(ep, tsp, p)) { 5360 5363 (*pos)++; 5364 + sctp_endpoint_put(ep); 5361 5365 sctp_transport_put(tsp); 5362 5366 goto again; 5363 5367 } 5368 + sctp_endpoint_put(ep); 5364 5369 sctp_transport_put(tsp); 5365 5370 } 5366 5371 5367 5372 return ret; 5368 5373 } 5369 - EXPORT_SYMBOL_GPL(sctp_for_each_transport); 5374 + EXPORT_SYMBOL_GPL(sctp_transport_traverse_process); 5370 5375 5371 5376 /* 7.2.1 Association Status (SCTP_STATUS) 5372 5377
+5
net/smc/smc.h
··· 180 180 u16 tx_cdc_seq; /* sequence # for CDC send */ 181 181 u16 tx_cdc_seq_fin; /* sequence # - tx completed */ 182 182 spinlock_t send_lock; /* protect wr_sends */ 183 + atomic_t cdc_pend_tx_wr; /* number of pending tx CDC wqe 184 + * - inc when post wqe, 185 + * - dec on polled tx cqe 186 + */ 187 + wait_queue_head_t cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/ 183 188 struct delayed_work tx_work; /* retry of smc_cdc_msg_send */ 184 189 u32 tx_off; /* base offset in peer rmb */ 185 190
+24 -28
net/smc/smc_cdc.c
··· 31 31 struct smc_sock *smc; 32 32 int diff; 33 33 34 - if (!conn) 35 - /* already dismissed */ 36 - return; 37 - 38 34 smc = container_of(conn, struct smc_sock, conn); 39 35 bh_lock_sock(&smc->sk); 40 36 if (!wc_status) { ··· 47 51 conn); 48 52 conn->tx_cdc_seq_fin = cdcpend->ctrl_seq; 49 53 } 54 + 55 + if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) && 56 + unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq))) 57 + wake_up(&conn->cdc_pend_tx_wq); 58 + WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0); 59 + 50 60 smc_tx_sndbuf_nonfull(smc); 51 61 bh_unlock_sock(&smc->sk); 52 62 } ··· 109 107 conn->tx_cdc_seq++; 110 108 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; 111 109 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed); 110 + 111 + atomic_inc(&conn->cdc_pend_tx_wr); 112 + smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */ 113 + 112 114 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); 113 115 if (!rc) { 114 116 smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); ··· 120 114 } else { 121 115 conn->tx_cdc_seq--; 122 116 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; 117 + atomic_dec(&conn->cdc_pend_tx_wr); 123 118 } 124 119 125 120 return rc; ··· 143 136 peer->token = htonl(local->token); 144 137 peer->prod_flags.failover_validation = 1; 145 138 139 + /* We need to set pend->conn here to make sure smc_cdc_tx_handler() 140 + * can handle properly 141 + */ 142 + smc_cdc_add_pending_send(conn, pend); 143 + 144 + atomic_inc(&conn->cdc_pend_tx_wr); 145 + smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */ 146 + 146 147 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); 148 + if (unlikely(rc)) 149 + atomic_dec(&conn->cdc_pend_tx_wr); 150 + 147 151 return rc; 148 152 } 149 153 ··· 211 193 return rc; 212 194 } 213 195 214 - static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend, 215 - unsigned long data) 196 + void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn) 216 197 { 217 - 
struct smc_connection *conn = (struct smc_connection *)data; 218 - struct smc_cdc_tx_pend *cdc_pend = 219 - (struct smc_cdc_tx_pend *)tx_pend; 220 - 221 - return cdc_pend->conn == conn; 222 - } 223 - 224 - static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend) 225 - { 226 - struct smc_cdc_tx_pend *cdc_pend = 227 - (struct smc_cdc_tx_pend *)tx_pend; 228 - 229 - cdc_pend->conn = NULL; 230 - } 231 - 232 - void smc_cdc_tx_dismiss_slots(struct smc_connection *conn) 233 - { 234 - struct smc_link *link = conn->lnk; 235 - 236 - smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE, 237 - smc_cdc_tx_filter, smc_cdc_tx_dismisser, 238 - (unsigned long)conn); 198 + wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr)); 239 199 } 240 200 241 201 /* Send a SMC-D CDC header.
+1 -1
net/smc/smc_cdc.h
··· 291 291 struct smc_wr_buf **wr_buf, 292 292 struct smc_rdma_wr **wr_rdma_buf, 293 293 struct smc_cdc_tx_pend **pend); 294 - void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); 294 + void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn); 295 295 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, 296 296 struct smc_cdc_tx_pend *pend); 297 297 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
+21 -6
net/smc/smc_core.c
··· 647 647 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 648 648 struct smc_link *lnk = &lgr->lnk[i]; 649 649 650 - if (smc_link_usable(lnk)) 650 + if (smc_link_sendable(lnk)) 651 651 lnk->state = SMC_LNK_INACTIVE; 652 652 } 653 653 wake_up_all(&lgr->llc_msg_waiter); ··· 1133 1133 smc_ism_unset_conn(conn); 1134 1134 tasklet_kill(&conn->rx_tsklet); 1135 1135 } else { 1136 - smc_cdc_tx_dismiss_slots(conn); 1136 + smc_cdc_wait_pend_tx_wr(conn); 1137 1137 if (current_work() != &conn->abort_work) 1138 1138 cancel_work_sync(&conn->abort_work); 1139 1139 } ··· 1210 1210 smc_llc_link_clear(lnk, log); 1211 1211 smcr_buf_unmap_lgr(lnk); 1212 1212 smcr_rtoken_clear_link(lnk); 1213 - smc_ib_modify_qp_reset(lnk); 1213 + smc_ib_modify_qp_error(lnk); 1214 1214 smc_wr_free_link(lnk); 1215 1215 smc_ib_destroy_queue_pair(lnk); 1216 1216 smc_ib_dealloc_protection_domain(lnk); ··· 1342 1342 else 1343 1343 tasklet_unlock_wait(&conn->rx_tsklet); 1344 1344 } else { 1345 - smc_cdc_tx_dismiss_slots(conn); 1345 + smc_cdc_wait_pend_tx_wr(conn); 1346 1346 } 1347 1347 smc_lgr_unregister_conn(conn); 1348 1348 smc_close_active_abort(smc); ··· 1465 1465 /* Called when an SMCR device is removed or the smc module is unloaded. 1466 1466 * If smcibdev is given, all SMCR link groups using this device are terminated. 1467 1467 * If smcibdev is NULL, all SMCR link groups are terminated. 1468 + * 1469 + * We must wait here for QPs been destroyed before we destroy the CQs, 1470 + * or we won't received any CQEs and cdc_pend_tx_wr cannot reach 0 thus 1471 + * smc_sock cannot be released. 
1468 1472 */ 1469 1473 void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) 1470 1474 { 1471 1475 struct smc_link_group *lgr, *lg; 1472 1476 LIST_HEAD(lgr_free_list); 1477 + LIST_HEAD(lgr_linkdown_list); 1473 1478 int i; 1474 1479 1475 1480 spin_lock_bh(&smc_lgr_list.lock); ··· 1486 1481 list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) { 1487 1482 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 1488 1483 if (lgr->lnk[i].smcibdev == smcibdev) 1489 - smcr_link_down_cond_sched(&lgr->lnk[i]); 1484 + list_move_tail(&lgr->list, &lgr_linkdown_list); 1490 1485 } 1491 1486 } 1492 1487 } ··· 1496 1491 list_del_init(&lgr->list); 1497 1492 smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM); 1498 1493 __smc_lgr_terminate(lgr, false); 1494 + } 1495 + 1496 + list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) { 1497 + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 1498 + if (lgr->lnk[i].smcibdev == smcibdev) { 1499 + mutex_lock(&lgr->llc_conf_mutex); 1500 + smcr_link_down_cond(&lgr->lnk[i]); 1501 + mutex_unlock(&lgr->llc_conf_mutex); 1502 + } 1503 + } 1499 1504 } 1500 1505 1501 1506 if (smcibdev) { ··· 1607 1592 if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list)) 1608 1593 return; 1609 1594 1610 - smc_ib_modify_qp_reset(lnk); 1611 1595 to_lnk = smc_switch_conns(lgr, lnk, true); 1612 1596 if (!to_lnk) { /* no backup link available */ 1613 1597 smcr_link_clear(lnk, true); ··· 1844 1830 conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; 1845 1831 conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; 1846 1832 conn->urg_state = SMC_URG_READ; 1833 + init_waitqueue_head(&conn->cdc_pend_tx_wq); 1847 1834 INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work); 1848 1835 if (ini->is_smcd) { 1849 1836 conn->rx_off = sizeof(struct smcd_cdc_msg);
+6
net/smc/smc_core.h
··· 415 415 return true; 416 416 } 417 417 418 + static inline bool smc_link_sendable(struct smc_link *lnk) 419 + { 420 + return smc_link_usable(lnk) && 421 + lnk->qp_attr.cur_qp_state == IB_QPS_RTS; 422 + } 423 + 418 424 static inline bool smc_link_active(struct smc_link *lnk) 419 425 { 420 426 return lnk->state == SMC_LNK_ACTIVE;
+2 -2
net/smc/smc_ib.c
··· 109 109 IB_QP_MAX_QP_RD_ATOMIC); 110 110 } 111 111 112 - int smc_ib_modify_qp_reset(struct smc_link *lnk) 112 + int smc_ib_modify_qp_error(struct smc_link *lnk) 113 113 { 114 114 struct ib_qp_attr qp_attr; 115 115 116 116 memset(&qp_attr, 0, sizeof(qp_attr)); 117 - qp_attr.qp_state = IB_QPS_RESET; 117 + qp_attr.qp_state = IB_QPS_ERR; 118 118 return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE); 119 119 } 120 120
+1
net/smc/smc_ib.h
··· 90 90 int smc_ib_ready_link(struct smc_link *lnk); 91 91 int smc_ib_modify_qp_rts(struct smc_link *lnk); 92 92 int smc_ib_modify_qp_reset(struct smc_link *lnk); 93 + int smc_ib_modify_qp_error(struct smc_link *lnk); 93 94 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev); 94 95 int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, 95 96 struct smc_buf_desc *buf_slot, u8 link_idx);
+1 -1
net/smc/smc_llc.c
··· 1630 1630 delllc.reason = htonl(rsn); 1631 1631 1632 1632 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 1633 - if (!smc_link_usable(&lgr->lnk[i])) 1633 + if (!smc_link_sendable(&lgr->lnk[i])) 1634 1634 continue; 1635 1635 if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc)) 1636 1636 break;
+9 -40
net/smc/smc_wr.c
··· 58 58 } 59 59 60 60 /* wait till all pending tx work requests on the given link are completed */ 61 - int smc_wr_tx_wait_no_pending_sends(struct smc_link *link) 61 + void smc_wr_tx_wait_no_pending_sends(struct smc_link *link) 62 62 { 63 - if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link), 64 - SMC_WR_TX_WAIT_PENDING_TIME)) 65 - return 0; 66 - else /* timeout */ 67 - return -EPIPE; 63 + wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link)); 68 64 } 69 65 70 66 static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id) ··· 79 83 struct smc_wr_tx_pend pnd_snd; 80 84 struct smc_link *link; 81 85 u32 pnd_snd_idx; 82 - int i; 83 86 84 87 link = wc->qp->qp_context; 85 88 ··· 119 124 } 120 125 121 126 if (wc->status) { 122 - for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { 123 - /* clear full struct smc_wr_tx_pend including .priv */ 124 - memset(&link->wr_tx_pends[i], 0, 125 - sizeof(link->wr_tx_pends[i])); 126 - memset(&link->wr_tx_bufs[i], 0, 127 - sizeof(link->wr_tx_bufs[i])); 128 - clear_bit(i, link->wr_tx_mask); 129 - } 130 127 if (link->lgr->smc_version == SMC_V2) { 131 128 memset(link->wr_tx_v2_pend, 0, 132 129 sizeof(*link->wr_tx_v2_pend)); ··· 171 184 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) 172 185 { 173 186 *idx = link->wr_tx_cnt; 174 - if (!smc_link_usable(link)) 187 + if (!smc_link_sendable(link)) 175 188 return -ENOLINK; 176 189 for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) { 177 190 if (!test_and_set_bit(*idx, link->wr_tx_mask)) ··· 214 227 } else { 215 228 rc = wait_event_interruptible_timeout( 216 229 link->wr_tx_wait, 217 - !smc_link_usable(link) || 230 + !smc_link_sendable(link) || 218 231 lgr->terminating || 219 232 (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY), 220 233 SMC_WR_TX_WAIT_FREE_SLOT_TIME); ··· 341 354 unsigned long timeout) 342 355 { 343 356 struct smc_wr_tx_pend *pend; 357 + u32 pnd_idx; 344 358 int rc; 345 359 346 360 pend = 
container_of(priv, struct smc_wr_tx_pend, priv); 347 361 pend->compl_requested = 1; 348 - init_completion(&link->wr_tx_compl[pend->idx]); 362 + pnd_idx = pend->idx; 363 + init_completion(&link->wr_tx_compl[pnd_idx]); 349 364 350 365 rc = smc_wr_tx_send(link, priv); 351 366 if (rc) 352 367 return rc; 353 368 /* wait for completion by smc_wr_tx_process_cqe() */ 354 369 rc = wait_for_completion_interruptible_timeout( 355 - &link->wr_tx_compl[pend->idx], timeout); 370 + &link->wr_tx_compl[pnd_idx], timeout); 356 371 if (rc <= 0) 357 372 rc = -ENODATA; 358 373 if (rc > 0) ··· 402 413 break; 403 414 } 404 415 return rc; 405 - } 406 - 407 - void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type, 408 - smc_wr_tx_filter filter, 409 - smc_wr_tx_dismisser dismisser, 410 - unsigned long data) 411 - { 412 - struct smc_wr_tx_pend_priv *tx_pend; 413 - struct smc_wr_rx_hdr *wr_tx; 414 - int i; 415 - 416 - for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { 417 - wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i]; 418 - if (wr_tx->type != wr_tx_hdr_type) 419 - continue; 420 - tx_pend = &link->wr_tx_pends[i].priv; 421 - if (filter(tx_pend, data)) 422 - dismisser(tx_pend); 423 - } 424 416 } 425 417 426 418 /****************************** receive queue ********************************/ ··· 639 669 smc_wr_wakeup_reg_wait(lnk); 640 670 smc_wr_wakeup_tx_wait(lnk); 641 671 642 - if (smc_wr_tx_wait_no_pending_sends(lnk)) 643 - bitmap_zero(lnk->wr_tx_mask, SMC_WR_BUF_CNT); 672 + smc_wr_tx_wait_no_pending_sends(lnk); 644 673 wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt))); 645 674 wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt))); 646 675
+2 -3
net/smc/smc_wr.h
··· 22 22 #define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */ 23 23 24 24 #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ) 25 - #define SMC_WR_TX_WAIT_PENDING_TIME (5 * HZ) 26 25 27 26 #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */ 28 27 ··· 61 62 62 63 static inline bool smc_wr_tx_link_hold(struct smc_link *link) 63 64 { 64 - if (!smc_link_usable(link)) 65 + if (!smc_link_sendable(link)) 65 66 return false; 66 67 atomic_inc(&link->wr_tx_refcnt); 67 68 return true; ··· 129 130 smc_wr_tx_filter filter, 130 131 smc_wr_tx_dismisser dismisser, 131 132 unsigned long data); 132 - int smc_wr_tx_wait_no_pending_sends(struct smc_link *link); 133 + void smc_wr_tx_wait_no_pending_sends(struct smc_link *link); 133 134 134 135 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler); 135 136 int smc_wr_rx_post_init(struct smc_link *link);
+1
net/xdp/xsk_buff_pool.c
··· 83 83 xskb = &pool->heads[i]; 84 84 xskb->pool = pool; 85 85 xskb->xdp.frame_sz = umem->chunk_size - umem->headroom; 86 + INIT_LIST_HEAD(&xskb->free_list_node); 86 87 if (pool->unaligned) 87 88 pool->free_heads[i] = xskb; 88 89 else
+1 -1
scripts/recordmcount.pl
··· 219 219 220 220 } elsif ($arch eq "s390" && $bits == 64) { 221 221 if ($cc =~ /-DCC_USING_HOTPATCH/) { 222 - $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$"; 222 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(brcl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$"; 223 223 $mcount_adjust = 0; 224 224 } 225 225 $alignment = 8;
+20 -15
security/selinux/hooks.c
··· 611 611 return 0; 612 612 } 613 613 614 - static int parse_sid(struct super_block *sb, const char *s, u32 *sid) 614 + static int parse_sid(struct super_block *sb, const char *s, u32 *sid, 615 + gfp_t gfp) 615 616 { 616 617 int rc = security_context_str_to_sid(&selinux_state, s, 617 - sid, GFP_KERNEL); 618 + sid, gfp); 618 619 if (rc) 619 620 pr_warn("SELinux: security_context_str_to_sid" 620 621 "(%s) failed for (dev %s, type %s) errno=%d\n", ··· 686 685 */ 687 686 if (opts) { 688 687 if (opts->fscontext) { 689 - rc = parse_sid(sb, opts->fscontext, &fscontext_sid); 688 + rc = parse_sid(sb, opts->fscontext, &fscontext_sid, 689 + GFP_KERNEL); 690 690 if (rc) 691 691 goto out; 692 692 if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, ··· 696 694 sbsec->flags |= FSCONTEXT_MNT; 697 695 } 698 696 if (opts->context) { 699 - rc = parse_sid(sb, opts->context, &context_sid); 697 + rc = parse_sid(sb, opts->context, &context_sid, 698 + GFP_KERNEL); 700 699 if (rc) 701 700 goto out; 702 701 if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, ··· 706 703 sbsec->flags |= CONTEXT_MNT; 707 704 } 708 705 if (opts->rootcontext) { 709 - rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid); 706 + rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid, 707 + GFP_KERNEL); 710 708 if (rc) 711 709 goto out; 712 710 if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, ··· 716 712 sbsec->flags |= ROOTCONTEXT_MNT; 717 713 } 718 714 if (opts->defcontext) { 719 - rc = parse_sid(sb, opts->defcontext, &defcontext_sid); 715 + rc = parse_sid(sb, opts->defcontext, &defcontext_sid, 716 + GFP_KERNEL); 720 717 if (rc) 721 718 goto out; 722 719 if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, ··· 2707 2702 return (sbsec->flags & SE_MNTMASK) ? 
1 : 0; 2708 2703 2709 2704 if (opts->fscontext) { 2710 - rc = parse_sid(sb, opts->fscontext, &sid); 2705 + rc = parse_sid(sb, opts->fscontext, &sid, GFP_NOWAIT); 2711 2706 if (rc) 2712 2707 return 1; 2713 2708 if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid)) 2714 2709 return 1; 2715 2710 } 2716 2711 if (opts->context) { 2717 - rc = parse_sid(sb, opts->context, &sid); 2712 + rc = parse_sid(sb, opts->context, &sid, GFP_NOWAIT); 2718 2713 if (rc) 2719 2714 return 1; 2720 2715 if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid)) ··· 2724 2719 struct inode_security_struct *root_isec; 2725 2720 2726 2721 root_isec = backing_inode_security(sb->s_root); 2727 - rc = parse_sid(sb, opts->rootcontext, &sid); 2722 + rc = parse_sid(sb, opts->rootcontext, &sid, GFP_NOWAIT); 2728 2723 if (rc) 2729 2724 return 1; 2730 2725 if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid)) 2731 2726 return 1; 2732 2727 } 2733 2728 if (opts->defcontext) { 2734 - rc = parse_sid(sb, opts->defcontext, &sid); 2729 + rc = parse_sid(sb, opts->defcontext, &sid, GFP_NOWAIT); 2735 2730 if (rc) 2736 2731 return 1; 2737 2732 if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid)) ··· 2754 2749 return 0; 2755 2750 2756 2751 if (opts->fscontext) { 2757 - rc = parse_sid(sb, opts->fscontext, &sid); 2752 + rc = parse_sid(sb, opts->fscontext, &sid, GFP_KERNEL); 2758 2753 if (rc) 2759 2754 return rc; 2760 2755 if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid)) 2761 2756 goto out_bad_option; 2762 2757 } 2763 2758 if (opts->context) { 2764 - rc = parse_sid(sb, opts->context, &sid); 2759 + rc = parse_sid(sb, opts->context, &sid, GFP_KERNEL); 2765 2760 if (rc) 2766 2761 return rc; 2767 2762 if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid)) ··· 2770 2765 if (opts->rootcontext) { 2771 2766 struct inode_security_struct *root_isec; 2772 2767 root_isec = backing_inode_security(sb->s_root); 2773 - rc = parse_sid(sb, opts->rootcontext, &sid); 2768 + rc = parse_sid(sb, 
opts->rootcontext, &sid, GFP_KERNEL); 2774 2769 if (rc) 2775 2770 return rc; 2776 2771 if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid)) 2777 2772 goto out_bad_option; 2778 2773 } 2779 2774 if (opts->defcontext) { 2780 - rc = parse_sid(sb, opts->defcontext, &sid); 2775 + rc = parse_sid(sb, opts->defcontext, &sid, GFP_KERNEL); 2781 2776 if (rc) 2782 2777 return rc; 2783 2778 if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid)) ··· 5785 5780 struct sk_security_struct *sksec; 5786 5781 struct common_audit_data ad; 5787 5782 struct lsm_network_audit net = {0,}; 5788 - u8 proto; 5783 + u8 proto = 0; 5789 5784 5790 5785 sk = skb_to_full_sk(skb); 5791 5786 if (sk == NULL)
+14 -17
security/tomoyo/util.c
··· 1051 1051 return false; 1052 1052 if (!domain) 1053 1053 return true; 1054 + if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED])) 1055 + return false; 1054 1056 list_for_each_entry_rcu(ptr, &domain->acl_info_list, list, 1055 1057 srcu_read_lock_held(&tomoyo_ss)) { 1056 1058 u16 perm; 1057 - u8 i; 1058 1059 1059 1060 if (ptr->is_deleted) 1060 1061 continue; ··· 1066 1065 */ 1067 1066 switch (ptr->type) { 1068 1067 case TOMOYO_TYPE_PATH_ACL: 1069 - data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm); 1068 + perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm); 1070 1069 break; 1071 1070 case TOMOYO_TYPE_PATH2_ACL: 1072 - data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm); 1071 + perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm); 1073 1072 break; 1074 1073 case TOMOYO_TYPE_PATH_NUMBER_ACL: 1075 - data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head) 1074 + perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head) 1076 1075 ->perm); 1077 1076 break; 1078 1077 case TOMOYO_TYPE_MKDEV_ACL: 1079 - data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm); 1078 + perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm); 1080 1079 break; 1081 1080 case TOMOYO_TYPE_INET_ACL: 1082 - data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm); 1081 + perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm); 1083 1082 break; 1084 1083 case TOMOYO_TYPE_UNIX_ACL: 1085 - data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm); 1084 + perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm); 1086 1085 break; 1087 1086 case TOMOYO_TYPE_MANUAL_TASK_ACL: 1088 1087 perm = 0; ··· 1090 1089 default: 1091 1090 perm = 1; 1092 1091 } 1093 - for (i = 0; i < 16; i++) 1094 - if (perm & (1 << i)) 1095 - count++; 1092 + count += hweight16(perm); 1096 1093 } 1097 1094 if (count < 
tomoyo_profile(domain->ns, domain->profile)-> 1098 1095 pref[TOMOYO_PREF_MAX_LEARNING_ENTRY]) 1099 1096 return true; 1100 - if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) { 1101 - domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true; 1102 - /* r->granted = false; */ 1103 - tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]); 1097 + WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true); 1098 + /* r->granted = false; */ 1099 + tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]); 1104 1100 #ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING 1105 - pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n", 1106 - domain->domainname->name); 1101 + pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n", 1102 + domain->domainname->name); 1107 1103 #endif 1108 - } 1109 1104 return false; 1110 1105 }
+4
sound/core/jack.c
··· 509 509 return -ENOMEM; 510 510 511 511 jack->id = kstrdup(id, GFP_KERNEL); 512 + if (jack->id == NULL) { 513 + kfree(jack); 514 + return -ENOMEM; 515 + } 512 516 513 517 /* don't creat input device for phantom jack */ 514 518 if (!phantom_jack) {
+1
sound/core/rawmidi.c
··· 447 447 err = -ENOMEM; 448 448 goto __error; 449 449 } 450 + rawmidi_file->user_pversion = 0; 450 451 init_waitqueue_entry(&wait, current); 451 452 add_wait_queue(&rmidi->open_wait, &wait); 452 453 while (1) {
+1 -1
sound/drivers/opl3/opl3_midi.c
··· 397 397 } 398 398 if (instr_4op) { 399 399 vp2 = &opl3->voices[voice + 3]; 400 - if (vp->state > 0) { 400 + if (vp2->state > 0) { 401 401 opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + 402 402 voice_offset + 3); 403 403 reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
+10 -3
sound/hda/intel-sdw-acpi.c
··· 132 132 return AE_NOT_FOUND; 133 133 } 134 134 135 - info->handle = handle; 136 - 137 135 /* 138 136 * On some Intel platforms, multiple children of the HDAS 139 137 * device can be found, but only one of them is the SoundWire ··· 141 143 */ 142 144 if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE) 143 145 return AE_OK; /* keep going */ 146 + 147 + /* found the correct SoundWire controller */ 148 + info->handle = handle; 144 149 145 150 /* device found, stop namespace walk */ 146 151 return AE_CTRL_TERMINATE; ··· 165 164 acpi_status status; 166 165 167 166 info->handle = NULL; 167 + /* 168 + * In the HDAS ACPI scope, 'SNDW' may be either the child of 169 + * 'HDAS' or the grandchild of 'HDAS'. So let's go through 170 + * the ACPI from 'HDAS' at max depth of 2 to find the 'SNDW' 171 + * device. 172 + */ 168 173 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, 169 - parent_handle, 1, 174 + parent_handle, 2, 170 175 sdw_intel_acpi_cb, 171 176 NULL, info, NULL); 172 177 if (ACPI_FAILURE(status) || info->handle == NULL)
+15 -6
sound/pci/hda/patch_hdmi.c
··· 2947 2947 2948 2948 /* Intel Haswell and onwards; audio component with eld notifier */ 2949 2949 static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid, 2950 - const int *port_map, int port_num, int dev_num) 2950 + const int *port_map, int port_num, int dev_num, 2951 + bool send_silent_stream) 2951 2952 { 2952 2953 struct hdmi_spec *spec; 2953 2954 int err; ··· 2981 2980 * Enable silent stream feature, if it is enabled via 2982 2981 * module param or Kconfig option 2983 2982 */ 2984 - if (enable_silent_stream) 2983 + if (send_silent_stream) 2985 2984 spec->send_silent_stream = true; 2986 2985 2987 2986 return parse_intel_hdmi(codec); ··· 2989 2988 2990 2989 static int patch_i915_hsw_hdmi(struct hda_codec *codec) 2991 2990 { 2992 - return intel_hsw_common_init(codec, 0x08, NULL, 0, 3); 2991 + return intel_hsw_common_init(codec, 0x08, NULL, 0, 3, 2992 + enable_silent_stream); 2993 2993 } 2994 2994 2995 2995 static int patch_i915_glk_hdmi(struct hda_codec *codec) 2996 2996 { 2997 - return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3); 2997 + /* 2998 + * Silent stream calls audio component .get_power() from 2999 + * .pin_eld_notify(). On GLK this will deadlock in i915 due 3000 + * to the audio vs. CDCLK workaround. 
3001 + */ 3002 + return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3, false); 2998 3003 } 2999 3004 3000 3005 static int patch_i915_icl_hdmi(struct hda_codec *codec) ··· 3011 3004 */ 3012 3005 static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb}; 3013 3006 3014 - return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3); 3007 + return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3, 3008 + enable_silent_stream); 3015 3009 } 3016 3010 3017 3011 static int patch_i915_tgl_hdmi(struct hda_codec *codec) ··· 3024 3016 static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}; 3025 3017 int ret; 3026 3018 3027 - ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4); 3019 + ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4, 3020 + enable_silent_stream); 3028 3021 if (!ret) { 3029 3022 struct hdmi_spec *spec = codec->spec; 3030 3023
+28 -1
sound/pci/hda/patch_realtek.c
··· 6546 6546 alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs); 6547 6547 } 6548 6548 6549 + static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec, 6550 + const struct hda_fixup *fix, 6551 + int action) 6552 + { 6553 + /* 6554 + * The Clevo NJ51CU comes either with the ALC293 or the ALC256 codec, 6555 + * but uses the 0x8686 subproduct id in both cases. The ALC256 codec 6556 + * needs an additional quirk for sound working after suspend and resume. 6557 + */ 6558 + if (codec->core.vendor_id == 0x10ec0256) { 6559 + alc_update_coef_idx(codec, 0x10, 1<<9, 0); 6560 + snd_hda_codec_set_pincfg(codec, 0x19, 0x04a11120); 6561 + } else { 6562 + snd_hda_codec_set_pincfg(codec, 0x1a, 0x04a1113c); 6563 + } 6564 + } 6565 + 6549 6566 enum { 6550 6567 ALC269_FIXUP_GPIO2, 6551 6568 ALC269_FIXUP_SONY_VAIO, ··· 6783 6766 ALC256_FIXUP_SET_COEF_DEFAULTS, 6784 6767 ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, 6785 6768 ALC233_FIXUP_NO_AUDIO_JACK, 6769 + ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME, 6786 6770 }; 6787 6771 6788 6772 static const struct hda_fixup alc269_fixups[] = { ··· 8508 8490 .type = HDA_FIXUP_FUNC, 8509 8491 .v.func = alc233_fixup_no_audio_jack, 8510 8492 }, 8493 + [ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME] = { 8494 + .type = HDA_FIXUP_FUNC, 8495 + .v.func = alc256_fixup_mic_no_presence_and_resume, 8496 + .chained = true, 8497 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC 8498 + }, 8511 8499 }; 8512 8500 8513 8501 static const struct snd_pci_quirk alc269_fixup_tbl[] = { ··· 8684 8660 SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN), 8685 8661 SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), 8686 8662 SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360), 8663 + SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), 8687 8664 SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), 8688 8665 
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), 8689 8666 SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), ··· 8730 8705 SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED), 8731 8706 SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), 8732 8707 SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED), 8708 + SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 8733 8709 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 8734 8710 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 8735 8711 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ··· 8855 8829 SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC), 8856 8830 SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 8857 8831 SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 8858 - SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 8832 + SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME), 8859 8833 SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 8860 8834 SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 8861 8835 SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), ··· 9149 9123 {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, 9150 9124 {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"}, 9151 9125 {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"}, 9126 + {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"}, 9152 9127 {} 9153 9128 }; 9154 9129 #define 
ALC225_STANDARD_PINS \
+4
sound/soc/codecs/rt5682.c
··· 929 929 unsigned int val, count; 930 930 931 931 if (jack_insert) { 932 + snd_soc_dapm_mutex_lock(dapm); 933 + 932 934 snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, 933 935 RT5682_PWR_VREF2 | RT5682_PWR_MB, 934 936 RT5682_PWR_VREF2 | RT5682_PWR_MB); ··· 981 979 snd_soc_component_update_bits(component, RT5682_MICBIAS_2, 982 980 RT5682_PWR_CLK25M_MASK | RT5682_PWR_CLK1M_MASK, 983 981 RT5682_PWR_CLK25M_PU | RT5682_PWR_CLK1M_PU); 982 + 983 + snd_soc_dapm_mutex_unlock(dapm); 984 984 } else { 985 985 rt5682_enable_push_button_irq(component, false); 986 986 snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+2 -2
sound/soc/codecs/tas2770.c
··· 291 291 ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ | 292 292 TAS2770_TDM_CFG_REG0_31_88_2_96KHZ; 293 293 break; 294 - case 19200: 294 + case 192000: 295 295 ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_48KHZ | 296 296 TAS2770_TDM_CFG_REG0_31_176_4_192KHZ; 297 297 break; 298 - case 17640: 298 + case 176400: 299 299 ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ | 300 300 TAS2770_TDM_CFG_REG0_31_176_4_192KHZ; 301 301 break;
-33
sound/soc/meson/aiu-encoder-i2s.c
··· 18 18 #define AIU_RST_SOFT_I2S_FAST BIT(0) 19 19 20 20 #define AIU_I2S_DAC_CFG_MSB_FIRST BIT(2) 21 - #define AIU_I2S_MISC_HOLD_EN BIT(2) 22 21 #define AIU_CLK_CTRL_I2S_DIV_EN BIT(0) 23 22 #define AIU_CLK_CTRL_I2S_DIV GENMASK(3, 2) 24 23 #define AIU_CLK_CTRL_AOCLK_INVERT BIT(6) ··· 33 34 snd_soc_component_update_bits(component, AIU_CLK_CTRL, 34 35 AIU_CLK_CTRL_I2S_DIV_EN, 35 36 enable ? AIU_CLK_CTRL_I2S_DIV_EN : 0); 36 - } 37 - 38 - static void aiu_encoder_i2s_hold(struct snd_soc_component *component, 39 - bool enable) 40 - { 41 - snd_soc_component_update_bits(component, AIU_I2S_MISC, 42 - AIU_I2S_MISC_HOLD_EN, 43 - enable ? AIU_I2S_MISC_HOLD_EN : 0); 44 - } 45 - 46 - static int aiu_encoder_i2s_trigger(struct snd_pcm_substream *substream, int cmd, 47 - struct snd_soc_dai *dai) 48 - { 49 - struct snd_soc_component *component = dai->component; 50 - 51 - switch (cmd) { 52 - case SNDRV_PCM_TRIGGER_START: 53 - case SNDRV_PCM_TRIGGER_RESUME: 54 - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 55 - aiu_encoder_i2s_hold(component, false); 56 - return 0; 57 - 58 - case SNDRV_PCM_TRIGGER_STOP: 59 - case SNDRV_PCM_TRIGGER_SUSPEND: 60 - case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 61 - aiu_encoder_i2s_hold(component, true); 62 - return 0; 63 - 64 - default: 65 - return -EINVAL; 66 - } 67 37 } 68 38 69 39 static int aiu_encoder_i2s_setup_desc(struct snd_soc_component *component, ··· 321 353 } 322 354 323 355 const struct snd_soc_dai_ops aiu_encoder_i2s_dai_ops = { 324 - .trigger = aiu_encoder_i2s_trigger, 325 356 .hw_params = aiu_encoder_i2s_hw_params, 326 357 .hw_free = aiu_encoder_i2s_hw_free, 327 358 .set_fmt = aiu_encoder_i2s_set_fmt,
+19
sound/soc/meson/aiu-fifo-i2s.c
··· 20 20 #define AIU_MEM_I2S_CONTROL_MODE_16BIT BIT(6) 21 21 #define AIU_MEM_I2S_BUF_CNTL_INIT BIT(0) 22 22 #define AIU_RST_SOFT_I2S_FAST BIT(0) 23 + #define AIU_I2S_MISC_HOLD_EN BIT(2) 24 + #define AIU_I2S_MISC_FORCE_LEFT_RIGHT BIT(4) 23 25 24 26 #define AIU_FIFO_I2S_BLOCK 256 25 27 ··· 92 90 unsigned int val; 93 91 int ret; 94 92 93 + snd_soc_component_update_bits(component, AIU_I2S_MISC, 94 + AIU_I2S_MISC_HOLD_EN, 95 + AIU_I2S_MISC_HOLD_EN); 96 + 95 97 ret = aiu_fifo_hw_params(substream, params, dai); 96 98 if (ret) 97 99 return ret; ··· 122 116 val = FIELD_PREP(AIU_MEM_I2S_MASKS_IRQ_BLOCK, val); 123 117 snd_soc_component_update_bits(component, AIU_MEM_I2S_MASKS, 124 118 AIU_MEM_I2S_MASKS_IRQ_BLOCK, val); 119 + 120 + /* 121 + * Most (all?) supported SoCs have this bit set by default. The vendor 122 + * driver however sets it manually (depending on the version either 123 + * while un-setting AIU_I2S_MISC_HOLD_EN or right before that). Follow 124 + * the same approach for consistency with the vendor driver. 125 + */ 126 + snd_soc_component_update_bits(component, AIU_I2S_MISC, 127 + AIU_I2S_MISC_FORCE_LEFT_RIGHT, 128 + AIU_I2S_MISC_FORCE_LEFT_RIGHT); 129 + 130 + snd_soc_component_update_bits(component, AIU_I2S_MISC, 131 + AIU_I2S_MISC_HOLD_EN, 0); 125 132 126 133 return 0; 127 134 }
+6
sound/soc/meson/aiu-fifo.c
··· 5 5 6 6 #include <linux/bitfield.h> 7 7 #include <linux/clk.h> 8 + #include <linux/dma-mapping.h> 8 9 #include <sound/pcm_params.h> 9 10 #include <sound/soc.h> 10 11 #include <sound/soc-dai.h> ··· 180 179 struct snd_card *card = rtd->card->snd_card; 181 180 struct aiu_fifo *fifo = dai->playback_dma_data; 182 181 size_t size = fifo->pcm->buffer_bytes_max; 182 + int ret; 183 + 184 + ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32)); 185 + if (ret) 186 + return ret; 183 187 184 188 snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV, 185 189 card->dev, size, size);
+4
sound/soc/sof/intel/pci-tgl.c
··· 112 112 .driver_data = (unsigned long)&adls_desc}, 113 113 { PCI_DEVICE(0x8086, 0x51c8), /* ADL-P */ 114 114 .driver_data = (unsigned long)&adl_desc}, 115 + { PCI_DEVICE(0x8086, 0x51cd), /* ADL-P */ 116 + .driver_data = (unsigned long)&adl_desc}, 115 117 { PCI_DEVICE(0x8086, 0x51cc), /* ADL-M */ 118 + .driver_data = (unsigned long)&adl_desc}, 119 + { PCI_DEVICE(0x8086, 0x54c8), /* ADL-N */ 116 120 .driver_data = (unsigned long)&adl_desc}, 117 121 { 0, } 118 122 };
+10 -1
sound/soc/tegra/tegra_asoc_machine.c
··· 116 116 SOC_DAPM_PIN_SWITCH("Headset Mic"), 117 117 SOC_DAPM_PIN_SWITCH("Internal Mic 1"), 118 118 SOC_DAPM_PIN_SWITCH("Internal Mic 2"), 119 + SOC_DAPM_PIN_SWITCH("Headphones"), 120 + SOC_DAPM_PIN_SWITCH("Mic Jack"), 119 121 }; 120 122 121 123 int tegra_asoc_machine_init(struct snd_soc_pcm_runtime *rtd) 122 124 { 123 125 struct snd_soc_card *card = rtd->card; 124 126 struct tegra_machine *machine = snd_soc_card_get_drvdata(card); 127 + const char *jack_name; 125 128 int err; 126 129 127 130 if (machine->gpiod_hp_det && machine->asoc->add_hp_jack) { 128 - err = snd_soc_card_jack_new(card, "Headphones Jack", 131 + if (machine->asoc->hp_jack_name) 132 + jack_name = machine->asoc->hp_jack_name; 133 + else 134 + jack_name = "Headphones Jack"; 135 + 136 + err = snd_soc_card_jack_new(card, jack_name, 129 137 SND_JACK_HEADPHONE, 130 138 &tegra_machine_hp_jack, 131 139 tegra_machine_hp_jack_pins, ··· 666 658 static const struct tegra_asoc_data tegra_max98090_data = { 667 659 .mclk_rate = tegra_machine_mclk_rate_12mhz, 668 660 .card = &snd_soc_tegra_max98090, 661 + .hp_jack_name = "Headphones", 669 662 .add_common_dapm_widgets = true, 670 663 .add_common_controls = true, 671 664 .add_common_snd_ops = true,
+1
sound/soc/tegra/tegra_asoc_machine.h
··· 14 14 struct tegra_asoc_data { 15 15 unsigned int (*mclk_rate)(unsigned int srate); 16 16 const char *codec_dev_name; 17 + const char *hp_jack_name; 17 18 struct snd_soc_card *card; 18 19 unsigned int mclk_id; 19 20 bool hp_jack_gpio_active_low;
+9 -4
tools/perf/builtin-inject.c
··· 755 755 return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM; 756 756 } 757 757 758 + static int output_fd(struct perf_inject *inject) 759 + { 760 + return inject->in_place_update ? -1 : perf_data__fd(&inject->output); 761 + } 762 + 758 763 static int __cmd_inject(struct perf_inject *inject) 759 764 { 760 765 int ret = -EINVAL; 761 766 struct perf_session *session = inject->session; 762 - struct perf_data *data_out = &inject->output; 763 - int fd = inject->in_place_update ? -1 : perf_data__fd(data_out); 767 + int fd = output_fd(inject); 764 768 u64 output_data_offset; 765 769 766 770 signal(SIGINT, sig_handler); ··· 1019 1015 } 1020 1016 1021 1017 inject.session = __perf_session__new(&data, repipe, 1022 - perf_data__fd(&inject.output), 1018 + output_fd(&inject), 1023 1019 &inject.tool); 1024 1020 if (IS_ERR(inject.session)) { 1025 1021 ret = PTR_ERR(inject.session); ··· 1082 1078 zstd_fini(&(inject.session->zstd_data)); 1083 1079 perf_session__delete(inject.session); 1084 1080 out_close_output: 1085 - perf_data__close(&inject.output); 1081 + if (!inject.in_place_update) 1082 + perf_data__close(&inject.output); 1086 1083 free(inject.itrace_synth_opts.vm_tm_corr_args); 1087 1084 return ret; 1088 1085 }
+5
tools/perf/util/expr.c
··· 12 12 #include "expr-bison.h" 13 13 #include "expr-flex.h" 14 14 #include "smt.h" 15 + #include <linux/err.h> 15 16 #include <linux/kernel.h> 16 17 #include <linux/zalloc.h> 17 18 #include <ctype.h> ··· 300 299 return NULL; 301 300 302 301 ctx->ids = hashmap__new(key_hash, key_equal, NULL); 302 + if (IS_ERR(ctx->ids)) { 303 + free(ctx); 304 + return NULL; 305 + } 303 306 ctx->runtime = 0; 304 307 305 308 return ctx;
+1
tools/testing/selftests/kvm/.gitignore
··· 35 35 /x86_64/vmx_apic_access_test 36 36 /x86_64/vmx_close_while_nested_test 37 37 /x86_64/vmx_dirty_log_test 38 + /x86_64/vmx_invalid_nested_guest_state 38 39 /x86_64/vmx_preemption_timer_test 39 40 /x86_64/vmx_set_nested_state_test 40 41 /x86_64/vmx_tsc_adjust_test
+1
tools/testing/selftests/kvm/Makefile
··· 64 64 TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test 65 65 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test 66 66 TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test 67 + TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state 67 68 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test 68 69 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test 69 70 TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
+1 -9
tools/testing/selftests/kvm/include/kvm_util.h
··· 71 71 72 72 #endif 73 73 74 - #if defined(__x86_64__) 75 - unsigned long vm_compute_max_gfn(struct kvm_vm *vm); 76 - #else 77 - static inline unsigned long vm_compute_max_gfn(struct kvm_vm *vm) 78 - { 79 - return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; 80 - } 81 - #endif 82 - 83 74 #define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT) 84 75 #define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE) 85 76 ··· 321 330 322 331 unsigned int vm_get_page_size(struct kvm_vm *vm); 323 332 unsigned int vm_get_page_shift(struct kvm_vm *vm); 333 + unsigned long vm_compute_max_gfn(struct kvm_vm *vm); 324 334 uint64_t vm_get_max_gfn(struct kvm_vm *vm); 325 335 int vm_get_fd(struct kvm_vm *vm); 326 336
+5
tools/testing/selftests/kvm/lib/kvm_util.c
··· 2328 2328 return vm->page_shift; 2329 2329 } 2330 2330 2331 + unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm) 2332 + { 2333 + return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; 2334 + } 2335 + 2331 2336 uint64_t vm_get_max_gfn(struct kvm_vm *vm) 2332 2337 { 2333 2338 return vm->max_gfn;
+105
tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only
 2 + #include "test_util.h"
 3 + #include "kvm_util.h"
 4 + #include "processor.h"
 5 + #include "vmx.h"
 6 + 
 7 + #include <string.h>
 8 + #include <sys/ioctl.h>
 9 + 
 10 + #include "kselftest.h"
 11 + 
 12 + #define VCPU_ID 0
 13 + #define ARBITRARY_IO_PORT 0x2000
 14 + 
 15 + static struct kvm_vm *vm;
 16 + 
 17 + static void l2_guest_code(void)
 18 + {
 19 + 	/*
 20 + 	 * Generate an exit to L0 userspace, i.e. main(), via I/O to an
 21 + 	 * arbitrary port.
 22 + 	 */
 23 + 	asm volatile("inb %%dx, %%al"
 24 + 		     : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
 25 + }
 26 + 
 27 + static void l1_guest_code(struct vmx_pages *vmx_pages)
 28 + {
 29 + #define L2_GUEST_STACK_SIZE 64
 30 + 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 31 + 
 32 + 	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
 33 + 	GUEST_ASSERT(load_vmcs(vmx_pages));
 34 + 
 35 + 	/* Prepare the VMCS for L2 execution. */
 36 + 	prepare_vmcs(vmx_pages, l2_guest_code,
 37 + 		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 38 + 
 39 + 	/*
 40 + 	 * L2 must be run without unrestricted guest, verify that the selftests
 41 + 	 * library hasn't enabled it.  Because KVM selftests jump directly to
 42 + 	 * 64-bit mode, unrestricted guest support isn't required.
 43 + 	 */
 44 + 	GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ||
 45 + 		     !(vmreadz(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_UNRESTRICTED_GUEST));
 46 + 
 47 + 	GUEST_ASSERT(!vmlaunch());
 48 + 
 49 + 	/* L2 should triple fault after main() stuffs invalid guest state. */
 50 + 	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
 51 + 	GUEST_DONE();
 52 + }
 53 + 
 54 + int main(int argc, char *argv[])
 55 + {
 56 + 	vm_vaddr_t vmx_pages_gva;
 57 + 	struct kvm_sregs sregs;
 58 + 	struct kvm_run *run;
 59 + 	struct ucall uc;
 60 + 
 61 + 	nested_vmx_check_supported();
 62 + 
 63 + 	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
 64 + 
 65 + 	/* Allocate VMX pages and shared descriptors (vmx_pages). */
 66 + 	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 67 + 	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 68 + 
 69 + 	vcpu_run(vm, VCPU_ID);
 70 + 
 71 + 	run = vcpu_state(vm, VCPU_ID);
 72 + 
 73 + 	/*
 74 + 	 * The first exit to L0 userspace should be an I/O access from L2.
 75 + 	 * Running L1 should launch L2 without triggering an exit to userspace.
 76 + 	 */
 77 + 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 78 + 		    "Expected KVM_EXIT_IO, got: %u (%s)\n",
 79 + 		    run->exit_reason, exit_reason_str(run->exit_reason));
 80 + 
 81 + 	TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
 82 + 		    "Expected IN from port %d from L2, got port %d",
 83 + 		    ARBITRARY_IO_PORT, run->io.port);
 84 + 
 85 + 	/*
 86 + 	 * Stuff invalid guest state for L2 by making TR unusable.  The next
 87 + 	 * KVM_RUN should induce a TRIPLE_FAULT in L2 as KVM doesn't support
 88 + 	 * emulating invalid guest state for L2.
 89 + 	 */
 90 + 	memset(&sregs, 0, sizeof(sregs));
 91 + 	vcpu_sregs_get(vm, VCPU_ID, &sregs);
 92 + 	sregs.tr.unusable = 1;
 93 + 	vcpu_sregs_set(vm, VCPU_ID, &sregs);
 94 + 
 95 + 	vcpu_run(vm, VCPU_ID);
 96 + 
 97 + 	switch (get_ucall(vm, VCPU_ID, &uc)) {
 98 + 	case UCALL_DONE:
 99 + 		break;
 100 + 	case UCALL_ABORT:
 101 + 		TEST_FAIL("%s", (const char *)uc.args[0]);
 102 + 	default:
 103 + 		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
 104 + 	}
 105 + }
-17
tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
··· 110 110 ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); 111 111 TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); 112 112 113 - /* testcase 4, set capabilities when we don't have PDCM bit */ 114 - entry_1_0->ecx &= ~X86_FEATURE_PDCM; 115 - vcpu_set_cpuid(vm, VCPU_ID, cpuid); 116 - ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); 117 - TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); 118 - 119 - /* testcase 5, set capabilities when we don't have PMU version bits */ 120 - entry_1_0->ecx |= X86_FEATURE_PDCM; 121 - eax.split.version_id = 0; 122 - entry_1_0->ecx = eax.full; 123 - vcpu_set_cpuid(vm, VCPU_ID, cpuid); 124 - ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); 125 - TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); 126 - 127 - vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0); 128 - ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0); 129 - 130 113 kvm_vm_free(vm); 131 114 }
-1
tools/testing/selftests/net/mptcp/config
··· 9 9 CONFIG_NETFILTER_ADVANCED=y 10 10 CONFIG_NETFILTER_NETLINK=m 11 11 CONFIG_NF_TABLES=m 12 - CONFIG_NFT_COUNTER=m 13 12 CONFIG_NFT_COMPAT=m 14 13 CONFIG_NETFILTER_XTABLES=m 15 14 CONFIG_NETFILTER_XT_MATCH_BPF=m
+4 -2
tools/testing/selftests/net/udpgro_fwd.sh
··· 132 132 local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \ 133 133 sed -e 's/\[//' -e 's/:.*//'` 134 134 if [ $rcv != $pkts ]; then 135 - echo " fail - received $rvs packets, expected $pkts" 135 + echo " fail - received $rcv packets, expected $pkts" 136 136 ret=1 137 137 return 138 138 fi ··· 185 185 IPT=iptables 186 186 SUFFIX=24 187 187 VXDEV=vxlan 188 + PING=ping 188 189 189 190 if [ $family = 6 ]; then 190 191 BM_NET=$BM_NET_V6 ··· 193 192 SUFFIX="64 nodad" 194 193 VXDEV=vxlan6 195 194 IPT=ip6tables 195 + PING="ping6" 196 196 fi 197 197 198 198 echo "IPv$family" ··· 239 237 240 238 # load arp cache before running the test to reduce the amount of 241 239 # stray traffic on top of the UDP tunnel 242 - ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null 240 + ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null 243 241 run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST 244 242 cleanup 245 243
+6 -6
tools/testing/selftests/net/udpgso.c
··· 156 156 }, 157 157 { 158 158 /* send max number of min sized segments */ 159 - .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4, 159 + .tlen = UDP_MAX_SEGMENTS, 160 160 .gso_len = 1, 161 - .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4, 161 + .r_num_mss = UDP_MAX_SEGMENTS, 162 162 }, 163 163 { 164 164 /* send max number + 1 of min sized segments: fail */ 165 - .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1, 165 + .tlen = UDP_MAX_SEGMENTS + 1, 166 166 .gso_len = 1, 167 167 .tfail = true, 168 168 }, ··· 259 259 }, 260 260 { 261 261 /* send max number of min sized segments */ 262 - .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6, 262 + .tlen = UDP_MAX_SEGMENTS, 263 263 .gso_len = 1, 264 - .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6, 264 + .r_num_mss = UDP_MAX_SEGMENTS, 265 265 }, 266 266 { 267 267 /* send max number + 1 of min sized segments: fail */ 268 - .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1, 268 + .tlen = UDP_MAX_SEGMENTS + 1, 269 269 .gso_len = 1, 270 270 .tfail = true, 271 271 },
+7 -1
tools/testing/selftests/net/udpgso_bench_tx.c
··· 419 419 420 420 static void parse_opts(int argc, char **argv) 421 421 { 422 + const char *bind_addr = NULL; 422 423 int max_len, hdrlen; 423 424 int c; 424 425 ··· 447 446 cfg_cpu = strtol(optarg, NULL, 0); 448 447 break; 449 448 case 'D': 450 - setup_sockaddr(cfg_family, optarg, &cfg_dst_addr); 449 + bind_addr = optarg; 451 450 break; 452 451 case 'l': 453 452 cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000; ··· 492 491 break; 493 492 } 494 493 } 494 + 495 + if (!bind_addr) 496 + bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0"; 497 + 498 + setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr); 495 499 496 500 if (optind != argc) 497 501 usage(argv[0]);