Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+4202 -2132
+4
.mailmap
··· 48 48 Felix Moeller <felix@derklecks.de> 49 49 Filipe Lautert <filipe@icewall.org> 50 50 Franck Bui-Huu <vagabon.xyz@gmail.com> 51 + Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com> 52 + Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com> 53 + Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com> 51 54 Frank Zago <fzago@systemfabricworks.com> 52 55 Greg Kroah-Hartman <greg@echidna.(none)> 53 56 Greg Kroah-Hartman <gregkh@suse.de> ··· 82 79 Kenneth W Chen <kenneth.w.chen@intel.com> 83 80 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> 84 81 Koushik <raghavendra.koushik@neterion.com> 82 + Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com> 85 83 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 86 84 Leonid I Ananiev <leonid.i.ananiev@intel.com> 87 85 Linas Vepstas <linas@austin.ibm.com>
+1 -1
Documentation/devicetree/bindings/arc/archs-pct.txt
··· 2 2 3 3 The ARC HS can be configured with a pipeline performance monitor for counting 4 4 CPU and cache events like cache misses and hits. Like conventional PCT there 5 - are 100+ hardware conditions dynamically mapped to upto 32 counters. 5 + are 100+ hardware conditions dynamically mapped to up to 32 counters. 6 6 It also supports overflow interrupts. 7 7 8 8 Required properties:
+1 -1
Documentation/devicetree/bindings/arc/pct.txt
··· 2 2 3 3 The ARC700 can be configured with a pipeline performance monitor for counting 4 4 CPU and cache events like cache misses and hits. Like conventional PCT there 5 - are 100+ hardware conditions dynamically mapped to upto 32 counters 5 + are 100+ hardware conditions dynamically mapped to up to 32 counters 6 6 7 7 Note that: 8 8 * The ARC 700 PCT does not support interrupts; although HW events may be
-1
Documentation/devicetree/bindings/arm/cpus.txt
··· 192 192 can be one of: 193 193 "allwinner,sun6i-a31" 194 194 "allwinner,sun8i-a23" 195 - "arm,psci" 196 195 "arm,realview-smp" 197 196 "brcm,bcm-nsp-smp" 198 197 "brcm,brahma-b15"
+2 -2
Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
··· 6 6 Required properties : 7 7 8 8 - reg : Offset and length of the register set for the device 9 - - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or 10 - "rockchip,rk3288-i2c". 9 + - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c", 10 + "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c". 11 11 - interrupts : interrupt number 12 12 - clocks : parent clock 13 13
+5 -2
Documentation/devicetree/bindings/net/mediatek-net.txt
··· 9 9 Required properties: 10 10 - compatible: Should be "mediatek,mt7623-eth" 11 11 - reg: Address and length of the register set for the device 12 - - interrupts: Should contain the frame engines interrupt 12 + - interrupts: Should contain the three frame engines interrupts in numeric 13 + order. These are fe_int0, fe_int1 and fe_int2. 13 14 - clocks: the clock used by the core 14 15 - clock-names: the names of the clock listed in the clocks property. These are 15 16 "ethif", "esw", "gp2", "gp1" ··· 43 42 <&ethsys CLK_ETHSYS_GP2>, 44 43 <&ethsys CLK_ETHSYS_GP1>; 45 44 clock-names = "ethif", "esw", "gp2", "gp1"; 46 - interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>; 45 + interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW 46 + GIC_SPI 199 IRQ_TYPE_LEVEL_LOW 47 + GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>; 47 48 power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; 48 49 resets = <&ethsys MT2701_ETHSYS_ETH_RST>; 49 50 reset-names = "eth";
+11 -7
Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
··· 8 8 of memory mapped region. 9 9 - clock-names: from common clock binding: 10 10 Required elements: "24m" 11 - - rockchip,grf: phandle to the syscon managing the "general register files" 12 11 - #phy-cells : from the generic PHY bindings, must be 0; 13 12 14 13 Example: 15 14 16 - edp_phy: edp-phy { 17 - compatible = "rockchip,rk3288-dp-phy"; 18 - rockchip,grf = <&grf>; 19 - clocks = <&cru SCLK_EDP_24M>; 20 - clock-names = "24m"; 21 - #phy-cells = <0>; 15 + grf: syscon@ff770000 { 16 + compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd"; 17 + 18 + ... 19 + 20 + edp_phy: edp-phy { 21 + compatible = "rockchip,rk3288-dp-phy"; 22 + clocks = <&cru SCLK_EDP_24M>; 23 + clock-names = "24m"; 24 + #phy-cells = <0>; 25 + }; 22 26 };
+14 -8
Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
··· 3 3 4 4 Required properties: 5 5 - compatible: rockchip,rk3399-emmc-phy 6 - - rockchip,grf : phandle to the syscon managing the "general 7 - register files" 8 6 - #phy-cells: must be 0 9 - - reg: PHY configure reg address offset in "general 7 + - reg: PHY register address offset and length in "general 10 8 register files" 11 9 12 10 Example: 13 11 14 - emmcphy: phy { 15 - compatible = "rockchip,rk3399-emmc-phy"; 16 - rockchip,grf = <&grf>; 17 - reg = <0xf780>; 18 - #phy-cells = <0>; 12 + 13 + grf: syscon@ff770000 { 14 + compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd"; 15 + #address-cells = <1>; 16 + #size-cells = <1>; 17 + 18 + ... 19 + 20 + emmcphy: phy@f780 { 21 + compatible = "rockchip,rk3399-emmc-phy"; 22 + reg = <0xf780 0x20>; 23 + #phy-cells = <0>; 24 + }; 19 25 };
+4 -3
Documentation/devicetree/bindings/rtc/s3c-rtc.txt
··· 15 15 is the rtc tick interrupt. The number of cells representing a interrupt 16 16 depends on the parent interrupt controller. 17 17 - clocks: Must contain a list of phandle and clock specifier for the rtc 18 - and source clocks. 19 - - clock-names: Must contain "rtc" and "rtc_src" entries sorted in the 20 - same order as the clocks property. 18 + clock and in the case of a s3c6410 compatible controller, also 19 + a source clock. 20 + - clock-names: Must contain "rtc" and for a s3c6410 compatible controller, 21 + a "rtc_src" sorted in the same order as the clocks property. 21 22 22 23 Example: 23 24
+4
Documentation/input/event-codes.txt
··· 173 173 proximity of the device and while the value of the BTN_TOUCH code is 0. If 174 174 the input device may be used freely in three dimensions, consider ABS_Z 175 175 instead. 176 + - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable 177 + proximity and set to 0 when the tool leaves detectable proximity. 178 + BTN_TOOL_<name> signals the type of tool that is currently detected by the 179 + hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH. 176 180 177 181 * ABS_MT_<name>: 178 182 - Used to describe multitouch input events. Please see
+9 -8
Documentation/sysctl/vm.txt
··· 581 581 "Zone Order" orders the zonelists by zone type, then by node within each 582 582 zone. Specify "[Zz]one" for zone order. 583 583 584 - Specify "[Dd]efault" to request automatic configuration. Autoconfiguration 585 - will select "node" order in following case. 586 - (1) if the DMA zone does not exist or 587 - (2) if the DMA zone comprises greater than 50% of the available memory or 588 - (3) if any node's DMA zone comprises greater than 70% of its local memory and 589 - the amount of local memory is big enough. 584 + Specify "[Dd]efault" to request automatic configuration. 590 585 591 - Otherwise, "zone" order will be selected. Default order is recommended unless 592 - this is causing problems for your system/application. 586 + On 32-bit, the Normal zone needs to be preserved for allocations accessible 587 + by the kernel, so "zone" order will be selected. 588 + 589 + On 64-bit, devices that require DMA32/DMA are relatively rare, so "node" 590 + order will be selected. 591 + 592 + Default order is recommended unless this is causing problems for your 593 + system/application. 593 594 594 595 ============================================================== 595 596
+3 -3
Documentation/x86/x86_64/mm.txt
··· 19 19 ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space 20 20 ... unused hole ... 21 21 ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0 22 - ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space 22 + ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space 23 23 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls 24 24 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole 25 25 ··· 31 31 the processes using the page fault handler, with init_level4_pgt as 32 32 reference. 33 33 34 - Current X86-64 implementations only support 40 bits of address space, 35 - but we support up to 46 bits. This expands into MBZ space in the page tables. 34 + Current X86-64 implementations support up to 46 bits of address space (64 TB), 35 + which is our current limit. This expands into MBZ space in the page tables. 36 36 37 37 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual 38 38 memory window (this size is arbitrary, it can be raised later if needed).
+12 -3
MAINTAINERS
··· 6027 6027 6028 6028 ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR 6029 6029 M: Or Gerlitz <ogerlitz@mellanox.com> 6030 - M: Sagi Grimberg <sagig@mellanox.com> 6030 + M: Sagi Grimberg <sagi@grimberg.me> 6031 6031 M: Roi Dayan <roid@mellanox.com> 6032 6032 L: linux-rdma@vger.kernel.org 6033 6033 S: Supported ··· 6037 6037 F: drivers/infiniband/ulp/iser/ 6038 6038 6039 6039 ISCSI EXTENSIONS FOR RDMA (ISER) TARGET 6040 - M: Sagi Grimberg <sagig@mellanox.com> 6040 + M: Sagi Grimberg <sagi@grimberg.me> 6041 6041 T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master 6042 6042 L: linux-rdma@vger.kernel.org 6043 6043 L: target-devel@vger.kernel.org ··· 6400 6400 F: mm/kmemleak-test.c 6401 6401 6402 6402 KPROBES 6403 - M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> 6403 + M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com> 6404 6404 M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6405 6405 M: "David S. Miller" <davem@davemloft.net> 6406 6406 M: Masami Hiramatsu <mhiramat@kernel.org> ··· 11070 11070 S: Maintained 11071 11071 F: drivers/clk/ti/ 11072 11072 F: include/linux/clk/ti.h 11073 + 11074 + TI ETHERNET SWITCH DRIVER (CPSW) 11075 + M: Mugunthan V N <mugunthanvnm@ti.com> 11076 + R: Grygorii Strashko <grygorii.strashko@ti.com> 11077 + L: linux-omap@vger.kernel.org 11078 + L: netdev@vger.kernel.org 11079 + S: Maintained 11080 + F: drivers/net/ethernet/ti/cpsw* 11081 + F: drivers/net/ethernet/ti/davinci* 11073 11082 11074 11083 TI FLASH MEDIA INTERFACE DRIVER 11075 11084 M: Alex Dubov <oakad@yahoo.com>
+4 -3
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 5 - NAME = Blurry Fish Butt 4 + EXTRAVERSION = -rc6 5 + NAME = Charred Weasel 6 6 7 7 # *DOCUMENTATION* 8 8 # To see a list of typical targets execute "make help" ··· 1008 1008 prepare: prepare0 prepare-objtool 1009 1009 1010 1010 ifdef CONFIG_STACK_VALIDATION 1011 - has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0) 1011 + has_libelf := $(call try-run,\ 1012 + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0) 1012 1013 ifeq ($(has_libelf),1) 1013 1014 objtool_target := tools/objtool FORCE 1014 1015 else
+2
arch/arc/Kconfig
··· 35 35 select NO_BOOTMEM 36 36 select OF 37 37 select OF_EARLY_FLATTREE 38 + select OF_RESERVED_MEM 38 39 select PERF_USE_VMALLOC 39 40 select HAVE_DEBUG_STACKOVERFLOW 41 + select HAVE_GENERIC_DMA_COHERENT 40 42 41 43 config MIGHT_HAVE_PCI 42 44 bool
+35 -1
arch/arc/include/asm/irqflags-arcv2.h
··· 18 18 #define STATUS_AD_MASK (1<<STATUS_AD_BIT) 19 19 #define STATUS_IE_MASK (1<<STATUS_IE_BIT) 20 20 21 + /* status32 Bits as encoded/expected by CLRI/SETI */ 22 + #define CLRI_STATUS_IE_BIT 4 23 + 24 + #define CLRI_STATUS_E_MASK 0xF 25 + #define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT) 26 + 21 27 #define AUX_USER_SP 0x00D 22 28 #define AUX_IRQ_CTRL 0x00E 23 29 #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ ··· 106 100 : 107 101 : "memory"); 108 102 103 + /* To be compatible with irq_save()/irq_restore() 104 + * encode the irq bits as expected by CLRI/SETI 105 + * (this was needed to make CONFIG_TRACE_IRQFLAGS work) 106 + */ 107 + temp = (1 << 5) | 108 + ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) | 109 + (temp & CLRI_STATUS_E_MASK); 109 110 return temp; 110 111 } 111 112 ··· 121 108 */ 122 109 static inline int arch_irqs_disabled_flags(unsigned long flags) 123 110 { 124 - return !(flags & (STATUS_IE_MASK)); 111 + return !(flags & CLRI_STATUS_IE_MASK); 125 112 } 126 113 127 114 static inline int arch_irqs_disabled(void) ··· 141 128 142 129 #else 143 130 131 + #ifdef CONFIG_TRACE_IRQFLAGS 132 + 133 + .macro TRACE_ASM_IRQ_DISABLE 134 + bl trace_hardirqs_off 135 + .endm 136 + 137 + .macro TRACE_ASM_IRQ_ENABLE 138 + bl trace_hardirqs_on 139 + .endm 140 + 141 + #else 142 + 143 + .macro TRACE_ASM_IRQ_DISABLE 144 + .endm 145 + 146 + .macro TRACE_ASM_IRQ_ENABLE 147 + .endm 148 + 149 + #endif 144 150 .macro IRQ_DISABLE scratch 145 151 clri 152 + TRACE_ASM_IRQ_DISABLE 146 153 .endm 147 154 148 155 .macro IRQ_ENABLE scratch 156 + TRACE_ASM_IRQ_ENABLE 149 157 seti 150 158 .endm 151 159
+9 -1
arch/arc/kernel/entry-arcv2.S
··· 69 69 70 70 clri ; To make status32.IE agree with CPU internal state 71 71 72 - lr r0, [ICAUSE] 72 + #ifdef CONFIG_TRACE_IRQFLAGS 73 + TRACE_ASM_IRQ_DISABLE 74 + #endif 73 75 76 + lr r0, [ICAUSE] 74 77 mov blink, ret_from_exception 75 78 76 79 b.d arch_do_IRQ ··· 171 168 ; All 2 entry points to here already disable interrupts 172 169 173 170 .Lrestore_regs: 171 + 172 + # Interrpts are actually disabled from this point on, but will get 173 + # reenabled after we return from interrupt/exception. 174 + # But irq tracer needs to be told now... 175 + TRACE_ASM_IRQ_ENABLE 174 176 175 177 ld r0, [sp, PT_status32] ; U/K mode at time of entry 176 178 lr r10, [AUX_IRQ_ACT]
+3
arch/arc/kernel/entry-compact.S
··· 341 341 342 342 .Lrestore_regs: 343 343 344 + # Interrpts are actually disabled from this point on, but will get 345 + # reenabled after we return from interrupt/exception. 346 + # But irq tracer needs to be told now... 344 347 TRACE_ASM_IRQ_ENABLE 345 348 346 349 lr r10, [status32]
+4
arch/arc/mm/init.c
··· 13 13 #ifdef CONFIG_BLK_DEV_INITRD 14 14 #include <linux/initrd.h> 15 15 #endif 16 + #include <linux/of_fdt.h> 16 17 #include <linux/swap.h> 17 18 #include <linux/module.h> 18 19 #include <linux/highmem.h> ··· 136 135 if (initrd_start) 137 136 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); 138 137 #endif 138 + 139 + early_init_fdt_reserve_self(); 140 + early_init_fdt_scan_reserved_mem(); 139 141 140 142 memblock_dump_all(); 141 143
+1 -1
arch/arm/boot/dts/am33xx.dtsi
··· 860 860 ti,no-idle-on-init; 861 861 reg = <0x50000000 0x2000>; 862 862 interrupts = <100>; 863 - dmas = <&edma 52>; 863 + dmas = <&edma 52 0>; 864 864 dma-names = "rxtx"; 865 865 gpmc,num-cs = <7>; 866 866 gpmc,num-waitpins = <2>;
+1 -1
arch/arm/boot/dts/am4372.dtsi
··· 884 884 gpmc: gpmc@50000000 { 885 885 compatible = "ti,am3352-gpmc"; 886 886 ti,hwmods = "gpmc"; 887 - dmas = <&edma 52>; 887 + dmas = <&edma 52 0>; 888 888 dma-names = "rxtx"; 889 889 clocks = <&l3s_gclk>; 890 890 clock-names = "fck";
-17
arch/arm/boot/dts/am57xx-beagle-x15.dts
··· 99 99 #cooling-cells = <2>; 100 100 }; 101 101 102 - extcon_usb1: extcon_usb1 { 103 - compatible = "linux,extcon-usb-gpio"; 104 - id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>; 105 - pinctrl-names = "default"; 106 - pinctrl-0 = <&extcon_usb1_pins>; 107 - }; 108 - 109 102 hdmi0: connector { 110 103 compatible = "hdmi-connector"; 111 104 label = "hdmi"; ··· 339 346 usb1_pins: pinmux_usb1_pins { 340 347 pinctrl-single,pins = < 341 348 DRA7XX_CORE_IOPAD(0x3680, PIN_INPUT_SLEW | MUX_MODE0) /* usb1_drvvbus */ 342 - >; 343 - }; 344 - 345 - extcon_usb1_pins: extcon_usb1_pins { 346 - pinctrl-single,pins = < 347 - DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */ 348 349 >; 349 350 }; 350 351 ··· 691 704 dr_mode = "host"; 692 705 pinctrl-names = "default"; 693 706 pinctrl-0 = <&usb1_pins>; 694 - }; 695 - 696 - &omap_dwc3_1 { 697 - extcon = <&extcon_usb1>; 698 707 }; 699 708 700 709 &omap_dwc3_2 {
+212 -31
arch/arm/boot/dts/dm814x-clocks.dtsi
··· 4 4 * published by the Free Software Foundation. 5 5 */ 6 6 7 + &pllss { 8 + /* 9 + * See TRM "2.6.10 Connected outputso DPLLS" and 10 + * "2.6.11 Connected Outputs of DPLLJ". Only clkout is 11 + * connected except for hdmi and usb. 12 + */ 13 + adpll_mpu_ck: adpll@40 { 14 + #clock-cells = <1>; 15 + compatible = "ti,dm814-adpll-s-clock"; 16 + reg = <0x40 0x40>; 17 + clocks = <&devosc_ck &devosc_ck &devosc_ck>; 18 + clock-names = "clkinp", "clkinpulow", "clkinphif"; 19 + clock-output-names = "481c5040.adpll.dcoclkldo", 20 + "481c5040.adpll.clkout", 21 + "481c5040.adpll.clkoutx2", 22 + "481c5040.adpll.clkouthif"; 23 + }; 24 + 25 + adpll_dsp_ck: adpll@80 { 26 + #clock-cells = <1>; 27 + compatible = "ti,dm814-adpll-lj-clock"; 28 + reg = <0x80 0x30>; 29 + clocks = <&devosc_ck &devosc_ck>; 30 + clock-names = "clkinp", "clkinpulow"; 31 + clock-output-names = "481c5080.adpll.dcoclkldo", 32 + "481c5080.adpll.clkout", 33 + "481c5080.adpll.clkoutldo"; 34 + }; 35 + 36 + adpll_sgx_ck: adpll@b0 { 37 + #clock-cells = <1>; 38 + compatible = "ti,dm814-adpll-lj-clock"; 39 + reg = <0xb0 0x30>; 40 + clocks = <&devosc_ck &devosc_ck>; 41 + clock-names = "clkinp", "clkinpulow"; 42 + clock-output-names = "481c50b0.adpll.dcoclkldo", 43 + "481c50b0.adpll.clkout", 44 + "481c50b0.adpll.clkoutldo"; 45 + }; 46 + 47 + adpll_hdvic_ck: adpll@e0 { 48 + #clock-cells = <1>; 49 + compatible = "ti,dm814-adpll-lj-clock"; 50 + reg = <0xe0 0x30>; 51 + clocks = <&devosc_ck &devosc_ck>; 52 + clock-names = "clkinp", "clkinpulow"; 53 + clock-output-names = "481c50e0.adpll.dcoclkldo", 54 + "481c50e0.adpll.clkout", 55 + "481c50e0.adpll.clkoutldo"; 56 + }; 57 + 58 + adpll_l3_ck: adpll@110 { 59 + #clock-cells = <1>; 60 + compatible = "ti,dm814-adpll-lj-clock"; 61 + reg = <0x110 0x30>; 62 + clocks = <&devosc_ck &devosc_ck>; 63 + clock-names = "clkinp", "clkinpulow"; 64 + clock-output-names = "481c5110.adpll.dcoclkldo", 65 + "481c5110.adpll.clkout", 66 + "481c5110.adpll.clkoutldo"; 67 + }; 68 + 69 + 
adpll_isp_ck: adpll@140 { 70 + #clock-cells = <1>; 71 + compatible = "ti,dm814-adpll-lj-clock"; 72 + reg = <0x140 0x30>; 73 + clocks = <&devosc_ck &devosc_ck>; 74 + clock-names = "clkinp", "clkinpulow"; 75 + clock-output-names = "481c5140.adpll.dcoclkldo", 76 + "481c5140.adpll.clkout", 77 + "481c5140.adpll.clkoutldo"; 78 + }; 79 + 80 + adpll_dss_ck: adpll@170 { 81 + #clock-cells = <1>; 82 + compatible = "ti,dm814-adpll-lj-clock"; 83 + reg = <0x170 0x30>; 84 + clocks = <&devosc_ck &devosc_ck>; 85 + clock-names = "clkinp", "clkinpulow"; 86 + clock-output-names = "481c5170.adpll.dcoclkldo", 87 + "481c5170.adpll.clkout", 88 + "481c5170.adpll.clkoutldo"; 89 + }; 90 + 91 + adpll_video0_ck: adpll@1a0 { 92 + #clock-cells = <1>; 93 + compatible = "ti,dm814-adpll-lj-clock"; 94 + reg = <0x1a0 0x30>; 95 + clocks = <&devosc_ck &devosc_ck>; 96 + clock-names = "clkinp", "clkinpulow"; 97 + clock-output-names = "481c51a0.adpll.dcoclkldo", 98 + "481c51a0.adpll.clkout", 99 + "481c51a0.adpll.clkoutldo"; 100 + }; 101 + 102 + adpll_video1_ck: adpll@1d0 { 103 + #clock-cells = <1>; 104 + compatible = "ti,dm814-adpll-lj-clock"; 105 + reg = <0x1d0 0x30>; 106 + clocks = <&devosc_ck &devosc_ck>; 107 + clock-names = "clkinp", "clkinpulow"; 108 + clock-output-names = "481c51d0.adpll.dcoclkldo", 109 + "481c51d0.adpll.clkout", 110 + "481c51d0.adpll.clkoutldo"; 111 + }; 112 + 113 + adpll_hdmi_ck: adpll@200 { 114 + #clock-cells = <1>; 115 + compatible = "ti,dm814-adpll-lj-clock"; 116 + reg = <0x200 0x30>; 117 + clocks = <&devosc_ck &devosc_ck>; 118 + clock-names = "clkinp", "clkinpulow"; 119 + clock-output-names = "481c5200.adpll.dcoclkldo", 120 + "481c5200.adpll.clkout", 121 + "481c5200.adpll.clkoutldo"; 122 + }; 123 + 124 + adpll_audio_ck: adpll@230 { 125 + #clock-cells = <1>; 126 + compatible = "ti,dm814-adpll-lj-clock"; 127 + reg = <0x230 0x30>; 128 + clocks = <&devosc_ck &devosc_ck>; 129 + clock-names = "clkinp", "clkinpulow"; 130 + clock-output-names = "481c5230.adpll.dcoclkldo", 131 + 
"481c5230.adpll.clkout", 132 + "481c5230.adpll.clkoutldo"; 133 + }; 134 + 135 + adpll_usb_ck: adpll@260 { 136 + #clock-cells = <1>; 137 + compatible = "ti,dm814-adpll-lj-clock"; 138 + reg = <0x260 0x30>; 139 + clocks = <&devosc_ck &devosc_ck>; 140 + clock-names = "clkinp", "clkinpulow"; 141 + clock-output-names = "481c5260.adpll.dcoclkldo", 142 + "481c5260.adpll.clkout", 143 + "481c5260.adpll.clkoutldo"; 144 + }; 145 + 146 + adpll_ddr_ck: adpll@290 { 147 + #clock-cells = <1>; 148 + compatible = "ti,dm814-adpll-lj-clock"; 149 + reg = <0x290 0x30>; 150 + clocks = <&devosc_ck &devosc_ck>; 151 + clock-names = "clkinp", "clkinpulow"; 152 + clock-output-names = "481c5290.adpll.dcoclkldo", 153 + "481c5290.adpll.clkout", 154 + "481c5290.adpll.clkoutldo"; 155 + }; 156 + }; 157 + 7 158 &pllss_clocks { 8 159 timer1_fck: timer1_fck { 9 160 #clock-cells = <0>; ··· 172 21 &aud_clkin2_ck &devosc_ck &auxosc_ck &tclkin_ck>; 173 22 ti,bit-shift = <6>; 174 23 reg = <0x2e0>; 24 + }; 25 + 26 + /* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from auiod */ 27 + cpsw_cpts_rft_clk: cpsw_cpts_rft_clk { 28 + #clock-cells = <0>; 29 + compatible = "ti,mux-clock"; 30 + clocks = <&adpll_video0_ck 1 31 + &adpll_video1_ck 1 32 + &adpll_audio_ck 1>; 33 + ti,bit-shift = <1>; 34 + reg = <0x2e8>; 35 + }; 36 + 37 + /* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */ 38 + cpsw_125mhz_gclk: cpsw_125mhz_gclk { 39 + #clock-cells = <0>; 40 + compatible = "fixed-clock"; 41 + clock-frequency = <125000000>; 175 42 }; 176 43 177 44 sysclk18_ck: sysclk18_ck { ··· 248 79 compatible = "fixed-clock"; 249 80 clock-frequency = <1000000000>; 250 81 }; 251 - 252 - sysclk4_ck: sysclk4_ck { 253 - #clock-cells = <0>; 254 - compatible = "fixed-clock"; 255 - clock-frequency = <222000000>; 256 - }; 257 - 258 - sysclk6_ck: sysclk6_ck { 259 - #clock-cells = <0>; 260 - compatible = "fixed-clock"; 261 - clock-frequency = <100000000>; 262 - }; 263 - 264 - sysclk10_ck: sysclk10_ck { 265 - #clock-cells = <0>; 266 - 
compatible = "fixed-clock"; 267 - clock-frequency = <48000000>; 268 - }; 269 - 270 - cpsw_125mhz_gclk: cpsw_125mhz_gclk { 271 - #clock-cells = <0>; 272 - compatible = "fixed-clock"; 273 - clock-frequency = <125000000>; 274 - }; 275 - 276 - cpsw_cpts_rft_clk: cpsw_cpts_rft_clk { 277 - #clock-cells = <0>; 278 - compatible = "fixed-clock"; 279 - clock-frequency = <250000000>; 280 - }; 281 - 282 82 }; 283 83 284 84 &prcm_clocks { ··· 274 136 clocks = <&devosc_ck>; 275 137 clock-mult = <128>; 276 138 clock-div = <78125>; 139 + }; 140 + 141 + /* L4_HS 220 MHz*/ 142 + sysclk4_ck: sysclk4_ck { 143 + #clock-cells = <0>; 144 + compatible = "ti,fixed-factor-clock"; 145 + clocks = <&adpll_l3_ck 1>; 146 + ti,clock-mult = <1>; 147 + ti,clock-div = <1>; 148 + }; 149 + 150 + /* L4_FWCFG */ 151 + sysclk5_ck: sysclk5_ck { 152 + #clock-cells = <0>; 153 + compatible = "ti,fixed-factor-clock"; 154 + clocks = <&adpll_l3_ck 1>; 155 + ti,clock-mult = <1>; 156 + ti,clock-div = <2>; 157 + }; 158 + 159 + /* L4_LS 110 MHz */ 160 + sysclk6_ck: sysclk6_ck { 161 + #clock-cells = <0>; 162 + compatible = "ti,fixed-factor-clock"; 163 + clocks = <&adpll_l3_ck 1>; 164 + ti,clock-mult = <1>; 165 + ti,clock-div = <2>; 166 + }; 167 + 168 + sysclk8_ck: sysclk8_ck { 169 + #clock-cells = <0>; 170 + compatible = "ti,fixed-factor-clock"; 171 + clocks = <&adpll_usb_ck 1>; 172 + ti,clock-mult = <1>; 173 + ti,clock-div = <1>; 174 + }; 175 + 176 + sysclk10_ck: sysclk10_ck { 177 + compatible = "ti,divider-clock"; 178 + reg = <0x324>; 179 + ti,max-div = <7>; 180 + #clock-cells = <0>; 181 + clocks = <&adpll_usb_ck 1>; 277 182 }; 278 183 279 184 aud_clkin0_ck: aud_clkin0_ck {
+26
arch/arm/boot/dts/dra62x-clocks.dtsi
··· 6 6 7 7 #include "dm814x-clocks.dtsi" 8 8 9 + /* Compared to dm814x, dra62x does not have hdic, l3 or dss PLLs */ 10 + &adpll_hdvic_ck { 11 + status = "disabled"; 12 + }; 13 + 14 + &adpll_l3_ck { 15 + status = "disabled"; 16 + }; 17 + 18 + &adpll_dss_ck { 19 + status = "disabled"; 20 + }; 21 + 22 + /* Compared to dm814x, dra62x has interconnect clocks on isp PLL */ 23 + &sysclk4_ck { 24 + clocks = <&adpll_isp_ck 1>; 25 + }; 26 + 27 + &sysclk5_ck { 28 + clocks = <&adpll_isp_ck 1>; 29 + }; 30 + 31 + &sysclk6_ck { 32 + clocks = <&adpll_isp_ck 1>; 33 + }; 34 + 9 35 /* 10 36 * Compared to dm814x, dra62x has different shifts and more mux options. 11 37 * Please add the extra options for ysclk_14 and 16 if really needed.
+17 -1
arch/arm/boot/dts/dra7xx-clocks.dtsi
··· 98 98 clock-frequency = <32768>; 99 99 }; 100 100 101 - sys_32k_ck: sys_32k_ck { 101 + sys_clk32_crystal_ck: sys_clk32_crystal_ck { 102 102 #clock-cells = <0>; 103 103 compatible = "fixed-clock"; 104 104 clock-frequency = <32768>; 105 + }; 106 + 107 + sys_clk32_pseudo_ck: sys_clk32_pseudo_ck { 108 + #clock-cells = <0>; 109 + compatible = "fixed-factor-clock"; 110 + clocks = <&sys_clkin1>; 111 + clock-mult = <1>; 112 + clock-div = <610>; 105 113 }; 106 114 107 115 virt_12000000_ck: virt_12000000_ck { ··· 2177 2169 clocks = <&l4_root_clk_div>; 2178 2170 ti,bit-shift = <22>; 2179 2171 reg = <0x0558>; 2172 + }; 2173 + 2174 + sys_32k_ck: sys_32k_ck { 2175 + #clock-cells = <0>; 2176 + compatible = "ti,mux-clock"; 2177 + clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>; 2178 + ti,bit-shift = <8>; 2179 + reg = <0x6c4>; 2180 2180 }; 2181 2181 };
+1 -13
arch/arm/boot/dts/qcom-msm8974.dtsi
··· 1 1 /dts-v1/; 2 2 3 - #include <dt-bindings/interrupt-controller/arm-gic.h> 3 + #include <dt-bindings/interrupt-controller/irq.h> 4 4 #include <dt-bindings/clock/qcom,gcc-msm8974.h> 5 5 #include "skeleton.dtsi" 6 6 ··· 460 460 clock-names = "core", "iface"; 461 461 #address-cells = <1>; 462 462 #size-cells = <0>; 463 - dmas = <&blsp2_dma 20>, <&blsp2_dma 21>; 464 - dma-names = "tx", "rx"; 465 463 }; 466 464 467 465 spmi_bus: spmi@fc4cf000 { ··· 476 478 #size-cells = <0>; 477 479 interrupt-controller; 478 480 #interrupt-cells = <4>; 479 - }; 480 - 481 - blsp2_dma: dma-controller@f9944000 { 482 - compatible = "qcom,bam-v1.4.0"; 483 - reg = <0xf9944000 0x19000>; 484 - interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>; 485 - clocks = <&gcc GCC_BLSP2_AHB_CLK>; 486 - clock-names = "bam_clk"; 487 - #dma-cells = <1>; 488 - qcom,ee = <0>; 489 481 }; 490 482 }; 491 483
+1
arch/arm/boot/dts/r8a7791-koelsch.dts
··· 661 661 }; 662 662 663 663 &pcie_bus_clk { 664 + clock-frequency = <100000000>; 664 665 status = "okay"; 665 666 }; 666 667
+1 -13
arch/arm/boot/dts/r8a7791-porter.dts
··· 143 143 }; 144 144 145 145 &pfc { 146 - pinctrl-0 = <&scif_clk_pins>; 147 - pinctrl-names = "default"; 148 - 149 146 scif0_pins: serial0 { 150 147 renesas,groups = "scif0_data_d"; 151 148 renesas,function = "scif0"; 152 - }; 153 - 154 - scif_clk_pins: scif_clk { 155 - renesas,groups = "scif_clk"; 156 - renesas,function = "scif_clk"; 157 149 }; 158 150 159 151 ether_pins: ether { ··· 218 226 pinctrl-0 = <&scif0_pins>; 219 227 pinctrl-names = "default"; 220 228 221 - status = "okay"; 222 - }; 223 - 224 - &scif_clk { 225 - clock-frequency = <14745600>; 226 229 status = "okay"; 227 230 }; 228 231 ··· 401 414 }; 402 415 403 416 &pcie_bus_clk { 417 + clock-frequency = <100000000>; 404 418 status = "okay"; 405 419 }; 406 420
+1 -4
arch/arm/boot/dts/r8a7791.dtsi
··· 1083 1083 pcie_bus_clk: pcie_bus_clk { 1084 1084 compatible = "fixed-clock"; 1085 1085 #clock-cells = <0>; 1086 - clock-frequency = <100000000>; 1086 + clock-frequency = <0>; 1087 1087 clock-output-names = "pcie_bus"; 1088 - status = "disabled"; 1089 1088 }; 1090 1089 1091 1090 /* External SCIF clock */ ··· 1093 1094 #clock-cells = <0>; 1094 1095 /* This value must be overridden by the board. */ 1095 1096 clock-frequency = <0>; 1096 - status = "disabled"; 1097 1097 }; 1098 1098 1099 1099 /* External USB clock - can be overridden by the board */ ··· 1110 1112 /* This value must be overridden by the board. */ 1111 1113 clock-frequency = <0>; 1112 1114 clock-output-names = "can_clk"; 1113 - status = "disabled"; 1114 1115 }; 1115 1116 1116 1117 /* Special CPG clocks */
+1 -1
arch/arm/include/asm/cputype.h
··· 276 276 int feature = (features >> field) & 15; 277 277 278 278 /* feature registers are signed values */ 279 - if (feature > 8) 279 + if (feature > 7) 280 280 feature -= 16; 281 281 282 282 return feature;
+1 -1
arch/arm/kernel/setup.c
··· 512 512 */ 513 513 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 || 514 514 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 && 515 - cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3)) 515 + cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3)) 516 516 elf_hwcap &= ~HWCAP_SWP; 517 517 } 518 518
+3 -2
arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
··· 71 71 if (!pdata) 72 72 pdata = &default_esdhc_pdata; 73 73 74 - return imx_add_platform_device(data->devid, data->id, res, 75 - ARRAY_SIZE(res), pdata, sizeof(*pdata)); 74 + return imx_add_platform_device_dmamask(data->devid, data->id, res, 75 + ARRAY_SIZE(res), pdata, sizeof(*pdata), 76 + DMA_BIT_MASK(32)); 76 77 }
+1 -1
arch/arm/mach-omap2/clockdomains7xx_data.c
··· 461 461 .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST, 462 462 .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS, 463 463 .dep_bit = DRA7XX_IPU_STATDEP_SHIFT, 464 - .flags = CLKDM_CAN_HWSUP_SWSUP, 464 + .flags = CLKDM_CAN_SWSUP, 465 465 }; 466 466 467 467 static struct clockdomain mpu1_7xx_clkdm = {
+2 -1
arch/arm/mach-omap2/io.c
··· 737 737 #ifdef CONFIG_SOC_DRA7XX 738 738 void __init dra7xx_init_early(void) 739 739 { 740 - omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE)); 740 + omap2_set_globals_tap(DRA7XX_CLASS, 741 + OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE)); 741 742 omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE)); 742 743 omap2_control_base_init(); 743 744 omap4_pm_init_early();
+7
arch/arm/mach-omap2/omap-wakeupgen.c
··· 274 274 */ 275 275 static void irq_save_context(void) 276 276 { 277 + /* DRA7 has no SAR to save */ 278 + if (soc_is_dra7xx()) 279 + return; 280 + 277 281 if (!sar_base) 278 282 sar_base = omap4_get_sar_ram_base(); 279 283 ··· 294 290 { 295 291 u32 val; 296 292 u32 offset = SAR_BACKUP_STATUS_OFFSET; 293 + /* DRA7 has no SAR to save */ 294 + if (soc_is_dra7xx()) 295 + return; 297 296 298 297 if (soc_is_omap54xx()) 299 298 offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
+13 -10
arch/arm/mach-omap2/pm34xx.c
··· 198 198 int per_next_state = PWRDM_POWER_ON; 199 199 int core_next_state = PWRDM_POWER_ON; 200 200 int per_going_off; 201 - int core_prev_state; 202 201 u32 sdrc_pwr = 0; 203 202 204 203 mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); ··· 277 278 sdrc_write_reg(sdrc_pwr, SDRC_POWER); 278 279 279 280 /* CORE */ 280 - if (core_next_state < PWRDM_POWER_ON) { 281 - core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); 282 - if (core_prev_state == PWRDM_POWER_OFF) { 283 - omap3_core_restore_context(); 284 - omap3_cm_restore_context(); 285 - omap3_sram_restore_context(); 286 - omap2_sms_restore_context(); 287 - } 281 + if (core_next_state < PWRDM_POWER_ON && 282 + pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) { 283 + omap3_core_restore_context(); 284 + omap3_cm_restore_context(); 285 + omap3_sram_restore_context(); 286 + omap2_sms_restore_context(); 287 + } else { 288 + /* 289 + * In off-mode resume path above, omap3_core_restore_context 290 + * also handles the INTC autoidle restore done here so limit 291 + * this to non-off mode resume paths so we don't do it twice. 292 + */ 293 + omap3_intc_resume_idle(); 288 294 } 289 - omap3_intc_resume_idle(); 290 295 291 296 pwrdm_post_transition(NULL); 292 297
+11 -17
arch/arm/mach-shmobile/timer.c
··· 40 40 void __init shmobile_init_delay(void) 41 41 { 42 42 struct device_node *np, *cpus; 43 - bool is_a7_a8_a9 = false; 44 - bool is_a15 = false; 43 + unsigned int div = 0; 45 44 bool has_arch_timer = false; 46 45 u32 max_freq = 0; 47 46 ··· 54 55 if (!of_property_read_u32(np, "clock-frequency", &freq)) 55 56 max_freq = max(max_freq, freq); 56 57 57 - if (of_device_is_compatible(np, "arm,cortex-a8") || 58 - of_device_is_compatible(np, "arm,cortex-a9")) { 59 - is_a7_a8_a9 = true; 60 - } else if (of_device_is_compatible(np, "arm,cortex-a7")) { 61 - is_a7_a8_a9 = true; 62 - has_arch_timer = true; 63 - } else if (of_device_is_compatible(np, "arm,cortex-a15")) { 64 - is_a15 = true; 58 + if (of_device_is_compatible(np, "arm,cortex-a8")) { 59 + div = 2; 60 + } else if (of_device_is_compatible(np, "arm,cortex-a9")) { 61 + div = 1; 62 + } else if (of_device_is_compatible(np, "arm,cortex-a7") || 63 + of_device_is_compatible(np, "arm,cortex-a15")) { 64 + div = 1; 65 65 has_arch_timer = true; 66 66 } 67 67 } 68 68 69 69 of_node_put(cpus); 70 70 71 - if (!max_freq) 71 + if (!max_freq || !div) 72 72 return; 73 73 74 - if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { 75 - if (is_a7_a8_a9) 76 - shmobile_setup_delay_hz(max_freq, 1, 3); 77 - else if (is_a15) 78 - shmobile_setup_delay_hz(max_freq, 2, 4); 79 - } 74 + if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) 75 + shmobile_setup_delay_hz(max_freq, 1, div); 80 76 }
+2 -1
arch/arm/mm/dma-mapping.c
··· 762 762 if (!mask) 763 763 return NULL; 764 764 765 - buf = kzalloc(sizeof(*buf), gfp); 765 + buf = kzalloc(sizeof(*buf), 766 + gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)); 766 767 if (!buf) 767 768 return NULL; 768 769
-1
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
··· 70 70 i2c3 = &i2c3; 71 71 i2c4 = &i2c4; 72 72 i2c5 = &i2c5; 73 - i2c6 = &i2c6; 74 73 }; 75 74 }; 76 75
+5 -15
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
··· 201 201 202 202 i2c2: i2c@58782000 { 203 203 compatible = "socionext,uniphier-fi2c"; 204 - status = "disabled"; 205 204 reg = <0x58782000 0x80>; 206 205 #address-cells = <1>; 207 206 #size-cells = <0>; 208 207 interrupts = <0 43 4>; 209 - pinctrl-names = "default"; 210 - pinctrl-0 = <&pinctrl_i2c2>; 211 208 clocks = <&i2c_clk>; 212 - clock-frequency = <100000>; 209 + clock-frequency = <400000>; 213 210 }; 214 211 215 212 i2c3: i2c@58783000 { ··· 224 227 225 228 i2c4: i2c@58784000 { 226 229 compatible = "socionext,uniphier-fi2c"; 230 + status = "disabled"; 227 231 reg = <0x58784000 0x80>; 228 232 #address-cells = <1>; 229 233 #size-cells = <0>; 230 234 interrupts = <0 45 4>; 235 + pinctrl-names = "default"; 236 + pinctrl-0 = <&pinctrl_i2c4>; 231 237 clocks = <&i2c_clk>; 232 - clock-frequency = <400000>; 238 + clock-frequency = <100000>; 233 239 }; 234 240 235 241 i2c5: i2c@58785000 { ··· 241 241 #address-cells = <1>; 242 242 #size-cells = <0>; 243 243 interrupts = <0 25 4>; 244 - clocks = <&i2c_clk>; 245 - clock-frequency = <400000>; 246 - }; 247 - 248 - i2c6: i2c@58786000 { 249 - compatible = "socionext,uniphier-fi2c"; 250 - reg = <0x58786000 0x80>; 251 - #address-cells = <1>; 252 - #size-cells = <0>; 253 - interrupts = <0 26 4>; 254 244 clocks = <&i2c_clk>; 255 245 clock-frequency = <400000>; 256 246 };
+12 -1
arch/arm64/kernel/head.S
··· 588 588 msr vpidr_el2, x0 589 589 msr vmpidr_el2, x1 590 590 591 + /* 592 + * When VHE is not in use, early init of EL2 and EL1 needs to be 593 + * done here. 594 + * When VHE _is_ in use, EL1 will not be used in the host and 595 + * requires no configuration, and all non-hyp-specific EL2 setup 596 + * will be done via the _EL1 system register aliases in __cpu_setup. 597 + */ 598 + cbnz x2, 1f 599 + 591 600 /* sctlr_el1 */ 592 601 mov x0, #0x0800 // Set/clear RES{1,0} bits 593 602 CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems ··· 606 597 /* Coprocessor traps. */ 607 598 mov x0, #0x33ff 608 599 msr cptr_el2, x0 // Disable copro. traps to EL2 600 + 1: 609 601 610 602 #ifdef CONFIG_COMPAT 611 603 msr hstr_el2, xzr // Disable CP15 traps to EL2 ··· 744 734 745 735 .macro update_early_cpu_boot_status status, tmp1, tmp2 746 736 mov \tmp2, #\status 747 - str_l \tmp2, __early_cpu_boot_status, \tmp1 737 + adr_l \tmp1, __early_cpu_boot_status 738 + str \tmp2, [\tmp1] 748 739 dmb sy 749 740 dc ivac, \tmp1 // Invalidate potentially stale cache line 750 741 .endm
+6 -5
arch/arm64/kernel/smp_spin_table.c
··· 52 52 static int smp_spin_table_cpu_init(unsigned int cpu) 53 53 { 54 54 struct device_node *dn; 55 + int ret; 55 56 56 57 dn = of_get_cpu_node(cpu, NULL); 57 58 if (!dn) ··· 61 60 /* 62 61 * Determine the address from which the CPU is polling. 63 62 */ 64 - if (of_property_read_u64(dn, "cpu-release-addr", 65 - &cpu_release_addr[cpu])) { 63 + ret = of_property_read_u64(dn, "cpu-release-addr", 64 + &cpu_release_addr[cpu]); 65 + if (ret) 66 66 pr_err("CPU %d: missing or invalid cpu-release-addr property\n", 67 67 cpu); 68 68 69 - return -1; 70 - } 69 + of_node_put(dn); 71 70 72 - return 0; 71 + return ret; 73 72 } 74 73 75 74 static int smp_spin_table_cpu_prepare(unsigned int cpu)
+1 -1
arch/nios2/lib/memset.c
··· 68 68 "=r" (charcnt), /* %1 Output */ 69 69 "=r" (dwordcnt), /* %2 Output */ 70 70 "=r" (fill8reg), /* %3 Output */ 71 - "=r" (wrkrega) /* %4 Output */ 71 + "=&r" (wrkrega) /* %4 Output only */ 72 72 : "r" (c), /* %5 Input */ 73 73 "0" (s), /* %0 Input/Output */ 74 74 "1" (count) /* %1 Input/Output */
+2
arch/powerpc/include/asm/systbl.h
··· 384 384 SYSCALL(ni_syscall) 385 385 SYSCALL(mlock2) 386 386 SYSCALL(copy_file_range) 387 + COMPAT_SYS_SPU(preadv2) 388 + COMPAT_SYS_SPU(pwritev2)
+1 -1
arch/powerpc/include/asm/unistd.h
··· 12 12 #include <uapi/asm/unistd.h> 13 13 14 14 15 - #define NR_syscalls 380 15 + #define NR_syscalls 382 16 16 17 17 #define __NR__exit __NR_exit 18 18
+1
arch/powerpc/include/uapi/asm/cputable.h
··· 31 31 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \ 32 32 0x00000040 33 33 34 + /* Reserved - do not use 0x00000004 */ 34 35 #define PPC_FEATURE_TRUE_LE 0x00000002 35 36 #define PPC_FEATURE_PPC_LE 0x00000001 36 37
+2
arch/powerpc/include/uapi/asm/unistd.h
··· 390 390 #define __NR_membarrier 365 391 391 #define __NR_mlock2 378 392 392 #define __NR_copy_file_range 379 393 + #define __NR_preadv2 380 394 + #define __NR_pwritev2 381 393 395 394 396 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
+15 -11
arch/powerpc/kernel/prom.c
··· 148 148 unsigned long cpu_features; /* CPU_FTR_xxx bit */ 149 149 unsigned long mmu_features; /* MMU_FTR_xxx bit */ 150 150 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ 151 + unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */ 151 152 unsigned char pabyte; /* byte number in ibm,pa-features */ 152 153 unsigned char pabit; /* bit number (big-endian) */ 153 154 unsigned char invert; /* if 1, pa bit set => clear feature */ 154 155 } ibm_pa_features[] __initdata = { 155 - {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, 156 - {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, 157 - {CPU_FTR_CTRL, 0, 0, 0, 3, 0}, 158 - {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0}, 159 - {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, 160 - {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 161 - {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 156 + {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0}, 157 + {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0}, 158 + {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0}, 159 + {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0}, 160 + {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1}, 161 + {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0}, 162 + {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0}, 162 163 /* 163 - * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n), 164 - * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP 165 - * which is 0 if the kernel doesn't support TM. 164 + * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n), 165 + * we don't want to turn on TM here, so we use the *_COMP versions 166 + * which are 0 if the kernel doesn't support TM. 
166 167 */ 167 - {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0}, 168 + {CPU_FTR_TM_COMP, 0, 0, 169 + PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0}, 168 170 }; 169 171 170 172 static void __init scan_features(unsigned long node, const unsigned char *ftrs, ··· 197 195 if (bit ^ fp->invert) { 198 196 cur_cpu_spec->cpu_features |= fp->cpu_features; 199 197 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 198 + cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2; 200 199 cur_cpu_spec->mmu_features |= fp->mmu_features; 201 200 } else { 202 201 cur_cpu_spec->cpu_features &= ~fp->cpu_features; 203 202 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; 203 + cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2; 204 204 cur_cpu_spec->mmu_features &= ~fp->mmu_features; 205 205 } 206 206 }
+3
arch/s390/Kconfig
··· 4 4 config ZONE_DMA 5 5 def_bool y 6 6 7 + config CPU_BIG_ENDIAN 8 + def_bool y 9 + 7 10 config LOCKDEP_SUPPORT 8 11 def_bool y 9 12
+1 -1
arch/s390/include/asm/mmu.h
··· 11 11 spinlock_t list_lock; 12 12 struct list_head pgtable_list; 13 13 struct list_head gmap_list; 14 - unsigned long asce_bits; 14 + unsigned long asce; 15 15 unsigned long asce_limit; 16 16 unsigned long vdso_base; 17 17 /* The mmu context allocates 4K page tables. */
+22 -6
arch/s390/include/asm/mmu_context.h
··· 26 26 mm->context.has_pgste = 0; 27 27 mm->context.use_skey = 0; 28 28 #endif 29 - if (mm->context.asce_limit == 0) { 29 + switch (mm->context.asce_limit) { 30 + case 1UL << 42: 31 + /* 32 + * forked 3-level task, fall through to set new asce with new 33 + * mm->pgd 34 + */ 35 + case 0: 30 36 /* context created by exec, set asce limit to 4TB */ 31 - mm->context.asce_bits = _ASCE_TABLE_LENGTH | 32 - _ASCE_USER_BITS | _ASCE_TYPE_REGION3; 33 37 mm->context.asce_limit = STACK_TOP_MAX; 34 - } else if (mm->context.asce_limit == (1UL << 31)) { 38 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 39 + _ASCE_USER_BITS | _ASCE_TYPE_REGION3; 40 + break; 41 + case 1UL << 53: 42 + /* forked 4-level task, set new asce with new mm->pgd */ 43 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 44 + _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 45 + break; 46 + case 1UL << 31: 47 + /* forked 2-level compat task, set new asce with new mm->pgd */ 48 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 49 + _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; 50 + /* pgd_alloc() did not increase mm->nr_pmds */ 35 51 mm_inc_nr_pmds(mm); 36 52 } 37 53 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); ··· 58 42 59 43 static inline void set_user_asce(struct mm_struct *mm) 60 44 { 61 - S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd); 45 + S390_lowcore.user_asce = mm->context.asce; 62 46 if (current->thread.mm_segment.ar4) 63 47 __ctl_load(S390_lowcore.user_asce, 7, 7); 64 48 set_cpu_flag(CIF_ASCE); ··· 87 71 { 88 72 int cpu = smp_processor_id(); 89 73 90 - S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); 74 + S390_lowcore.user_asce = next->context.asce; 91 75 if (prev == next) 92 76 return; 93 77 if (MACHINE_HAS_TLB_LC)
+2 -1
arch/s390/include/asm/pci.h
··· 44 44 u64 rpcit_ops; 45 45 u64 dma_rbytes; 46 46 u64 dma_wbytes; 47 - } __packed __aligned(64); 47 + u64 pad[2]; 48 + } __packed __aligned(128); 48 49 49 50 enum zpci_state { 50 51 ZPCI_FN_STATE_RESERVED,
+2 -2
arch/s390/include/asm/pgalloc.h
··· 52 52 return _REGION2_ENTRY_EMPTY; 53 53 } 54 54 55 - int crst_table_upgrade(struct mm_struct *, unsigned long limit); 56 - void crst_table_downgrade(struct mm_struct *, unsigned long limit); 55 + int crst_table_upgrade(struct mm_struct *); 56 + void crst_table_downgrade(struct mm_struct *); 57 57 58 58 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 59 59 {
+1 -1
arch/s390/include/asm/processor.h
··· 175 175 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ 176 176 regs->psw.addr = new_psw; \ 177 177 regs->gprs[15] = new_stackp; \ 178 - crst_table_downgrade(current->mm, 1UL << 31); \ 178 + crst_table_downgrade(current->mm); \ 179 179 execve_tail(); \ 180 180 } while (0) 181 181
+2
arch/s390/include/asm/seccomp.h
··· 13 13 #define __NR_seccomp_exit_32 __NR_exit 14 14 #define __NR_seccomp_sigreturn_32 __NR_sigreturn 15 15 16 + #include <asm-generic/seccomp.h> 17 + 16 18 #endif /* _ASM_S390_SECCOMP_H */
+3 -6
arch/s390/include/asm/tlbflush.h
··· 110 110 static inline void __tlb_flush_kernel(void) 111 111 { 112 112 if (MACHINE_HAS_IDTE) 113 - __tlb_flush_idte((unsigned long) init_mm.pgd | 114 - init_mm.context.asce_bits); 113 + __tlb_flush_idte(init_mm.context.asce); 115 114 else 116 115 __tlb_flush_global(); 117 116 } ··· 132 133 static inline void __tlb_flush_kernel(void) 133 134 { 134 135 if (MACHINE_HAS_TLB_LC) 135 - __tlb_flush_idte_local((unsigned long) init_mm.pgd | 136 - init_mm.context.asce_bits); 136 + __tlb_flush_idte_local(init_mm.context.asce); 137 137 else 138 138 __tlb_flush_local(); 139 139 } ··· 146 148 * only ran on the local cpu. 147 149 */ 148 150 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) 149 - __tlb_flush_asce(mm, (unsigned long) mm->pgd | 150 - mm->context.asce_bits); 151 + __tlb_flush_asce(mm, mm->context.asce); 151 152 else 152 153 __tlb_flush_full(mm); 153 154 }
+1
arch/s390/lib/spinlock.c
··· 105 105 if (_raw_compare_and_swap(&lp->lock, 0, cpu)) 106 106 return; 107 107 local_irq_restore(flags); 108 + continue; 108 109 } 109 110 /* Check if the lock owner is running. */ 110 111 if (first_diag && cpu_is_preempted(~owner)) {
+2 -1
arch/s390/mm/init.c
··· 89 89 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; 90 90 pgd_type = _REGION3_ENTRY_EMPTY; 91 91 } 92 - S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; 92 + init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; 93 + S390_lowcore.kernel_asce = init_mm.context.asce; 93 94 clear_table((unsigned long *) init_mm.pgd, pgd_type, 94 95 sizeof(unsigned long)*2048); 95 96 vmem_map_init();
+3 -3
arch/s390/mm/mmap.c
··· 174 174 if (!(flags & MAP_FIXED)) 175 175 addr = 0; 176 176 if ((addr + len) >= TASK_SIZE) 177 - return crst_table_upgrade(current->mm, TASK_MAX_SIZE); 177 + return crst_table_upgrade(current->mm); 178 178 return 0; 179 179 } 180 180 ··· 191 191 return area; 192 192 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { 193 193 /* Upgrade the page table to 4 levels and retry. */ 194 - rc = crst_table_upgrade(mm, TASK_MAX_SIZE); 194 + rc = crst_table_upgrade(mm); 195 195 if (rc) 196 196 return (unsigned long) rc; 197 197 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); ··· 213 213 return area; 214 214 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { 215 215 /* Upgrade the page table to 4 levels and retry. */ 216 - rc = crst_table_upgrade(mm, TASK_MAX_SIZE); 216 + rc = crst_table_upgrade(mm); 217 217 if (rc) 218 218 return (unsigned long) rc; 219 219 area = arch_get_unmapped_area_topdown(filp, addr, len,
+28 -57
arch/s390/mm/pgalloc.c
··· 76 76 __tlb_flush_local(); 77 77 } 78 78 79 - int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) 79 + int crst_table_upgrade(struct mm_struct *mm) 80 80 { 81 81 unsigned long *table, *pgd; 82 - unsigned long entry; 83 - int flush; 84 82 85 - BUG_ON(limit > TASK_MAX_SIZE); 86 - flush = 0; 87 - repeat: 83 + /* upgrade should only happen from 3 to 4 levels */ 84 + BUG_ON(mm->context.asce_limit != (1UL << 42)); 85 + 88 86 table = crst_table_alloc(mm); 89 87 if (!table) 90 88 return -ENOMEM; 89 + 91 90 spin_lock_bh(&mm->page_table_lock); 92 - if (mm->context.asce_limit < limit) { 93 - pgd = (unsigned long *) mm->pgd; 94 - if (mm->context.asce_limit <= (1UL << 31)) { 95 - entry = _REGION3_ENTRY_EMPTY; 96 - mm->context.asce_limit = 1UL << 42; 97 - mm->context.asce_bits = _ASCE_TABLE_LENGTH | 98 - _ASCE_USER_BITS | 99 - _ASCE_TYPE_REGION3; 100 - } else { 101 - entry = _REGION2_ENTRY_EMPTY; 102 - mm->context.asce_limit = 1UL << 53; 103 - mm->context.asce_bits = _ASCE_TABLE_LENGTH | 104 - _ASCE_USER_BITS | 105 - _ASCE_TYPE_REGION2; 106 - } 107 - crst_table_init(table, entry); 108 - pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); 109 - mm->pgd = (pgd_t *) table; 110 - mm->task_size = mm->context.asce_limit; 111 - table = NULL; 112 - flush = 1; 113 - } 91 + pgd = (unsigned long *) mm->pgd; 92 + crst_table_init(table, _REGION2_ENTRY_EMPTY); 93 + pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); 94 + mm->pgd = (pgd_t *) table; 95 + mm->context.asce_limit = 1UL << 53; 96 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 97 + _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 98 + mm->task_size = mm->context.asce_limit; 114 99 spin_unlock_bh(&mm->page_table_lock); 115 - if (table) 116 - crst_table_free(mm, table); 117 - if (mm->context.asce_limit < limit) 118 - goto repeat; 119 - if (flush) 120 - on_each_cpu(__crst_table_upgrade, mm, 0); 100 + 101 + on_each_cpu(__crst_table_upgrade, mm, 0); 121 102 return 0; 122 103 } 123 104 124 - void crst_table_downgrade(struct 
mm_struct *mm, unsigned long limit) 105 + void crst_table_downgrade(struct mm_struct *mm) 125 106 { 126 107 pgd_t *pgd; 108 + 109 + /* downgrade should only happen from 3 to 2 levels (compat only) */ 110 + BUG_ON(mm->context.asce_limit != (1UL << 42)); 127 111 128 112 if (current->active_mm == mm) { 129 113 clear_user_asce(); 130 114 __tlb_flush_mm(mm); 131 115 } 132 - while (mm->context.asce_limit > limit) { 133 - pgd = mm->pgd; 134 - switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { 135 - case _REGION_ENTRY_TYPE_R2: 136 - mm->context.asce_limit = 1UL << 42; 137 - mm->context.asce_bits = _ASCE_TABLE_LENGTH | 138 - _ASCE_USER_BITS | 139 - _ASCE_TYPE_REGION3; 140 - break; 141 - case _REGION_ENTRY_TYPE_R3: 142 - mm->context.asce_limit = 1UL << 31; 143 - mm->context.asce_bits = _ASCE_TABLE_LENGTH | 144 - _ASCE_USER_BITS | 145 - _ASCE_TYPE_SEGMENT; 146 - break; 147 - default: 148 - BUG(); 149 - } 150 - mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); 151 - mm->task_size = mm->context.asce_limit; 152 - crst_table_free(mm, (unsigned long *) pgd); 153 - } 116 + 117 + pgd = mm->pgd; 118 + mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); 119 + mm->context.asce_limit = 1UL << 31; 120 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 121 + _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; 122 + mm->task_size = mm->context.asce_limit; 123 + crst_table_free(mm, (unsigned long *) pgd); 124 + 154 125 if (current->active_mm == mm) 155 126 set_user_asce(mm); 156 127 }
+10 -6
arch/s390/pci/pci_dma.c
··· 457 457 zdev->dma_table = dma_alloc_cpu_table(); 458 458 if (!zdev->dma_table) { 459 459 rc = -ENOMEM; 460 - goto out_clean; 460 + goto out; 461 461 } 462 462 463 463 /* ··· 477 477 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); 478 478 if (!zdev->iommu_bitmap) { 479 479 rc = -ENOMEM; 480 - goto out_reg; 480 + goto free_dma_table; 481 481 } 482 482 483 483 rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, 484 484 (u64) zdev->dma_table); 485 485 if (rc) 486 - goto out_reg; 487 - return 0; 486 + goto free_bitmap; 488 487 489 - out_reg: 488 + return 0; 489 + free_bitmap: 490 + vfree(zdev->iommu_bitmap); 491 + zdev->iommu_bitmap = NULL; 492 + free_dma_table: 490 493 dma_free_cpu_table(zdev->dma_table); 491 - out_clean: 494 + zdev->dma_table = NULL; 495 + out: 492 496 return rc; 493 497 } 494 498
+2 -2
arch/x86/crypto/sha-mb/sha1_mb.c
··· 453 453 454 454 req = cast_mcryptd_ctx_to_req(req_ctx); 455 455 if (irqs_disabled()) 456 - rctx->complete(&req->base, ret); 456 + req_ctx->complete(&req->base, ret); 457 457 else { 458 458 local_bh_disable(); 459 - rctx->complete(&req->base, ret); 459 + req_ctx->complete(&req->base, ret); 460 460 local_bh_enable(); 461 461 } 462 462 }
+1
arch/x86/events/amd/iommu.c
··· 474 474 475 475 static struct perf_amd_iommu __perf_iommu = { 476 476 .pmu = { 477 + .task_ctx_nr = perf_invalid_context, 477 478 .event_init = perf_iommu_event_init, 478 479 .add = perf_iommu_add, 479 480 .del = perf_iommu_del,
+2
arch/x86/events/intel/core.c
··· 3794 3794 pr_cont("Knights Landing events, "); 3795 3795 break; 3796 3796 3797 + case 142: /* 14nm Kabylake Mobile */ 3798 + case 158: /* 14nm Kabylake Desktop */ 3797 3799 case 78: /* 14nm Skylake Mobile */ 3798 3800 case 94: /* 14nm Skylake Desktop */ 3799 3801 case 85: /* 14nm Skylake Server */
+1
arch/x86/include/asm/hugetlb.h
··· 4 4 #include <asm/page.h> 5 5 #include <asm-generic/hugetlb.h> 6 6 7 + #define hugepages_supported() cpu_has_pse 7 8 8 9 static inline int is_hugepage_only_range(struct mm_struct *mm, 9 10 unsigned long addr,
+2 -1
arch/x86/kernel/apic/vector.c
··· 256 256 struct irq_desc *desc; 257 257 int cpu, vector; 258 258 259 - BUG_ON(!data->cfg.vector); 259 + if (!data->cfg.vector) 260 + return; 260 261 261 262 vector = data->cfg.vector; 262 263 for_each_cpu_and(cpu, data->domain, cpu_online_mask)
+12
arch/x86/kernel/cpu/mshyperv.c
··· 152 152 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 153 153 }; 154 154 155 + static unsigned char hv_get_nmi_reason(void) 156 + { 157 + return 0; 158 + } 159 + 155 160 static void __init ms_hyperv_init_platform(void) 156 161 { 157 162 /* ··· 196 191 machine_ops.crash_shutdown = hv_machine_crash_shutdown; 197 192 #endif 198 193 mark_tsc_unstable("running on Hyper-V"); 194 + 195 + /* 196 + * Generation 2 instances don't support reading the NMI status from 197 + * 0x61 port. 198 + */ 199 + if (efi_enabled(EFI_BOOT)) 200 + x86_platform.get_nmi_reason = hv_get_nmi_reason; 199 201 } 200 202 201 203 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
-6
arch/x86/kernel/head_32.S
··· 389 389 /* Make changes effective */ 390 390 wrmsr 391 391 392 - /* 393 - * And make sure that all the mappings we set up have NX set from 394 - * the beginning. 395 - */ 396 - orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4) 397 - 398 392 enable_paging: 399 393 400 394 /*
+3 -2
arch/x86/mm/setup_nx.c
··· 32 32 33 33 void x86_configure_nx(void) 34 34 { 35 - /* If disable_nx is set, clear NX on all new mappings going forward. */ 36 - if (disable_nx) 35 + if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx) 36 + __supported_pte_mask |= _PAGE_NX; 37 + else 37 38 __supported_pte_mask &= ~_PAGE_NX; 38 39 } 39 40
+6
arch/x86/xen/spinlock.c
··· 27 27 28 28 static void xen_qlock_kick(int cpu) 29 29 { 30 + int irq = per_cpu(lock_kicker_irq, cpu); 31 + 32 + /* Don't kick if the target's kicker interrupt is not initialized. */ 33 + if (irq == -1) 34 + return; 35 + 30 36 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); 31 37 } 32 38
+6 -6
crypto/rsa-pkcs1pad.c
··· 387 387 req_ctx->child_req.src = req->src; 388 388 req_ctx->child_req.src_len = req->src_len; 389 389 req_ctx->child_req.dst = req_ctx->out_sg; 390 - req_ctx->child_req.dst_len = ctx->key_size - 1; 390 + req_ctx->child_req.dst_len = ctx->key_size ; 391 391 392 - req_ctx->out_buf = kmalloc(ctx->key_size - 1, 392 + req_ctx->out_buf = kmalloc(ctx->key_size, 393 393 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 394 394 GFP_KERNEL : GFP_ATOMIC); 395 395 if (!req_ctx->out_buf) 396 396 return -ENOMEM; 397 397 398 398 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 399 - ctx->key_size - 1, NULL); 399 + ctx->key_size, NULL); 400 400 401 401 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 402 402 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, ··· 595 595 req_ctx->child_req.src = req->src; 596 596 req_ctx->child_req.src_len = req->src_len; 597 597 req_ctx->child_req.dst = req_ctx->out_sg; 598 - req_ctx->child_req.dst_len = ctx->key_size - 1; 598 + req_ctx->child_req.dst_len = ctx->key_size; 599 599 600 - req_ctx->out_buf = kmalloc(ctx->key_size - 1, 600 + req_ctx->out_buf = kmalloc(ctx->key_size, 601 601 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 602 602 GFP_KERNEL : GFP_ATOMIC); 603 603 if (!req_ctx->out_buf) 604 604 return -ENOMEM; 605 605 606 606 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 607 - ctx->key_size - 1, NULL); 607 + ctx->key_size, NULL); 608 608 609 609 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 610 610 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+4 -13
drivers/bcma/main.c
··· 136 136 return false; 137 137 } 138 138 139 - #if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS) 140 139 static struct device_node *bcma_of_find_child_device(struct platform_device *parent, 141 140 struct bcma_device *core) 142 141 { ··· 183 184 struct of_phandle_args out_irq; 184 185 int ret; 185 186 186 - if (!parent || !parent->dev.of_node) 187 + if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node) 187 188 return 0; 188 189 189 190 ret = bcma_of_irq_parse(parent, core, &out_irq, num); ··· 201 202 { 202 203 struct device_node *node; 203 204 205 + if (!IS_ENABLED(CONFIG_OF_IRQ)) 206 + return; 207 + 204 208 node = bcma_of_find_child_device(parent, core); 205 209 if (node) 206 210 core->dev.of_node = node; 207 211 208 212 core->irq = bcma_of_get_irq(parent, core, 0); 209 213 } 210 - #else 211 - static void bcma_of_fill_device(struct platform_device *parent, 212 - struct bcma_device *core) 213 - { 214 - } 215 - static inline unsigned int bcma_of_get_irq(struct platform_device *parent, 216 - struct bcma_device *core, int num) 217 - { 218 - return 0; 219 - } 220 - #endif /* CONFIG_OF */ 221 214 222 215 unsigned int bcma_core_irq(struct bcma_device *core, int num) 223 216 {
+25 -27
drivers/block/rbd.c
··· 538 538 u8 *order, u64 *snap_size); 539 539 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 540 540 u64 *snap_features); 541 - static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name); 542 541 543 542 static int rbd_open(struct block_device *bdev, fmode_t mode) 544 543 { ··· 3126 3127 struct rbd_device *rbd_dev = (struct rbd_device *)data; 3127 3128 int ret; 3128 3129 3129 - if (!rbd_dev) 3130 - return; 3131 - 3132 3130 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, 3133 3131 rbd_dev->header_name, (unsigned long long)notify_id, 3134 3132 (unsigned int)opcode); ··· 3259 3263 3260 3264 ceph_osdc_cancel_event(rbd_dev->watch_event); 3261 3265 rbd_dev->watch_event = NULL; 3266 + 3267 + dout("%s flushing notifies\n", __func__); 3268 + ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); 3262 3269 } 3263 3270 3264 3271 /* ··· 3641 3642 static void rbd_dev_update_size(struct rbd_device *rbd_dev) 3642 3643 { 3643 3644 sector_t size; 3644 - bool removing; 3645 3645 3646 3646 /* 3647 - * Don't hold the lock while doing disk operations, 3648 - * or lock ordering will conflict with the bdev mutex via: 3649 - * rbd_add() -> blkdev_get() -> rbd_open() 3647 + * If EXISTS is not set, rbd_dev->disk may be NULL, so don't 3648 + * try to update its size. If REMOVING is set, updating size 3649 + * is just useless work since the device can't be opened. 
3650 3650 */ 3651 - spin_lock_irq(&rbd_dev->lock); 3652 - removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); 3653 - spin_unlock_irq(&rbd_dev->lock); 3654 - /* 3655 - * If the device is being removed, rbd_dev->disk has 3656 - * been destroyed, so don't try to update its size 3657 - */ 3658 - if (!removing) { 3651 + if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) && 3652 + !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) { 3659 3653 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 3660 3654 dout("setting size to %llu sectors", (unsigned long long)size); 3661 3655 set_capacity(rbd_dev->disk, size); ··· 4183 4191 __le64 features; 4184 4192 __le64 incompat; 4185 4193 } __attribute__ ((packed)) features_buf = { 0 }; 4186 - u64 incompat; 4194 + u64 unsup; 4187 4195 int ret; 4188 4196 4189 4197 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, ··· 4196 4204 if (ret < sizeof (features_buf)) 4197 4205 return -ERANGE; 4198 4206 4199 - incompat = le64_to_cpu(features_buf.incompat); 4200 - if (incompat & ~RBD_FEATURES_SUPPORTED) 4207 + unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED; 4208 + if (unsup) { 4209 + rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx", 4210 + unsup); 4201 4211 return -ENXIO; 4212 + } 4202 4213 4203 4214 *snap_features = le64_to_cpu(features_buf.features); 4204 4215 ··· 5182 5187 return ret; 5183 5188 } 5184 5189 5190 + /* 5191 + * rbd_dev->header_rwsem must be locked for write and will be unlocked 5192 + * upon return. 5193 + */ 5185 5194 static int rbd_dev_device_setup(struct rbd_device *rbd_dev) 5186 5195 { 5187 5196 int ret; ··· 5194 5195 5195 5196 ret = rbd_dev_id_get(rbd_dev); 5196 5197 if (ret) 5197 - return ret; 5198 + goto err_out_unlock; 5198 5199 5199 5200 BUILD_BUG_ON(DEV_NAME_LEN 5200 5201 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); ··· 5235 5236 /* Everything's ready. Announce the disk to the world. 
*/ 5236 5237 5237 5238 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5238 - add_disk(rbd_dev->disk); 5239 + up_write(&rbd_dev->header_rwsem); 5239 5240 5241 + add_disk(rbd_dev->disk); 5240 5242 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, 5241 5243 (unsigned long long) rbd_dev->mapping.size); 5242 5244 ··· 5252 5252 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5253 5253 err_out_id: 5254 5254 rbd_dev_id_put(rbd_dev); 5255 + err_out_unlock: 5256 + up_write(&rbd_dev->header_rwsem); 5255 5257 return ret; 5256 5258 } 5257 5259 ··· 5444 5442 spec = NULL; /* rbd_dev now owns this */ 5445 5443 rbd_opts = NULL; /* rbd_dev now owns this */ 5446 5444 5445 + down_write(&rbd_dev->header_rwsem); 5447 5446 rc = rbd_dev_image_probe(rbd_dev, 0); 5448 5447 if (rc < 0) 5449 5448 goto err_out_rbd_dev; ··· 5474 5471 return rc; 5475 5472 5476 5473 err_out_rbd_dev: 5474 + up_write(&rbd_dev->header_rwsem); 5477 5475 rbd_dev_destroy(rbd_dev); 5478 5476 err_out_client: 5479 5477 rbd_put_client(rbdc); ··· 5581 5577 return ret; 5582 5578 5583 5579 rbd_dev_header_unwatch_sync(rbd_dev); 5584 - /* 5585 - * flush remaining watch callbacks - these must be complete 5586 - * before the osd_client is shutdown 5587 - */ 5588 - dout("%s: flushing notifies", __func__); 5589 - ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); 5590 5580 5591 5581 /* 5592 5582 * Don't free anything from rbd_dev->disk until after all
+1 -1
drivers/clocksource/tango_xtal.c
··· 42 42 43 43 ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350, 44 44 32, clocksource_mmio_readl_up); 45 - if (!ret) { 45 + if (ret) { 46 46 pr_err("%s: registration failed\n", np->full_name); 47 47 return; 48 48 }
+3
drivers/cpufreq/cpufreq.c
··· 1491 1491 { 1492 1492 unsigned int new_freq; 1493 1493 1494 + if (cpufreq_suspended) 1495 + return 0; 1496 + 1494 1497 new_freq = cpufreq_driver->get(policy->cpu); 1495 1498 if (!new_freq) 1496 1499 return 0;
+2 -6
drivers/cpufreq/cpufreq_governor.c
··· 193 193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall; 194 194 j_cdbs->prev_cpu_wall = cur_wall_time; 195 195 196 - if (cur_idle_time <= j_cdbs->prev_cpu_idle) { 197 - idle_time = 0; 198 - } else { 199 - idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; 200 - j_cdbs->prev_cpu_idle = cur_idle_time; 201 - } 196 + idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; 197 + j_cdbs->prev_cpu_idle = cur_idle_time; 202 198 203 199 if (ignore_nice) { 204 200 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+9
drivers/cpufreq/intel_pstate.c
··· 813 813 if (err) 814 814 goto skip_tar; 815 815 816 + /* For level 1 and 2, bits[23:16] contain the ratio */ 817 + if (tdp_ctrl) 818 + tdp_ratio >>= 16; 819 + 820 + tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 816 821 if (tdp_ratio - 1 == tar) { 817 822 max_pstate = tar; 818 823 pr_debug("max_pstate=TAC %x\n", max_pstate); ··· 1135 1130 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), 1136 1131 int_tofp(duration_ns)); 1137 1132 core_busy = mul_fp(core_busy, sample_ratio); 1133 + } else { 1134 + sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); 1135 + if (sample_ratio < int_tofp(1)) 1136 + core_busy = 0; 1138 1137 } 1139 1138 1140 1139 cpu->sample.busy_scaled = core_busy;
+3
drivers/crypto/ccp/ccp-crypto-aes-cmac.c
··· 225 225 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); 226 226 struct ccp_aes_cmac_exp_ctx state; 227 227 228 + /* Don't let anything leak to 'out' */ 229 + memset(&state, 0, sizeof(state)); 230 + 228 231 state.null_msg = rctx->null_msg; 229 232 memcpy(state.iv, rctx->iv, sizeof(state.iv)); 230 233 state.buf_count = rctx->buf_count;
+3
drivers/crypto/ccp/ccp-crypto-sha.c
··· 212 212 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 213 213 struct ccp_sha_exp_ctx state; 214 214 215 + /* Don't let anything leak to 'out' */ 216 + memset(&state, 0, sizeof(state)); 217 + 215 218 state.type = rctx->type; 216 219 state.msg_bits = rctx->msg_bits; 217 220 state.first = rctx->first;
+57 -30
drivers/crypto/talitos.c
··· 63 63 ptr->eptr = upper_32_bits(dma_addr); 64 64 } 65 65 66 + static void copy_talitos_ptr(struct talitos_ptr *dst_ptr, 67 + struct talitos_ptr *src_ptr, bool is_sec1) 68 + { 69 + dst_ptr->ptr = src_ptr->ptr; 70 + if (!is_sec1) 71 + dst_ptr->eptr = src_ptr->eptr; 72 + } 73 + 66 74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, 67 75 bool is_sec1) 68 76 { ··· 1091 1083 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1, 1092 1084 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL 1093 1085 : DMA_TO_DEVICE); 1094 - 1095 1086 /* hmac data */ 1096 1087 desc->ptr[1].len = cpu_to_be16(areq->assoclen); 1097 1088 if (sg_count > 1 && 1098 1089 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, 1099 1090 areq->assoclen, 1100 1091 &edesc->link_tbl[tbl_off])) > 1) { 1101 - tbl_off += ret; 1102 - 1103 1092 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * 1104 1093 sizeof(struct talitos_ptr), 0); 1105 1094 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; 1106 1095 1107 1096 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1108 1097 edesc->dma_len, DMA_BIDIRECTIONAL); 1098 + 1099 + tbl_off += ret; 1109 1100 } else { 1110 1101 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); 1111 1102 desc->ptr[1].j_extent = 0; ··· 1133 1126 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) 1134 1127 sg_link_tbl_len += authsize; 1135 1128 1136 - if (sg_count > 1 && 1137 - (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen, 1138 - sg_link_tbl_len, 1139 - &edesc->link_tbl[tbl_off])) > 1) { 1140 - tbl_off += ret; 1129 + if (sg_count == 1) { 1130 + to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) + 1131 + areq->assoclen, 0); 1132 + } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count, 1133 + areq->assoclen, sg_link_tbl_len, 1134 + &edesc->link_tbl[tbl_off])) > 1135 + 1) { 1141 1136 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1142 1137 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + 1143 1138 tbl_off * ··· 
1147 1138 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1148 1139 edesc->dma_len, 1149 1140 DMA_BIDIRECTIONAL); 1150 - } else 1151 - to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); 1141 + tbl_off += ret; 1142 + } else { 1143 + copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0); 1144 + } 1152 1145 1153 1146 /* cipher out */ 1154 1147 desc->ptr[5].len = cpu_to_be16(cryptlen); ··· 1162 1151 1163 1152 edesc->icv_ool = false; 1164 1153 1165 - if (sg_count > 1 && 1166 - (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count, 1154 + if (sg_count == 1) { 1155 + to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) + 1156 + areq->assoclen, 0); 1157 + } else if ((sg_count = 1158 + sg_to_link_tbl_offset(areq->dst, sg_count, 1167 1159 areq->assoclen, cryptlen, 1168 - &edesc->link_tbl[tbl_off])) > 1169 - 1) { 1160 + &edesc->link_tbl[tbl_off])) > 1) { 1170 1161 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; 1171 1162 1172 1163 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + ··· 1191 1178 edesc->dma_len, DMA_BIDIRECTIONAL); 1192 1179 1193 1180 edesc->icv_ool = true; 1194 - } else 1195 - to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); 1181 + } else { 1182 + copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0); 1183 + } 1196 1184 1197 1185 /* iv out */ 1198 1186 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, ··· 2643 2629 struct talitos_alg_template algt; 2644 2630 }; 2645 2631 2646 - static int talitos_cra_init(struct crypto_tfm *tfm) 2632 + static int talitos_init_common(struct talitos_ctx *ctx, 2633 + struct talitos_crypto_alg *talitos_alg) 2647 2634 { 2648 - struct crypto_alg *alg = tfm->__crt_alg; 2649 - struct talitos_crypto_alg *talitos_alg; 2650 - struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); 2651 2635 struct talitos_private *priv; 2652 - 2653 - if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) 2654 - talitos_alg = container_of(__crypto_ahash_alg(alg), 2655 - struct 
talitos_crypto_alg, 2656 - algt.alg.hash); 2657 - else 2658 - talitos_alg = container_of(alg, struct talitos_crypto_alg, 2659 - algt.alg.crypto); 2660 2636 2661 2637 /* update context with ptr to dev */ 2662 2638 ctx->dev = talitos_alg->dev; ··· 2665 2661 return 0; 2666 2662 } 2667 2663 2664 + static int talitos_cra_init(struct crypto_tfm *tfm) 2665 + { 2666 + struct crypto_alg *alg = tfm->__crt_alg; 2667 + struct talitos_crypto_alg *talitos_alg; 2668 + struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); 2669 + 2670 + if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) 2671 + talitos_alg = container_of(__crypto_ahash_alg(alg), 2672 + struct talitos_crypto_alg, 2673 + algt.alg.hash); 2674 + else 2675 + talitos_alg = container_of(alg, struct talitos_crypto_alg, 2676 + algt.alg.crypto); 2677 + 2678 + return talitos_init_common(ctx, talitos_alg); 2679 + } 2680 + 2668 2681 static int talitos_cra_init_aead(struct crypto_aead *tfm) 2669 2682 { 2670 - talitos_cra_init(crypto_aead_tfm(tfm)); 2671 - return 0; 2683 + struct aead_alg *alg = crypto_aead_alg(tfm); 2684 + struct talitos_crypto_alg *talitos_alg; 2685 + struct talitos_ctx *ctx = crypto_aead_ctx(tfm); 2686 + 2687 + talitos_alg = container_of(alg, struct talitos_crypto_alg, 2688 + algt.alg.aead); 2689 + 2690 + return talitos_init_common(ctx, talitos_alg); 2672 2691 } 2673 2692 2674 2693 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+1 -1
drivers/edac/i7core_edac.c
··· 1866 1866 1867 1867 i7_dev = get_i7core_dev(mce->socketid); 1868 1868 if (!i7_dev) 1869 - return NOTIFY_BAD; 1869 + return NOTIFY_DONE; 1870 1870 1871 1871 mci = i7_dev->mci; 1872 1872 pvt = mci->pvt_info;
+27 -5
drivers/edac/sb_edac.c
··· 362 362 363 363 /* Memory type detection */ 364 364 bool is_mirrored, is_lockstep, is_close_pg; 365 + bool is_chan_hash; 365 366 366 367 /* Fifo double buffers */ 367 368 struct mce mce_entry[MCE_LOG_LEN]; ··· 1061 1060 return (pkg >> 2) & 0x1; 1062 1061 } 1063 1062 1063 + static int haswell_chan_hash(int idx, u64 addr) 1064 + { 1065 + int i; 1066 + 1067 + /* 1068 + * XOR even bits from 12:26 to bit0 of idx, 1069 + * odd bits from 13:27 to bit1 1070 + */ 1071 + for (i = 12; i < 28; i += 2) 1072 + idx ^= (addr >> i) & 3; 1073 + 1074 + return idx; 1075 + } 1076 + 1064 1077 /**************************************************************************** 1065 1078 Memory check routines 1066 1079 ****************************************************************************/ ··· 1631 1616 KNL_MAX_CHANNELS : NUM_CHANNELS; 1632 1617 u64 knl_mc_sizes[KNL_MAX_CHANNELS]; 1633 1618 1619 + if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) { 1620 + pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg); 1621 + pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21); 1622 + } 1634 1623 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL || 1635 1624 pvt->info.type == KNIGHTS_LANDING) 1636 1625 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg); ··· 2137 2118 } 2138 2119 2139 2120 ch_way = TAD_CH(reg) + 1; 2140 - sck_way = 1 << TAD_SOCK(reg); 2121 + sck_way = TAD_SOCK(reg); 2141 2122 2142 2123 if (ch_way == 3) 2143 2124 idx = addr >> 6; 2144 - else 2125 + else { 2145 2126 idx = (addr >> (6 + sck_way + shiftup)) & 0x3; 2127 + if (pvt->is_chan_hash) 2128 + idx = haswell_chan_hash(idx, addr); 2129 + } 2146 2130 idx = idx % ch_way; 2147 2131 2148 2132 /* ··· 2179 2157 switch(ch_way) { 2180 2158 case 2: 2181 2159 case 4: 2182 - sck_xch = 1 << sck_way * (ch_way >> 1); 2160 + sck_xch = (1 << sck_way) * (ch_way >> 1); 2183 2161 break; 2184 2162 default: 2185 2163 sprintf(msg, "Invalid mirror set. 
Can't decode addr"); ··· 2215 2193 2216 2194 ch_addr = addr - offset; 2217 2195 ch_addr >>= (6 + shiftup); 2218 - ch_addr /= ch_way * sck_way; 2196 + ch_addr /= sck_xch; 2219 2197 ch_addr <<= (6 + shiftup); 2220 2198 ch_addr |= addr & ((1 << (6 + shiftup)) - 1); 2221 2199 ··· 3168 3146 3169 3147 mci = get_mci_for_node_id(mce->socketid); 3170 3148 if (!mci) 3171 - return NOTIFY_BAD; 3149 + return NOTIFY_DONE; 3172 3150 pvt = mci->pvt_info; 3173 3151 3174 3152 /*
+26 -11
drivers/firmware/efi/vars.c
··· 202 202 { NULL_GUID, "", NULL }, 203 203 }; 204 204 205 + /* 206 + * Check if @var_name matches the pattern given in @match_name. 207 + * 208 + * @var_name: an array of @len non-NUL characters. 209 + * @match_name: a NUL-terminated pattern string, optionally ending in "*". A 210 + * final "*" character matches any trailing characters @var_name, 211 + * including the case when there are none left in @var_name. 212 + * @match: on output, the number of non-wildcard characters in @match_name 213 + * that @var_name matches, regardless of the return value. 214 + * @return: whether @var_name fully matches @match_name. 215 + */ 205 216 static bool 206 217 variable_matches(const char *var_name, size_t len, const char *match_name, 207 218 int *match) 208 219 { 209 220 for (*match = 0; ; (*match)++) { 210 221 char c = match_name[*match]; 211 - char u = var_name[*match]; 212 222 213 - /* Wildcard in the matching name means we've matched */ 214 - if (c == '*') 223 + switch (c) { 224 + case '*': 225 + /* Wildcard in @match_name means we've matched. */ 215 226 return true; 216 227 217 - /* Case sensitive match */ 218 - if (!c && *match == len) 219 - return true; 228 + case '\0': 229 + /* @match_name has ended. Has @var_name too? */ 230 + return (*match == len); 220 231 221 - if (c != u) 232 + default: 233 + /* 234 + * We've reached a non-wildcard char in @match_name. 235 + * Continue only if there's an identical character in 236 + * @var_name. 237 + */ 238 + if (*match < len && c == var_name[*match]) 239 + continue; 222 240 return false; 223 - 224 - if (!c) 225 - return true; 241 + } 226 242 } 227 - return true; 228 243 } 229 244 230 245 bool
+1 -1
drivers/firmware/psci.c
··· 360 360 .init = psci_dt_cpu_init_idle, 361 361 }; 362 362 363 - CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops); 363 + CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops); 364 364 #endif 365 365 #endif 366 366
+1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1591 1591 struct amdgpu_bo *vcpu_bo; 1592 1592 void *cpu_addr; 1593 1593 uint64_t gpu_addr; 1594 + unsigned fw_version; 1594 1595 void *saved_bo; 1595 1596 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 1596 1597 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 425 425 struct acp_pm_domain *apd; 426 426 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 427 427 428 + /* return early if no ACP */ 429 + if (!adev->acp.acp_genpd) 430 + return 0; 431 + 428 432 /* SMU block will power on ACP irrespective of ACP runtime status. 429 433 * Power off explicitly based on genpd ACP runtime status so that ACP 430 434 * hw and ACP-genpd status are in sync.
+7 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
··· 63 63 return amdgpu_atpx_priv.atpx_detected; 64 64 } 65 65 66 - bool amdgpu_has_atpx_dgpu_power_cntl(void) { 67 - return amdgpu_atpx_priv.atpx.functions.power_cntl; 68 - } 69 - 70 66 /** 71 67 * amdgpu_atpx_call - call an ATPX method 72 68 * ··· 142 146 */ 143 147 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) 144 148 { 149 + /* make sure required functions are enabled */ 150 + /* dGPU power control is required */ 151 + if (atpx->functions.power_cntl == false) { 152 + printk("ATPX dGPU power cntl not present, forcing\n"); 153 + atpx->functions.power_cntl = true; 154 + } 155 + 145 156 if (atpx->functions.px_params) { 146 157 union acpi_object *info; 147 158 struct atpx_px_params output;
+1 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 62 62 "LAST", 63 63 }; 64 64 65 - #if defined(CONFIG_VGA_SWITCHEROO) 66 - bool amdgpu_has_atpx_dgpu_power_cntl(void); 67 - #else 68 - static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } 69 - #endif 70 - 71 65 bool amdgpu_device_is_px(struct drm_device *dev) 72 66 { 73 67 struct amdgpu_device *adev = dev->dev_private; ··· 1479 1485 1480 1486 if (amdgpu_runtime_pm == 1) 1481 1487 runtime = true; 1482 - if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl()) 1488 + if (amdgpu_device_is_px(ddev)) 1483 1489 runtime = true; 1484 1490 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); 1485 1491 if (runtime)
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 303 303 fw_info.feature = adev->vce.fb_version; 304 304 break; 305 305 case AMDGPU_INFO_FW_UVD: 306 - fw_info.ver = 0; 306 + fw_info.ver = adev->uvd.fw_version; 307 307 fw_info.feature = 0; 308 308 break; 309 309 case AMDGPU_INFO_FW_GMC:
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
··· 53 53 54 54 #define AMDGPU_MAX_HPD_PINS 6 55 55 #define AMDGPU_MAX_CRTCS 6 56 - #define AMDGPU_MAX_AFMT_BLOCKS 7 56 + #define AMDGPU_MAX_AFMT_BLOCKS 9 57 57 58 58 enum amdgpu_rmx_type { 59 59 RMX_OFF, ··· 309 309 struct atom_context *atom_context; 310 310 struct card_info *atom_card_info; 311 311 bool mode_config_initialized; 312 - struct amdgpu_crtc *crtcs[6]; 313 - struct amdgpu_afmt *afmt[7]; 312 + struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS]; 313 + struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS]; 314 314 /* DVI-I properties */ 315 315 struct drm_property *coherent_mode_property; 316 316 /* DAC enable load detect */
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 223 223 { 224 224 struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); 225 225 226 + if (amdgpu_ttm_tt_get_usermm(bo->ttm)) 227 + return -EPERM; 226 228 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 227 229 } 228 230
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 158 158 DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", 159 159 version_major, version_minor, family_id); 160 160 161 + adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | 162 + (family_id << 8)); 163 + 161 164 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) 162 165 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE; 163 166 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, ··· 257 254 258 255 if (i == AMDGPU_MAX_UVD_HANDLES) 259 256 return 0; 257 + 258 + cancel_delayed_work_sync(&adev->uvd.idle_work); 260 259 261 260 size = amdgpu_bo_size(adev->uvd.vcpu_bo); 262 261 ptr = adev->uvd.cpu_addr;
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 234 234 if (i == AMDGPU_MAX_VCE_HANDLES) 235 235 return 0; 236 236 237 + cancel_delayed_work_sync(&adev->vce.idle_work); 237 238 /* TODO: suspending running encoding sessions isn't supported */ 238 239 return -EINVAL; 239 240 }
+4 -1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 910 910 { 911 911 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 912 912 913 - return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 913 + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) 914 + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 915 + else 916 + return 0; 914 917 } 915 918 916 919 static int gmc_v7_0_sw_init(void *handle)
+4 -1
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 870 870 { 871 871 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 872 872 873 - return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 873 + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) 874 + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 875 + else 876 + return 0; 874 877 } 875 878 876 879 #define mmMC_SEQ_MISC0_FIJI 0xA71
+28 -1
drivers/gpu/drm/drm_dp_mst_topology.c
··· 1672 1672 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 1673 1673 int i; 1674 1674 1675 + port = drm_dp_get_validated_port_ref(mgr, port); 1676 + if (!port) 1677 + return -EINVAL; 1678 + 1675 1679 port_num = port->port_num; 1676 1680 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1677 1681 if (!mstb) { 1678 1682 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); 1679 1683 1680 - if (!mstb) 1684 + if (!mstb) { 1685 + drm_dp_put_port(port); 1681 1686 return -EINVAL; 1687 + } 1682 1688 } 1683 1689 1684 1690 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); ··· 1713 1707 kfree(txmsg); 1714 1708 fail_put: 1715 1709 drm_dp_put_mst_branch_device(mstb); 1710 + drm_dp_put_port(port); 1716 1711 return ret; 1717 1712 } 1718 1713 ··· 1796 1789 req_payload.start_slot = cur_slots; 1797 1790 if (mgr->proposed_vcpis[i]) { 1798 1791 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1792 + port = drm_dp_get_validated_port_ref(mgr, port); 1793 + if (!port) { 1794 + mutex_unlock(&mgr->payload_lock); 1795 + return -EINVAL; 1796 + } 1799 1797 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 1800 1798 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; 1801 1799 } else { ··· 1828 1816 mgr->payloads[i].payload_state = req_payload.payload_state; 1829 1817 } 1830 1818 cur_slots += req_payload.num_slots; 1819 + 1820 + if (port) 1821 + drm_dp_put_port(port); 1831 1822 } 1832 1823 1833 1824 for (i = 0; i < mgr->max_payloads; i++) { ··· 2136 2121 2137 2122 if (mgr->mst_primary) { 2138 2123 int sret; 2124 + u8 guid[16]; 2125 + 2139 2126 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 2140 2127 if (sret != DP_RECEIVER_CAP_SIZE) { 2141 2128 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); ··· 2152 2135 ret = -1; 2153 2136 goto out_unlock; 2154 2137 } 2138 + 2139 + /* Some hubs forget their guids after they resume */ 2140 + sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); 2141 + if (sret != 
16) { 2142 + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 2143 + ret = -1; 2144 + goto out_unlock; 2145 + } 2146 + drm_dp_check_mstb_guid(mgr->mst_primary, guid); 2147 + 2155 2148 ret = 0; 2156 2149 } else 2157 2150 ret = -1;
+18 -13
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 572 572 goto fail; 573 573 } 574 574 575 + /* 576 + * Set the GPU linear window to be at the end of the DMA window, where 577 + * the CMA area is likely to reside. This ensures that we are able to 578 + * map the command buffers while having the linear window overlap as 579 + * much RAM as possible, so we can optimize mappings for other buffers. 580 + * 581 + * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads 582 + * to different views of the memory on the individual engines. 583 + */ 584 + if (!(gpu->identity.features & chipFeatures_PIPE_3D) || 585 + (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) { 586 + u32 dma_mask = (u32)dma_get_required_mask(gpu->dev); 587 + if (dma_mask < PHYS_OFFSET + SZ_2G) 588 + gpu->memory_base = PHYS_OFFSET; 589 + else 590 + gpu->memory_base = dma_mask - SZ_2G + 1; 591 + } 592 + 575 593 ret = etnaviv_hw_reset(gpu); 576 594 if (ret) 577 595 goto fail; ··· 1584 1566 { 1585 1567 struct device *dev = &pdev->dev; 1586 1568 struct etnaviv_gpu *gpu; 1587 - u32 dma_mask; 1588 1569 int err = 0; 1589 1570 1590 1571 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); ··· 1592 1575 1593 1576 gpu->dev = &pdev->dev; 1594 1577 mutex_init(&gpu->lock); 1595 - 1596 - /* 1597 - * Set the GPU linear window to be at the end of the DMA window, where 1598 - * the CMA area is likely to reside. This ensures that we are able to 1599 - * map the command buffers while having the linear window overlap as 1600 - * much RAM as possible, so we can optimize mappings for other buffers. 1601 - */ 1602 - dma_mask = (u32)dma_get_required_mask(dev); 1603 - if (dma_mask < PHYS_OFFSET + SZ_2G) 1604 - gpu->memory_base = PHYS_OFFSET; 1605 - else 1606 - gpu->memory_base = dma_mask - SZ_2G + 1; 1607 1578 1608 1579 /* Map registers: */ 1609 1580 gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
+3 -2
drivers/gpu/drm/i915/i915_drv.h
··· 2634 2634 2635 2635 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2636 2636 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2637 - ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ 2638 - IS_SKL_REVID(dev, 0, SKL_REVID_F0))) 2637 + IS_SKL_GT3(dev) || \ 2638 + IS_SKL_GT4(dev)) 2639 + 2639 2640 /* 2640 2641 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2641 2642 * even when in MSI mode. This results in spurious interrupt warnings if the
+16 -11
drivers/gpu/drm/i915/i915_gem_userptr.c
··· 501 501 if (pvec != NULL) { 502 502 struct mm_struct *mm = obj->userptr.mm->mm; 503 503 504 - down_read(&mm->mmap_sem); 505 - while (pinned < npages) { 506 - ret = get_user_pages_remote(work->task, mm, 507 - obj->userptr.ptr + pinned * PAGE_SIZE, 508 - npages - pinned, 509 - !obj->userptr.read_only, 0, 510 - pvec + pinned, NULL); 511 - if (ret < 0) 512 - break; 504 + ret = -EFAULT; 505 + if (atomic_inc_not_zero(&mm->mm_users)) { 506 + down_read(&mm->mmap_sem); 507 + while (pinned < npages) { 508 + ret = get_user_pages_remote 509 + (work->task, mm, 510 + obj->userptr.ptr + pinned * PAGE_SIZE, 511 + npages - pinned, 512 + !obj->userptr.read_only, 0, 513 + pvec + pinned, NULL); 514 + if (ret < 0) 515 + break; 513 516 514 - pinned += ret; 517 + pinned += ret; 518 + } 519 + up_read(&mm->mmap_sem); 520 + mmput(mm); 515 521 } 516 - up_read(&mm->mmap_sem); 517 522 } 518 523 519 524 mutex_lock(&dev->struct_mutex);
+11 -5
drivers/gpu/drm/i915/intel_lrc.c
··· 841 841 if (unlikely(total_bytes > remain_usable)) { 842 842 /* 843 843 * The base request will fit but the reserved space 844 - * falls off the end. So only need to to wait for the 845 - * reserved size after flushing out the remainder. 844 + * falls off the end. So don't need an immediate wrap 845 + * and only need to effectively wait for the reserved 846 + * size space from the start of ringbuffer. 846 847 */ 847 848 wait_bytes = remain_actual + ringbuf->reserved_size; 848 - need_wrap = true; 849 849 } else if (total_bytes > ringbuf->space) { 850 850 /* No wrapping required, just waiting. */ 851 851 wait_bytes = total_bytes; ··· 1913 1913 struct intel_ringbuffer *ringbuf = request->ringbuf; 1914 1914 int ret; 1915 1915 1916 - ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); 1916 + ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); 1917 1917 if (ret) 1918 1918 return ret; 1919 + 1920 + /* We're using qword write, seqno should be aligned to 8 bytes. */ 1921 + BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); 1919 1922 1920 1923 /* w/a for post sync ops following a GPGPU operation we 1921 1924 * need a prior CS_STALL, which is emitted by the flush 1922 1925 * following the batch. 1923 1926 */ 1924 - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5)); 1927 + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); 1925 1928 intel_logical_ring_emit(ringbuf, 1926 1929 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1927 1930 PIPE_CONTROL_CS_STALL | ··· 1932 1929 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring)); 1933 1930 intel_logical_ring_emit(ringbuf, 0); 1934 1931 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1932 + /* We're thrashing one dword of HWS. */ 1933 + intel_logical_ring_emit(ringbuf, 0); 1935 1934 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); 1935 + intel_logical_ring_emit(ringbuf, MI_NOOP); 1936 1936 return intel_logical_ring_advance_and_submit(request); 1937 1937 } 1938 1938
+28 -14
drivers/gpu/drm/i915/intel_pm.c
··· 2876 2876 const struct drm_plane_state *pstate, 2877 2877 int y) 2878 2878 { 2879 - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2879 + struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 2880 2880 struct drm_framebuffer *fb = pstate->fb; 2881 + uint32_t width = 0, height = 0; 2882 + 2883 + width = drm_rect_width(&intel_pstate->src) >> 16; 2884 + height = drm_rect_height(&intel_pstate->src) >> 16; 2885 + 2886 + if (intel_rotation_90_or_270(pstate->rotation)) 2887 + swap(width, height); 2881 2888 2882 2889 /* for planar format */ 2883 2890 if (fb->pixel_format == DRM_FORMAT_NV12) { 2884 2891 if (y) /* y-plane data rate */ 2885 - return intel_crtc->config->pipe_src_w * 2886 - intel_crtc->config->pipe_src_h * 2892 + return width * height * 2887 2893 drm_format_plane_cpp(fb->pixel_format, 0); 2888 2894 else /* uv-plane data rate */ 2889 - return (intel_crtc->config->pipe_src_w/2) * 2890 - (intel_crtc->config->pipe_src_h/2) * 2895 + return (width / 2) * (height / 2) * 2891 2896 drm_format_plane_cpp(fb->pixel_format, 1); 2892 2897 } 2893 2898 2894 2899 /* for packed formats */ 2895 - return intel_crtc->config->pipe_src_w * 2896 - intel_crtc->config->pipe_src_h * 2897 - drm_format_plane_cpp(fb->pixel_format, 0); 2900 + return width * height * drm_format_plane_cpp(fb->pixel_format, 0); 2898 2901 } 2899 2902 2900 2903 /* ··· 2976 2973 struct drm_framebuffer *fb = plane->state->fb; 2977 2974 int id = skl_wm_plane_id(intel_plane); 2978 2975 2979 - if (fb == NULL) 2976 + if (!to_intel_plane_state(plane->state)->visible) 2980 2977 continue; 2978 + 2981 2979 if (plane->type == DRM_PLANE_TYPE_CURSOR) 2982 2980 continue; 2983 2981 ··· 3004 3000 uint16_t plane_blocks, y_plane_blocks = 0; 3005 3001 int id = skl_wm_plane_id(intel_plane); 3006 3002 3007 - if (pstate->fb == NULL) 3003 + if (!to_intel_plane_state(pstate)->visible) 3008 3004 continue; 3009 3005 if (plane->type == DRM_PLANE_TYPE_CURSOR) 3010 3006 continue; ··· 3127 3123 { 3128 
3124 struct drm_plane *plane = &intel_plane->base; 3129 3125 struct drm_framebuffer *fb = plane->state->fb; 3126 + struct intel_plane_state *intel_pstate = 3127 + to_intel_plane_state(plane->state); 3130 3128 uint32_t latency = dev_priv->wm.skl_latency[level]; 3131 3129 uint32_t method1, method2; 3132 3130 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3133 3131 uint32_t res_blocks, res_lines; 3134 3132 uint32_t selected_result; 3135 3133 uint8_t cpp; 3134 + uint32_t width = 0, height = 0; 3136 3135 3137 - if (latency == 0 || !cstate->base.active || !fb) 3136 + if (latency == 0 || !cstate->base.active || !intel_pstate->visible) 3138 3137 return false; 3138 + 3139 + width = drm_rect_width(&intel_pstate->src) >> 16; 3140 + height = drm_rect_height(&intel_pstate->src) >> 16; 3141 + 3142 + if (intel_rotation_90_or_270(plane->state->rotation)) 3143 + swap(width, height); 3139 3144 3140 3145 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3141 3146 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3142 3147 cpp, latency); 3143 3148 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3144 3149 cstate->base.adjusted_mode.crtc_htotal, 3145 - cstate->pipe_src_w, 3146 - cpp, fb->modifier[0], 3150 + width, 3151 + cpp, 3152 + fb->modifier[0], 3147 3153 latency); 3148 3154 3149 - plane_bytes_per_line = cstate->pipe_src_w * cpp; 3155 + plane_bytes_per_line = width * cpp; 3150 3156 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3151 3157 3152 3158 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+11 -7
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 968 968 969 969 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 970 970 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 971 - if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) || 971 + if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || 972 972 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) 973 973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 974 974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); ··· 1085 1085 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1086 1086 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1087 1087 1088 - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) { 1088 + /* This is tied to WaForceContextSaveRestoreNonCoherent */ 1089 + if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { 1089 1090 /* 1090 1091 *Use Force Non-Coherent whenever executing a 3D context. This 1091 1092 * is a workaround for a possible hang in the unlikely event ··· 2091 2090 { 2092 2091 struct drm_i915_private *dev_priv = to_i915(dev); 2093 2092 struct drm_i915_gem_object *obj = ringbuf->obj; 2093 + /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 2094 + unsigned flags = PIN_OFFSET_BIAS | 4096; 2094 2095 int ret; 2095 2096 2096 2097 if (HAS_LLC(dev_priv) && !obj->stolen) { 2097 - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0); 2098 + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags); 2098 2099 if (ret) 2099 2100 return ret; 2100 2101 ··· 2112 2109 return -ENOMEM; 2113 2110 } 2114 2111 } else { 2115 - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 2112 + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 2113 + flags | PIN_MAPPABLE); 2116 2114 if (ret) 2117 2115 return ret; 2118 2116 ··· 2458 2454 if (unlikely(total_bytes > remain_usable)) { 2459 2455 /* 2460 2456 * The base request will fit but the reserved space 2461 - * falls off the end. So only need to to wait for the 2462 - * reserved size after flushing out the remainder. 2457 + * falls off the end. 
So don't need an immediate wrap 2458 + * and only need to effectively wait for the reserved 2459 + * size space from the start of ringbuffer. 2463 2460 */ 2464 2461 wait_bytes = remain_actual + ringbuf->reserved_size; 2465 - need_wrap = true; 2466 2462 } else if (total_bytes > ringbuf->space) { 2467 2463 /* No wrapping required, just waiting. */ 2468 2464 wait_bytes = total_bytes;
+5 -1
drivers/gpu/drm/i915/intel_uncore.c
··· 1189 1189 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1190 1190 dev_priv->uncore.funcs.force_wake_get = 1191 1191 fw_domains_get_with_thread_status; 1192 - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1192 + if (IS_HASWELL(dev)) 1193 + dev_priv->uncore.funcs.force_wake_put = 1194 + fw_domains_put_with_fifo; 1195 + else 1196 + dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1193 1197 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1194 1198 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1195 1199 } else if (IS_IVYBRIDGE(dev)) {
+2 -2
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 1276 1276 break; 1277 1277 default: 1278 1278 if (disp->dithering_mode) { 1279 + nv_connector->dithering_mode = DITHERING_MODE_AUTO; 1279 1280 drm_object_attach_property(&connector->base, 1280 1281 disp->dithering_mode, 1281 1282 nv_connector-> 1282 1283 dithering_mode); 1283 - nv_connector->dithering_mode = DITHERING_MODE_AUTO; 1284 1284 } 1285 1285 if (disp->dithering_depth) { 1286 + nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; 1286 1287 drm_object_attach_property(&connector->base, 1287 1288 disp->dithering_depth, 1288 1289 nv_connector-> 1289 1290 dithering_depth); 1290 - nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; 1291 1291 } 1292 1292 break; 1293 1293 }
+2
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
··· 1832 1832 1833 1833 gf100_gr_mmio(gr, gr->func->mmio); 1834 1834 1835 + nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001); 1836 + 1835 1837 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1836 1838 for (i = 0, gpc = -1; i < gr->tpc_total; i++) { 1837 1839 do {
+153 -1
drivers/gpu/drm/radeon/evergreen.c
··· 2608 2608 WREG32(VM_CONTEXT1_CNTL, 0); 2609 2609 } 2610 2610 2611 + static const unsigned ni_dig_offsets[] = 2612 + { 2613 + NI_DIG0_REGISTER_OFFSET, 2614 + NI_DIG1_REGISTER_OFFSET, 2615 + NI_DIG2_REGISTER_OFFSET, 2616 + NI_DIG3_REGISTER_OFFSET, 2617 + NI_DIG4_REGISTER_OFFSET, 2618 + NI_DIG5_REGISTER_OFFSET 2619 + }; 2620 + 2621 + static const unsigned ni_tx_offsets[] = 2622 + { 2623 + NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1, 2624 + NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1, 2625 + NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1, 2626 + NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1, 2627 + NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1, 2628 + NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 2629 + }; 2630 + 2631 + static const unsigned evergreen_dp_offsets[] = 2632 + { 2633 + EVERGREEN_DP0_REGISTER_OFFSET, 2634 + EVERGREEN_DP1_REGISTER_OFFSET, 2635 + EVERGREEN_DP2_REGISTER_OFFSET, 2636 + EVERGREEN_DP3_REGISTER_OFFSET, 2637 + EVERGREEN_DP4_REGISTER_OFFSET, 2638 + EVERGREEN_DP5_REGISTER_OFFSET 2639 + }; 2640 + 2641 + 2642 + /* 2643 + * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc 2644 + * We go from crtc to connector and it is not relible since it 2645 + * should be an opposite direction .If crtc is enable then 2646 + * find the dig_fe which selects this crtc and insure that it enable. 2647 + * if such dig_fe is found then find dig_be which selects found dig_be and 2648 + * insure that it enable and in DP_SST mode. 2649 + * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing 2650 + * from dp symbols clocks . 
2651 + */ 2652 + static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev, 2653 + unsigned crtc_id, unsigned *ret_dig_fe) 2654 + { 2655 + unsigned i; 2656 + unsigned dig_fe; 2657 + unsigned dig_be; 2658 + unsigned dig_en_be; 2659 + unsigned uniphy_pll; 2660 + unsigned digs_fe_selected; 2661 + unsigned dig_be_mode; 2662 + unsigned dig_fe_mask; 2663 + bool is_enabled = false; 2664 + bool found_crtc = false; 2665 + 2666 + /* loop through all running dig_fe to find selected crtc */ 2667 + for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) { 2668 + dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]); 2669 + if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON && 2670 + crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) { 2671 + /* found running pipe */ 2672 + found_crtc = true; 2673 + dig_fe_mask = 1 << i; 2674 + dig_fe = i; 2675 + break; 2676 + } 2677 + } 2678 + 2679 + if (found_crtc) { 2680 + /* loop through all running dig_be to find selected dig_fe */ 2681 + for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) { 2682 + dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]); 2683 + /* if dig_fe_selected by dig_be? */ 2684 + digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be); 2685 + dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be); 2686 + if (dig_fe_mask & digs_fe_selected && 2687 + /* if dig_be in sst mode? 
*/ 2688 + dig_be_mode == NI_DIG_BE_DPSST) { 2689 + dig_en_be = RREG32(NI_DIG_BE_EN_CNTL + 2690 + ni_dig_offsets[i]); 2691 + uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 + 2692 + ni_tx_offsets[i]); 2693 + /* dig_be enable and tx is running */ 2694 + if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE && 2695 + dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON && 2696 + uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) { 2697 + is_enabled = true; 2698 + *ret_dig_fe = dig_fe; 2699 + break; 2700 + } 2701 + } 2702 + } 2703 + } 2704 + 2705 + return is_enabled; 2706 + } 2707 + 2708 + /* 2709 + * Blank dig when in dp sst mode 2710 + * Dig ignores crtc timing 2711 + */ 2712 + static void evergreen_blank_dp_output(struct radeon_device *rdev, 2713 + unsigned dig_fe) 2714 + { 2715 + unsigned stream_ctrl; 2716 + unsigned fifo_ctrl; 2717 + unsigned counter = 0; 2718 + 2719 + if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) { 2720 + DRM_ERROR("invalid dig_fe %d\n", dig_fe); 2721 + return; 2722 + } 2723 + 2724 + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + 2725 + evergreen_dp_offsets[dig_fe]); 2726 + if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) { 2727 + DRM_ERROR("dig %d , should be enable\n", dig_fe); 2728 + return; 2729 + } 2730 + 2731 + stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE; 2732 + WREG32(EVERGREEN_DP_VID_STREAM_CNTL + 2733 + evergreen_dp_offsets[dig_fe], stream_ctrl); 2734 + 2735 + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + 2736 + evergreen_dp_offsets[dig_fe]); 2737 + while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) { 2738 + msleep(1); 2739 + counter++; 2740 + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + 2741 + evergreen_dp_offsets[dig_fe]); 2742 + } 2743 + if (counter >= 32 ) 2744 + DRM_ERROR("counter exceeds %d\n", counter); 2745 + 2746 + fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]); 2747 + fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET; 2748 + WREG32(EVERGREEN_DP_STEER_FIFO + 
evergreen_dp_offsets[dig_fe], fifo_ctrl); 2749 + 2750 + } 2751 + 2611 2752 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 2612 2753 { 2613 2754 u32 crtc_enabled, tmp, frame_count, blackout; 2614 2755 int i, j; 2756 + unsigned dig_fe; 2615 2757 2616 2758 if (!ASIC_IS_NODCE(rdev)) { 2617 2759 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); ··· 2793 2651 break; 2794 2652 udelay(1); 2795 2653 } 2796 - 2654 + /*we should disable dig if it drives dp sst*/ 2655 + /*but we are in radeon_device_init and the topology is unknown*/ 2656 + /*and it is available after radeon_modeset_init*/ 2657 + /*the following method radeon_atom_encoder_dpms_dig*/ 2658 + /*does the job if we initialize it properly*/ 2659 + /*for now we do it this manually*/ 2660 + /**/ 2661 + if (ASIC_IS_DCE5(rdev) && 2662 + evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe)) 2663 + evergreen_blank_dp_output(rdev, dig_fe); 2664 + /*we could remove 6 lines below*/ 2797 2665 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ 2798 2666 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2799 2667 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+46
drivers/gpu/drm/radeon/evergreen_reg.h
··· 250 250 251 251 /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ 252 252 #define EVERGREEN_HDMI_BASE 0x7030 253 + /*DIG block*/ 254 + #define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000) 255 + #define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000) 256 + #define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000) 257 + #define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000) 258 + #define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000) 259 + #define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000) 260 + 261 + 262 + #define NI_DIG_FE_CNTL 0x7000 263 + # define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3) 264 + # define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24) 265 + 266 + 267 + #define NI_DIG_BE_CNTL 0x7140 268 + # define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F) 269 + # define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 ) 270 + 271 + #define NI_DIG_BE_EN_CNTL 0x7144 272 + # define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0) 273 + # define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8) 274 + # define NI_DIG_BE_DPSST 0 253 275 254 276 /* Display Port block */ 277 + #define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C) 278 + #define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C) 279 + #define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C) 280 + #define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C) 281 + #define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C) 282 + #define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C) 283 + 284 + 285 + #define EVERGREEN_DP_VID_STREAM_CNTL 0x730C 286 + # define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0) 287 + # define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16) 288 + #define EVERGREEN_DP_STEER_FIFO 0x7310 289 + # define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0) 255 290 #define EVERGREEN_DP_SEC_CNTL 0x7280 256 291 # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) 257 292 # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) ··· 300 265 #define EVERGREEN_DP_SEC_AUD_N 0x7294 301 266 # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) 302 267 # define 
EVERGREEN_DP_SEC_SS_EN (1 << 28) 268 + 269 + /*DCIO_UNIPHY block*/ 270 + #define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600) 271 + #define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600) 272 + #define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600) 273 + #define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600) 274 + #define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600) 275 + #define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600) 276 + 277 + #define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618 278 + # define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0) 303 279 304 280 #endif
+7 -4
drivers/gpu/drm/radeon/radeon_atpx_handler.c
··· 62 62 return radeon_atpx_priv.atpx_detected; 63 63 } 64 64 65 - bool radeon_has_atpx_dgpu_power_cntl(void) { 66 - return radeon_atpx_priv.atpx.functions.power_cntl; 67 - } 68 - 69 65 /** 70 66 * radeon_atpx_call - call an ATPX method 71 67 * ··· 141 145 */ 142 146 static int radeon_atpx_validate(struct radeon_atpx *atpx) 143 147 { 148 + /* make sure required functions are enabled */ 149 + /* dGPU power control is required */ 150 + if (atpx->functions.power_cntl == false) { 151 + printk("ATPX dGPU power cntl not present, forcing\n"); 152 + atpx->functions.power_cntl = true; 153 + } 154 + 144 155 if (atpx->functions.px_params) { 145 156 union acpi_object *info; 146 157 struct atpx_px_params output;
+6 -1
drivers/gpu/drm/radeon/radeon_connectors.c
··· 2002 2002 rdev->mode_info.dither_property, 2003 2003 RADEON_FMT_DITHER_DISABLE); 2004 2004 2005 - if (radeon_audio != 0) 2005 + if (radeon_audio != 0) { 2006 2006 drm_object_attach_property(&radeon_connector->base.base, 2007 2007 rdev->mode_info.audio_property, 2008 2008 RADEON_AUDIO_AUTO); 2009 + radeon_connector->audio = RADEON_AUDIO_AUTO; 2010 + } 2009 2011 if (ASIC_IS_DCE5(rdev)) 2010 2012 drm_object_attach_property(&radeon_connector->base.base, 2011 2013 rdev->mode_info.output_csc_property, ··· 2132 2130 drm_object_attach_property(&radeon_connector->base.base, 2133 2131 rdev->mode_info.audio_property, 2134 2132 RADEON_AUDIO_AUTO); 2133 + radeon_connector->audio = RADEON_AUDIO_AUTO; 2135 2134 } 2136 2135 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 2137 2136 radeon_connector->dac_load_detect = true; ··· 2188 2185 drm_object_attach_property(&radeon_connector->base.base, 2189 2186 rdev->mode_info.audio_property, 2190 2187 RADEON_AUDIO_AUTO); 2188 + radeon_connector->audio = RADEON_AUDIO_AUTO; 2191 2189 } 2192 2190 if (ASIC_IS_DCE5(rdev)) 2193 2191 drm_object_attach_property(&radeon_connector->base.base, ··· 2241 2237 drm_object_attach_property(&radeon_connector->base.base, 2242 2238 rdev->mode_info.audio_property, 2243 2239 RADEON_AUDIO_AUTO); 2240 + radeon_connector->audio = RADEON_AUDIO_AUTO; 2244 2241 } 2245 2242 if (ASIC_IS_DCE5(rdev)) 2246 2243 drm_object_attach_property(&radeon_connector->base.base,
+4 -10
drivers/gpu/drm/radeon/radeon_device.c
··· 103 103 "LAST", 104 104 }; 105 105 106 - #if defined(CONFIG_VGA_SWITCHEROO) 107 - bool radeon_has_atpx_dgpu_power_cntl(void); 108 - #else 109 - static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } 110 - #endif 111 - 112 106 #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 113 107 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) 114 108 ··· 1299 1305 } 1300 1306 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); 1301 1307 1302 - DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 1303 - radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1304 - pdev->subsystem_vendor, pdev->subsystem_device); 1308 + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 1309 + radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1310 + pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 1305 1311 1306 1312 /* mutex initialization are all done here so we 1307 1313 * can recall function without having locking issues */ ··· 1433 1439 * ignore it */ 1434 1440 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1435 1441 1436 - if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl()) 1442 + if (rdev->flags & RADEON_IS_PX) 1437 1443 runtime = true; 1438 1444 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime); 1439 1445 if (runtime)
+2
drivers/gpu/drm/radeon/radeon_ttm.c
··· 235 235 { 236 236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 237 237 238 + if (radeon_ttm_tt_has_userptr(bo->ttm)) 239 + return -EPERM; 238 240 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 239 241 } 240 242
+1
drivers/gpu/drm/radeon/si_dpm.c
··· 2931 2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, 2932 2932 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 2933 2933 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, 2934 + { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 }, 2934 2935 { 0, 0, 0, 0 }, 2935 2936 }; 2936 2937
+4 -13
drivers/gpu/drm/ttm/ttm_bo.c
··· 230 230 231 231 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) 232 232 { 233 - struct ttm_bo_device *bdev = bo->bdev; 234 - struct ttm_mem_type_manager *man; 233 + int put_count = 0; 235 234 236 235 lockdep_assert_held(&bo->resv->lock.base); 237 236 238 - if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { 239 - list_del_init(&bo->swap); 240 - list_del_init(&bo->lru); 241 - 242 - } else { 243 - if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) 244 - list_move_tail(&bo->swap, &bo->glob->swap_lru); 245 - 246 - man = &bdev->man[bo->mem.mem_type]; 247 - list_move_tail(&bo->lru, &man->lru); 248 - } 237 + put_count = ttm_bo_del_from_lru(bo); 238 + ttm_bo_list_ref_sub(bo, put_count, true); 239 + ttm_bo_add_to_lru(bo); 249 240 } 250 241 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); 251 242
+12
drivers/gpu/drm/virtio/virtgpu_display.c
··· 267 267 return 0; 268 268 } 269 269 270 + static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, 271 + struct drm_crtc_state *old_state) 272 + { 273 + unsigned long flags; 274 + 275 + spin_lock_irqsave(&crtc->dev->event_lock, flags); 276 + if (crtc->state->event) 277 + drm_crtc_send_vblank_event(crtc, crtc->state->event); 278 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 279 + } 280 + 270 281 static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { 271 282 .enable = virtio_gpu_crtc_enable, 272 283 .disable = virtio_gpu_crtc_disable, 273 284 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, 274 285 .atomic_check = virtio_gpu_crtc_atomic_check, 286 + .atomic_flush = virtio_gpu_crtc_atomic_flush, 275 287 }; 276 288 277 289 static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
+5 -5
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 3293 3293 &vmw_cmd_dx_cid_check, true, false, true), 3294 3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, 3295 3295 true, false, true), 3296 - VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok, 3296 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check, 3297 3297 true, false, true), 3298 3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, 3299 3299 true, false, true), 3300 3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, 3301 - &vmw_cmd_ok, true, false, true), 3302 - VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok, 3301 + &vmw_cmd_dx_cid_check, true, false, true), 3302 + VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check, 3303 3303 true, false, true), 3304 - VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok, 3304 + VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check, 3305 3305 true, false, true), 3306 3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, 3307 3307 true, false, true), 3308 - VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, 3308 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check, 3309 3309 true, false, true), 3310 3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, 3311 3311 true, false, true),
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 573 573 mode = old_mode; 574 574 old_mode = NULL; 575 575 } else if (!vmw_kms_validate_mode_vram(vmw_priv, 576 - mode->hdisplay * 577 - (var->bits_per_pixel + 7) / 8, 578 - mode->vdisplay)) { 576 + mode->hdisplay * 577 + DIV_ROUND_UP(var->bits_per_pixel, 8), 578 + mode->vdisplay)) { 579 579 drm_mode_destroy(vmw_priv->dev, mode); 580 580 return -EINVAL; 581 581 }
+2 -2
drivers/i2c/busses/Kconfig
··· 975 975 976 976 config I2C_XLP9XX 977 977 tristate "XLP9XX I2C support" 978 - depends on CPU_XLP || COMPILE_TEST 978 + depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST 979 979 help 980 980 This driver enables support for the on-chip I2C interface of 981 - the Broadcom XLP9xx/XLP5xx MIPS processors. 981 + the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors. 982 982 983 983 This driver can also be built as a module. If so, the module will 984 984 be called i2c-xlp9xx.
+2 -2
drivers/i2c/busses/i2c-cpm.c
··· 116 116 cbd_t __iomem *rbase; 117 117 u_char *txbuf[CPM_MAXBD]; 118 118 u_char *rxbuf[CPM_MAXBD]; 119 - u32 txdma[CPM_MAXBD]; 120 - u32 rxdma[CPM_MAXBD]; 119 + dma_addr_t txdma[CPM_MAXBD]; 120 + dma_addr_t rxdma[CPM_MAXBD]; 121 121 }; 122 122 123 123 static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
+19 -5
drivers/i2c/busses/i2c-exynos5.c
··· 671 671 return -EIO; 672 672 } 673 673 674 - clk_prepare_enable(i2c->clk); 674 + ret = clk_enable(i2c->clk); 675 + if (ret) 676 + return ret; 675 677 676 678 for (i = 0; i < num; i++, msgs++) { 677 679 stop = (i == num - 1); ··· 697 695 } 698 696 699 697 out: 700 - clk_disable_unprepare(i2c->clk); 698 + clk_disable(i2c->clk); 701 699 return ret; 702 700 } 703 701 ··· 749 747 return -ENOENT; 750 748 } 751 749 752 - clk_prepare_enable(i2c->clk); 750 + ret = clk_prepare_enable(i2c->clk); 751 + if (ret) 752 + return ret; 753 753 754 754 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 755 755 i2c->regs = devm_ioremap_resource(&pdev->dev, mem); ··· 803 799 804 800 platform_set_drvdata(pdev, i2c); 805 801 802 + clk_disable(i2c->clk); 803 + 804 + return 0; 805 + 806 806 err_clk: 807 807 clk_disable_unprepare(i2c->clk); 808 808 return ret; ··· 817 809 struct exynos5_i2c *i2c = platform_get_drvdata(pdev); 818 810 819 811 i2c_del_adapter(&i2c->adap); 812 + 813 + clk_unprepare(i2c->clk); 820 814 821 815 return 0; 822 816 } ··· 831 821 832 822 i2c->suspended = 1; 833 823 824 + clk_unprepare(i2c->clk); 825 + 834 826 return 0; 835 827 } 836 828 ··· 842 830 struct exynos5_i2c *i2c = platform_get_drvdata(pdev); 843 831 int ret = 0; 844 832 845 - clk_prepare_enable(i2c->clk); 833 + ret = clk_prepare_enable(i2c->clk); 834 + if (ret) 835 + return ret; 846 836 847 837 ret = exynos5_hsi2c_clock_setup(i2c); 848 838 if (ret) { ··· 853 839 } 854 840 855 841 exynos5_i2c_init(i2c); 856 - clk_disable_unprepare(i2c->clk); 842 + clk_disable(i2c->clk); 857 843 i2c->suspended = 0; 858 844 859 845 return 0;
+2
drivers/i2c/busses/i2c-ismt.c
··· 75 75 /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ 76 76 #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 77 77 #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a 78 + #define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac 78 79 #define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 79 80 80 81 #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ ··· 181 180 static const struct pci_device_id ismt_ids[] = { 182 181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, 183 182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, 183 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) }, 184 184 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, 185 185 { 0, } 186 186 };
+1
drivers/i2c/busses/i2c-rk3x.c
··· 855 855 static const struct of_device_id rk3x_i2c_match[] = { 856 856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] }, 857 857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] }, 858 + { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] }, 858 859 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] }, 859 860 {}, 860 861 };
+2 -1
drivers/infiniband/core/cache.c
··· 691 691 NULL); 692 692 693 693 /* Coudn't find default GID location */ 694 - WARN_ON(ix < 0); 694 + if (WARN_ON(ix < 0)) 695 + goto release; 695 696 696 697 zattr_type.gid_type = gid_type; 697 698
+4
drivers/infiniband/core/ucm.c
··· 48 48 49 49 #include <asm/uaccess.h> 50 50 51 + #include <rdma/ib.h> 51 52 #include <rdma/ib_cm.h> 52 53 #include <rdma/ib_user_cm.h> 53 54 #include <rdma/ib_marshall.h> ··· 1103 1102 struct ib_ucm_file *file = filp->private_data; 1104 1103 struct ib_ucm_cmd_hdr hdr; 1105 1104 ssize_t result; 1105 + 1106 + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) 1107 + return -EACCES; 1106 1108 1107 1109 if (len < sizeof(hdr)) 1108 1110 return -EINVAL;
+3
drivers/infiniband/core/ucma.c
··· 1574 1574 struct rdma_ucm_cmd_hdr hdr; 1575 1575 ssize_t ret; 1576 1576 1577 + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) 1578 + return -EACCES; 1579 + 1577 1580 if (len < sizeof(hdr)) 1578 1581 return -EINVAL; 1579 1582
+5
drivers/infiniband/core/uverbs_main.c
··· 48 48 49 49 #include <asm/uaccess.h> 50 50 51 + #include <rdma/ib.h> 52 + 51 53 #include "uverbs.h" 52 54 53 55 MODULE_AUTHOR("Roland Dreier"); ··· 710 708 __u32 flags; 711 709 int srcu_key; 712 710 ssize_t ret; 711 + 712 + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) 713 + return -EACCES; 713 714 714 715 if (count < sizeof hdr) 715 716 return -EINVAL;
+2 -1
drivers/infiniband/core/verbs.c
··· 1860 1860 void ib_drain_qp(struct ib_qp *qp) 1861 1861 { 1862 1862 ib_drain_sq(qp); 1863 - ib_drain_rq(qp); 1863 + if (!qp->srq) 1864 + ib_drain_rq(qp); 1864 1865 } 1865 1866 EXPORT_SYMBOL(ib_drain_qp);
+2
drivers/infiniband/hw/cxgb3/iwch_provider.c
··· 1390 1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; 1391 1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; 1392 1392 dev->ibdev.iwcm->get_qp = iwch_get_qp; 1393 + memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name, 1394 + sizeof(dev->ibdev.iwcm->ifname)); 1393 1395 1394 1396 ret = ib_register_device(&dev->ibdev, NULL); 1395 1397 if (ret)
+1 -1
drivers/infiniband/hw/cxgb4/cq.c
··· 162 162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, 163 163 &cq->bar2_qid, 164 164 user ? &cq->bar2_pa : NULL); 165 - if (user && !cq->bar2_va) { 165 + if (user && !cq->bar2_pa) { 166 166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", 167 167 pci_name(rdev->lldi.pdev), cq->cqid); 168 168 ret = -EINVAL;
+2
drivers/infiniband/hw/cxgb4/provider.c
··· 580 580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; 581 581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; 582 582 dev->ibdev.iwcm->get_qp = c4iw_get_qp; 583 + memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name, 584 + sizeof(dev->ibdev.iwcm->ifname)); 583 585 584 586 ret = ib_register_device(&dev->ibdev, NULL); 585 587 if (ret)
+21 -3
drivers/infiniband/hw/cxgb4/qp.c
··· 185 185 186 186 if (pbar2_pa) 187 187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; 188 + 189 + if (is_t4(rdev->lldi.adapter_type)) 190 + return NULL; 191 + 188 192 return rdev->bar2_kva + bar2_qoffset; 189 193 } 190 194 ··· 274 270 /* 275 271 * User mode must have bar2 access. 276 272 */ 277 - if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) { 273 + if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) { 278 274 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", 279 275 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); 280 276 goto free_dma; ··· 1899 1895 void c4iw_drain_sq(struct ib_qp *ibqp) 1900 1896 { 1901 1897 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1898 + unsigned long flag; 1899 + bool need_to_wait; 1902 1900 1903 - wait_for_completion(&qp->sq_drained); 1901 + spin_lock_irqsave(&qp->lock, flag); 1902 + need_to_wait = !t4_sq_empty(&qp->wq); 1903 + spin_unlock_irqrestore(&qp->lock, flag); 1904 + 1905 + if (need_to_wait) 1906 + wait_for_completion(&qp->sq_drained); 1904 1907 } 1905 1908 1906 1909 void c4iw_drain_rq(struct ib_qp *ibqp) 1907 1910 { 1908 1911 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1912 + unsigned long flag; 1913 + bool need_to_wait; 1909 1914 1910 - wait_for_completion(&qp->rq_drained); 1915 + spin_lock_irqsave(&qp->lock, flag); 1916 + need_to_wait = !t4_rq_empty(&qp->wq); 1917 + spin_unlock_irqrestore(&qp->lock, flag); 1918 + 1919 + if (need_to_wait) 1920 + wait_for_completion(&qp->rq_drained); 1911 1921 }
+3 -3
drivers/infiniband/hw/mlx5/main.c
··· 530 530 sizeof(struct mlx5_wqe_ctrl_seg)) / 531 531 sizeof(struct mlx5_wqe_data_seg); 532 532 props->max_sge = min(max_rq_sg, max_sq_sg); 533 - props->max_sge_rd = props->max_sge; 533 + props->max_sge_rd = MLX5_MAX_SGE_RD; 534 534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); 535 535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; 536 536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); ··· 671 671 struct mlx5_ib_dev *dev = to_mdev(ibdev); 672 672 struct mlx5_core_dev *mdev = dev->mdev; 673 673 struct mlx5_hca_vport_context *rep; 674 - int max_mtu; 675 - int oper_mtu; 674 + u16 max_mtu; 675 + u16 oper_mtu; 676 676 int err; 677 677 u8 ib_link_width_oper; 678 678 u8 vl_hw_cap;
-3
drivers/infiniband/hw/nes/nes_nic.c
··· 500 500 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); 501 501 */ 502 502 503 - if (!netif_carrier_ok(netdev)) 504 - return NETDEV_TX_OK; 505 - 506 503 if (netif_queue_stopped(netdev)) 507 504 return NETDEV_TX_BUSY; 508 505
+5
drivers/infiniband/hw/qib/qib_file_ops.c
··· 45 45 #include <linux/export.h> 46 46 #include <linux/uio.h> 47 47 48 + #include <rdma/ib.h> 49 + 48 50 #include "qib.h" 49 51 #include "qib_common.h" 50 52 #include "qib_user_sdma.h" ··· 2068 2066 struct qib_cmd cmd; 2069 2067 ssize_t ret = 0; 2070 2068 void *dest; 2069 + 2070 + if (WARN_ON_ONCE(!ib_safe_file_access(fp))) 2071 + return -EACCES; 2071 2072 2072 2073 if (count < sizeof(cmd.type)) { 2073 2074 ret = -EINVAL;
+2 -2
drivers/infiniband/sw/rdmavt/qp.c
··· 1637 1637 spin_unlock_irqrestore(&qp->s_hlock, flags); 1638 1638 if (nreq) { 1639 1639 if (call_send) 1640 - rdi->driver_f.schedule_send_no_lock(qp); 1641 - else 1642 1640 rdi->driver_f.do_send(qp); 1641 + else 1642 + rdi->driver_f.schedule_send_no_lock(qp); 1643 1643 } 1644 1644 return err; 1645 1645 }
+2
drivers/input/joystick/xpad.c
··· 153 153 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 154 154 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 155 155 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, 156 + { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, 156 157 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 157 158 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, 158 159 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, ··· 305 304 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ 306 305 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ 307 306 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ 307 + XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ 308 308 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ 309 309 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ 310 310 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
-1
drivers/input/misc/arizona-haptics.c
··· 178 178 input_set_drvdata(haptics->input_dev, haptics); 179 179 180 180 haptics->input_dev->name = "arizona:haptics"; 181 - haptics->input_dev->dev.parent = pdev->dev.parent; 182 181 haptics->input_dev->close = arizona_haptics_close; 183 182 __set_bit(FF_RUMBLE, haptics->input_dev->ffbit); 184 183
+4 -3
drivers/input/misc/pmic8xxx-pwrkey.c
··· 353 353 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay)) 354 354 kpd_delay = 15625; 355 355 356 - if (kpd_delay > 62500 || kpd_delay == 0) { 356 + /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */ 357 + if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) { 357 358 dev_err(&pdev->dev, "invalid power key trigger delay\n"); 358 359 return -EINVAL; 359 360 } ··· 386 385 pwr->name = "pmic8xxx_pwrkey"; 387 386 pwr->phys = "pmic8xxx_pwrkey/input0"; 388 387 389 - delay = (kpd_delay << 10) / USEC_PER_SEC; 390 - delay = 1 + ilog2(delay); 388 + delay = (kpd_delay << 6) / USEC_PER_SEC; 389 + delay = ilog2(delay); 391 390 392 391 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl); 393 392 if (err < 0) {
-1
drivers/input/misc/twl4030-vibra.c
··· 222 222 223 223 info->input_dev->name = "twl4030:vibrator"; 224 224 info->input_dev->id.version = 1; 225 - info->input_dev->dev.parent = pdev->dev.parent; 226 225 info->input_dev->close = twl4030_vibra_close; 227 226 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 228 227
+1 -7
drivers/input/misc/twl6040-vibra.c
··· 45 45 struct vibra_info { 46 46 struct device *dev; 47 47 struct input_dev *input_dev; 48 - struct workqueue_struct *workqueue; 49 48 struct work_struct play_work; 50 49 struct mutex mutex; 51 50 int irq; ··· 212 213 info->strong_speed = effect->u.rumble.strong_magnitude; 213 214 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1; 214 215 215 - ret = queue_work(info->workqueue, &info->play_work); 216 - if (!ret) { 217 - dev_info(&input->dev, "work is already on queue\n"); 218 - return ret; 219 - } 216 + schedule_work(&info->play_work); 220 217 221 218 return 0; 222 219 } ··· 357 362 358 363 info->input_dev->name = "twl6040:vibrator"; 359 364 info->input_dev->id.version = 1; 360 - info->input_dev->dev.parent = pdev->dev.parent; 361 365 info->input_dev->close = twl6040_vibra_close; 362 366 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 363 367
+9 -1
drivers/input/tablet/gtco.c
··· 858 858 goto err_free_buf; 859 859 } 860 860 861 + /* Sanity check that a device has an endpoint */ 862 + if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) { 863 + dev_err(&usbinterface->dev, 864 + "Invalid number of endpoints\n"); 865 + error = -EINVAL; 866 + goto err_free_urb; 867 + } 868 + 861 869 /* 862 870 * The endpoint is always altsetting 0, we know this since we know 863 871 * this device only has one interrupt endpoint ··· 887 879 * HID report descriptor 888 880 */ 889 881 if (usb_get_extra_descriptor(usbinterface->cur_altsetting, 890 - HID_DEVICE_TYPE, &hid_desc) != 0){ 882 + HID_DEVICE_TYPE, &hid_desc) != 0) { 891 883 dev_err(&usbinterface->dev, 892 884 "Can't retrieve exta USB descriptor to get hid report descriptor length\n"); 893 885 error = -EIO;
+76 -11
drivers/iommu/amd_iommu.c
··· 92 92 struct list_head dev_data_list; /* For global dev_data_list */ 93 93 struct protection_domain *domain; /* Domain the device is bound to */ 94 94 u16 devid; /* PCI Device ID */ 95 + u16 alias; /* Alias Device ID */ 95 96 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 97 bool passthrough; /* Device is identity mapped */ 97 98 struct { ··· 167 166 return container_of(dom, struct protection_domain, domain); 168 167 } 169 168 169 + static inline u16 get_device_id(struct device *dev) 170 + { 171 + struct pci_dev *pdev = to_pci_dev(dev); 172 + 173 + return PCI_DEVID(pdev->bus->number, pdev->devfn); 174 + } 175 + 170 176 static struct iommu_dev_data *alloc_dev_data(u16 devid) 171 177 { 172 178 struct iommu_dev_data *dev_data; ··· 211 203 return dev_data; 212 204 } 213 205 206 + static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) 207 + { 208 + *(u16 *)data = alias; 209 + return 0; 210 + } 211 + 212 + static u16 get_alias(struct device *dev) 213 + { 214 + struct pci_dev *pdev = to_pci_dev(dev); 215 + u16 devid, ivrs_alias, pci_alias; 216 + 217 + devid = get_device_id(dev); 218 + ivrs_alias = amd_iommu_alias_table[devid]; 219 + pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); 220 + 221 + if (ivrs_alias == pci_alias) 222 + return ivrs_alias; 223 + 224 + /* 225 + * DMA alias showdown 226 + * 227 + * The IVRS is fairly reliable in telling us about aliases, but it 228 + * can't know about every screwy device. If we don't have an IVRS 229 + * reported alias, use the PCI reported alias. In that case we may 230 + * still need to initialize the rlookup and dev_table entries if the 231 + * alias is to a non-existent device. 
232 + */ 233 + if (ivrs_alias == devid) { 234 + if (!amd_iommu_rlookup_table[pci_alias]) { 235 + amd_iommu_rlookup_table[pci_alias] = 236 + amd_iommu_rlookup_table[devid]; 237 + memcpy(amd_iommu_dev_table[pci_alias].data, 238 + amd_iommu_dev_table[devid].data, 239 + sizeof(amd_iommu_dev_table[pci_alias].data)); 240 + } 241 + 242 + return pci_alias; 243 + } 244 + 245 + pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d " 246 + "for device %s[%04x:%04x], kernel reported alias " 247 + "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias), 248 + PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device, 249 + PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias), 250 + PCI_FUNC(pci_alias)); 251 + 252 + /* 253 + * If we don't have a PCI DMA alias and the IVRS alias is on the same 254 + * bus, then the IVRS table may know about a quirk that we don't. 255 + */ 256 + if (pci_alias == devid && 257 + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { 258 + pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; 259 + pdev->dma_alias_devfn = ivrs_alias & 0xff; 260 + pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n", 261 + PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias), 262 + dev_name(dev)); 263 + } 264 + 265 + return ivrs_alias; 266 + } 267 + 214 268 static struct iommu_dev_data *find_dev_data(u16 devid) 215 269 { 216 270 struct iommu_dev_data *dev_data; ··· 283 213 dev_data = alloc_dev_data(devid); 284 214 285 215 return dev_data; 286 - } 287 - 288 - static inline u16 get_device_id(struct device *dev) 289 - { 290 - struct pci_dev *pdev = to_pci_dev(dev); 291 - 292 - return PCI_DEVID(pdev->bus->number, pdev->devfn); 293 216 } 294 217 295 218 static struct iommu_dev_data *get_dev_data(struct device *dev) ··· 412 349 if (!dev_data) 413 350 return -ENOMEM; 414 351 352 + dev_data->alias = get_alias(dev); 353 + 415 354 if (pci_iommuv2_capable(pdev)) { 416 355 struct amd_iommu *iommu; 417 356 ··· 434 369 u16 devid, alias; 435 370 436 371 devid = get_device_id(dev); 437 - alias = 
amd_iommu_alias_table[devid]; 372 + alias = get_alias(dev); 438 373 439 374 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); 440 375 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); ··· 1126 1061 int ret; 1127 1062 1128 1063 iommu = amd_iommu_rlookup_table[dev_data->devid]; 1129 - alias = amd_iommu_alias_table[dev_data->devid]; 1064 + alias = dev_data->alias; 1130 1065 1131 1066 ret = iommu_flush_dte(iommu, dev_data->devid); 1132 1067 if (!ret && alias != dev_data->devid) ··· 2104 2039 bool ats; 2105 2040 2106 2041 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2107 - alias = amd_iommu_alias_table[dev_data->devid]; 2042 + alias = dev_data->alias; 2108 2043 ats = dev_data->ats.enabled; 2109 2044 2110 2045 /* Update data structures */ ··· 2138 2073 return; 2139 2074 2140 2075 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2141 - alias = amd_iommu_alias_table[dev_data->devid]; 2076 + alias = dev_data->alias; 2142 2077 2143 2078 /* decrease reference counters */ 2144 2079 dev_data->domain->dev_iommu[iommu->index] -= 1;
+16 -8
drivers/iommu/arm-smmu.c
··· 826 826 if (smmu_domain->smmu) 827 827 goto out_unlock; 828 828 829 + /* We're bypassing these SIDs, so don't allocate an actual context */ 830 + if (domain->type == IOMMU_DOMAIN_DMA) { 831 + smmu_domain->smmu = smmu; 832 + goto out_unlock; 833 + } 834 + 829 835 /* 830 836 * Mapping the requested stage onto what we support is surprisingly 831 837 * complicated, mainly because the spec allows S1+S2 SMMUs without ··· 954 948 void __iomem *cb_base; 955 949 int irq; 956 950 957 - if (!smmu) 951 + if (!smmu || domain->type == IOMMU_DOMAIN_DMA) 958 952 return; 959 953 960 954 /* ··· 1095 1089 struct arm_smmu_device *smmu = smmu_domain->smmu; 1096 1090 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); 1097 1091 1092 + /* 1093 + * FIXME: This won't be needed once we have IOMMU-backed DMA ops 1094 + * for all devices behind the SMMU. Note that we need to take 1095 + * care configuring SMRs for devices both a platform_device and 1096 + * and a PCI device (i.e. a PCI host controller) 1097 + */ 1098 + if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1099 + return 0; 1100 + 1098 1101 /* Devices in an IOMMU group may already be configured */ 1099 1102 ret = arm_smmu_master_configure_smrs(smmu, cfg); 1100 1103 if (ret) 1101 1104 return ret == -EEXIST ? 0 : ret; 1102 - 1103 - /* 1104 - * FIXME: This won't be needed once we have IOMMU-backed DMA ops 1105 - * for all devices behind the SMMU. 1106 - */ 1107 - if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1108 - return 0; 1109 1105 1110 1106 for (i = 0; i < cfg->num_streamids; ++i) { 1111 1107 u32 idx, s2cr;
+2 -2
drivers/irqchip/irq-mips-gic.c
··· 467 467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 468 468 469 469 /* Update the pcpu_masks */ 470 - for (i = 0; i < gic_vpes; i++) 470 + for (i = 0; i < min(gic_vpes, NR_CPUS); i++) 471 471 clear_bit(irq, pcpu_masks[i].pcpu_mask); 472 472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 473 473 ··· 707 707 spin_lock_irqsave(&gic_lock, flags); 708 708 gic_map_to_pin(intr, gic_cpu_pin); 709 709 gic_map_to_vpe(intr, vpe); 710 - for (i = 0; i < gic_vpes; i++) 710 + for (i = 0; i < min(gic_vpes, NR_CPUS); i++) 711 711 clear_bit(intr, pcpu_masks[i].pcpu_mask); 712 712 set_bit(intr, pcpu_masks[vpe].pcpu_mask); 713 713 spin_unlock_irqrestore(&gic_lock, flags);
+3
drivers/isdn/mISDN/socket.c
··· 715 715 if (!maddr || maddr->family != AF_ISDN) 716 716 return -EINVAL; 717 717 718 + if (addr_len < sizeof(struct sockaddr_mISDN)) 719 + return -EINVAL; 720 + 718 721 lock_sock(sk); 719 722 720 723 if (_pms(sk)->dev) {
-7
drivers/media/usb/usbvision/usbvision-video.c
··· 1452 1452 printk(KERN_INFO "%s: %s found\n", __func__, 1453 1453 usbvision_device_data[model].model_string); 1454 1454 1455 - /* 1456 - * this is a security check. 1457 - * an exploit using an incorrect bInterfaceNumber is known 1458 - */ 1459 - if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum]) 1460 - return -ENODEV; 1461 - 1462 1455 if (usbvision_device_data[model].interface >= 0) 1463 1456 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; 1464 1457 else if (ifnum < dev->actconfig->desc.bNumInterfaces)
+15 -5
drivers/media/v4l2-core/videobuf2-core.c
··· 1645 1645 * Will sleep if required for nonblocking == false. 1646 1646 */ 1647 1647 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1648 - int nonblocking) 1648 + void *pb, int nonblocking) 1649 1649 { 1650 1650 unsigned long flags; 1651 1651 int ret; ··· 1666 1666 /* 1667 1667 * Only remove the buffer from done_list if v4l2_buffer can handle all 1668 1668 * the planes. 1669 - * Verifying planes is NOT necessary since it already has been checked 1670 - * before the buffer is queued/prepared. So it can never fail. 1671 1669 */ 1672 - list_del(&(*vb)->done_entry); 1670 + ret = call_bufop(q, verify_planes_array, *vb, pb); 1671 + if (!ret) 1672 + list_del(&(*vb)->done_entry); 1673 1673 spin_unlock_irqrestore(&q->done_lock, flags); 1674 1674 1675 1675 return ret; ··· 1748 1748 struct vb2_buffer *vb = NULL; 1749 1749 int ret; 1750 1750 1751 - ret = __vb2_get_done_vb(q, &vb, nonblocking); 1751 + ret = __vb2_get_done_vb(q, &vb, pb, nonblocking); 1752 1752 if (ret < 0) 1753 1753 return ret; 1754 1754 ··· 2295 2295 * error flag is set. 2296 2296 */ 2297 2297 if (!vb2_is_streaming(q) || q->error) 2298 + return POLLERR; 2299 + 2300 + /* 2301 + * If this quirk is set and QBUF hasn't been called yet then 2302 + * return POLLERR as well. This only affects capture queues, output 2303 + * queues will always initialize waiting_for_buffers to false. 2304 + * This quirk is set by V4L2 for backwards compatibility reasons. 2305 + */ 2306 + if (q->quirk_poll_must_check_waiting_for_buffers && 2307 + q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) 2298 2308 return POLLERR; 2299 2309 2300 2310 /*
+1 -1
drivers/media/v4l2-core/videobuf2-memops.c
··· 49 49 vec = frame_vector_create(nr); 50 50 if (!vec) 51 51 return ERR_PTR(-ENOMEM); 52 - ret = get_vaddr_frames(start, nr, write, 1, vec); 52 + ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec); 53 53 if (ret < 0) 54 54 goto out_destroy; 55 55 /* We accept only complete set of PFNs */
+12 -8
drivers/media/v4l2-core/videobuf2-v4l2.c
··· 74 74 return 0; 75 75 } 76 76 77 + static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb) 78 + { 79 + return __verify_planes_array(vb, pb); 80 + } 81 + 77 82 /** 78 83 * __verify_length() - Verify that the bytesused value for each plane fits in 79 84 * the plane length and that the data offset doesn't exceed the bytesused value. ··· 442 437 } 443 438 444 439 static const struct vb2_buf_ops v4l2_buf_ops = { 440 + .verify_planes_array = __verify_planes_array_core, 445 441 .fill_user_buffer = __fill_v4l2_buffer, 446 442 .fill_vb2_buffer = __fill_vb2_buffer, 447 443 .copy_timestamp = __copy_timestamp, ··· 771 765 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); 772 766 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) 773 767 == V4L2_BUF_FLAG_TIMESTAMP_COPY; 768 + /* 769 + * For compatibility with vb1: if QBUF hasn't been called yet, then 770 + * return POLLERR as well. This only affects capture queues, output 771 + * queues will always initialize waiting_for_buffers to false. 772 + */ 773 + q->quirk_poll_must_check_waiting_for_buffers = true; 774 774 775 775 return vb2_core_queue_init(q); 776 776 } ··· 829 817 else if (req_events & POLLPRI) 830 818 poll_wait(file, &fh->wait, wait); 831 819 } 832 - 833 - /* 834 - * For compatibility with vb1: if QBUF hasn't been called yet, then 835 - * return POLLERR as well. This only affects capture queues, output 836 - * queues will always initialize waiting_for_buffers to false. 837 - */ 838 - if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) 839 - return POLLERR; 840 820 841 821 return res | vb2_core_poll(q, file, wait); 842 822 }
+7
drivers/misc/cxl/context.c
··· 223 223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); 224 224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 225 225 226 + /* 227 + * Wait until no further interrupts are presented by the PSL 228 + * for this context. 229 + */ 230 + if (cxl_ops->irq_wait) 231 + cxl_ops->irq_wait(ctx); 232 + 226 233 /* release the reference to the group leader and mm handling pid */ 227 234 put_pid(ctx->pid); 228 235 put_pid(ctx->glpid);
+2
drivers/misc/cxl/cxl.h
··· 274 274 #define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ 275 275 #define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ 276 276 #define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ 277 + #define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC) 277 278 /* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ 278 279 #define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ 279 280 #define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ ··· 856 855 u64 dsisr, u64 errstat); 857 856 irqreturn_t (*psl_interrupt)(int irq, void *data); 858 857 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); 858 + void (*irq_wait)(struct cxl_context *ctx); 859 859 int (*attach_process)(struct cxl_context *ctx, bool kernel, 860 860 u64 wed, u64 amr); 861 861 int (*detach_process)(struct cxl_context *ctx);
-1
drivers/misc/cxl/irq.c
··· 203 203 void cxl_unmap_irq(unsigned int virq, void *cookie) 204 204 { 205 205 free_irq(virq, cookie); 206 - irq_dispose_mapping(virq); 207 206 } 208 207 209 208 int cxl_register_one_irq(struct cxl *adapter,
+31
drivers/misc/cxl/native.c
··· 14 14 #include <linux/mutex.h> 15 15 #include <linux/mm.h> 16 16 #include <linux/uaccess.h> 17 + #include <linux/delay.h> 17 18 #include <asm/synch.h> 18 19 #include <misc/cxl-base.h> 19 20 ··· 798 797 return fail_psl_irq(afu, &irq_info); 799 798 } 800 799 800 + void native_irq_wait(struct cxl_context *ctx) 801 + { 802 + u64 dsisr; 803 + int timeout = 1000; 804 + int ph; 805 + 806 + /* 807 + * Wait until no further interrupts are presented by the PSL 808 + * for this context. 809 + */ 810 + while (timeout--) { 811 + ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff; 812 + if (ph != ctx->pe) 813 + return; 814 + dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); 815 + if ((dsisr & CXL_PSL_DSISR_PENDING) == 0) 816 + return; 817 + /* 818 + * We are waiting for the workqueue to process our 819 + * irq, so need to let that run here. 820 + */ 821 + msleep(1); 822 + } 823 + 824 + dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i" 825 + " DSISR %016llx!\n", ph, dsisr); 826 + return; 827 + } 828 + 801 829 static irqreturn_t native_slice_irq_err(int irq, void *data) 802 830 { 803 831 struct cxl_afu *afu = data; ··· 1106 1076 .handle_psl_slice_error = native_handle_psl_slice_error, 1107 1077 .psl_interrupt = NULL, 1108 1078 .ack_irq = native_ack_irq, 1079 + .irq_wait = native_irq_wait, 1109 1080 .attach_process = native_attach_process, 1110 1081 .detach_process = native_detach_process, 1111 1082 .support_attributes = native_support_attributes,
+1
drivers/mmc/host/Kconfig
··· 97 97 config MMC_SDHCI_ACPI 98 98 tristate "SDHCI support for ACPI enumerated SDHCI controllers" 99 99 depends on MMC_SDHCI && ACPI 100 + select IOSF_MBI if X86 100 101 help 101 102 This selects support for ACPI enumerated SDHCI controllers, 102 103 identified by ACPI Compatibility ID PNP0D40 or specific
+81
drivers/mmc/host/sdhci-acpi.c
··· 41 41 #include <linux/mmc/pm.h> 42 42 #include <linux/mmc/slot-gpio.h> 43 43 44 + #ifdef CONFIG_X86 45 + #include <asm/cpu_device_id.h> 46 + #include <asm/iosf_mbi.h> 47 + #endif 48 + 44 49 #include "sdhci.h" 45 50 46 51 enum { ··· 120 115 static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { 121 116 .ops = &sdhci_acpi_ops_int, 122 117 }; 118 + 119 + #ifdef CONFIG_X86 120 + 121 + static bool sdhci_acpi_byt(void) 122 + { 123 + static const struct x86_cpu_id byt[] = { 124 + { X86_VENDOR_INTEL, 6, 0x37 }, 125 + {} 126 + }; 127 + 128 + return x86_match_cpu(byt); 129 + } 130 + 131 + #define BYT_IOSF_SCCEP 0x63 132 + #define BYT_IOSF_OCP_NETCTRL0 0x1078 133 + #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) 134 + 135 + static void sdhci_acpi_byt_setting(struct device *dev) 136 + { 137 + u32 val = 0; 138 + 139 + if (!sdhci_acpi_byt()) 140 + return; 141 + 142 + if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0, 143 + &val)) { 144 + dev_err(dev, "%s read error\n", __func__); 145 + return; 146 + } 147 + 148 + if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE)) 149 + return; 150 + 151 + val &= ~BYT_IOSF_OCP_TIMEOUT_BASE; 152 + 153 + if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0, 154 + val)) { 155 + dev_err(dev, "%s write error\n", __func__); 156 + return; 157 + } 158 + 159 + dev_dbg(dev, "%s completed\n", __func__); 160 + } 161 + 162 + static bool sdhci_acpi_byt_defer(struct device *dev) 163 + { 164 + if (!sdhci_acpi_byt()) 165 + return false; 166 + 167 + if (!iosf_mbi_available()) 168 + return true; 169 + 170 + sdhci_acpi_byt_setting(dev); 171 + 172 + return false; 173 + } 174 + 175 + #else 176 + 177 + static inline void sdhci_acpi_byt_setting(struct device *dev) 178 + { 179 + } 180 + 181 + static inline bool sdhci_acpi_byt_defer(struct device *dev) 182 + { 183 + return false; 184 + } 185 + 186 + #endif 123 187 124 188 static int bxt_get_cd(struct mmc_host *mmc) 125 189 { ··· 396 322 if (acpi_bus_get_status(device) || 
!device->status.present) 397 323 return -ENODEV; 398 324 325 + if (sdhci_acpi_byt_defer(dev)) 326 + return -EPROBE_DEFER; 327 + 399 328 hid = acpi_device_hid(device); 400 329 uid = device->pnp.unique_id; 401 330 ··· 524 447 { 525 448 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 526 449 450 + sdhci_acpi_byt_setting(&c->pdev->dev); 451 + 527 452 return sdhci_resume_host(c->host); 528 453 } 529 454 ··· 548 469 static int sdhci_acpi_runtime_resume(struct device *dev) 549 470 { 550 471 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 472 + 473 + sdhci_acpi_byt_setting(&c->pdev->dev); 551 474 552 475 return sdhci_runtime_resume_host(c->host); 553 476 }
+5
drivers/mmc/host/sunxi-mmc.c
··· 1129 1129 MMC_CAP_1_8V_DDR | 1130 1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1131 1131 1132 + /* TODO MMC DDR is not working on A80 */ 1133 + if (of_device_is_compatible(pdev->dev.of_node, 1134 + "allwinner,sun9i-a80-mmc")) 1135 + mmc->caps &= ~MMC_CAP_1_8V_DDR; 1136 + 1132 1137 ret = mmc_of_parse(mmc); 1133 1138 if (ret) 1134 1139 goto error_free_dma;
+3 -3
drivers/net/Kconfig
··· 62 62 this device is consigned into oblivion) with a configurable IP 63 63 address. It is most commonly used in order to make your currently 64 64 inactive SLIP address seem like a real address for local programs. 65 - If you use SLIP or PPP, you might want to say Y here. Since this 66 - thing often comes in handy, the default is Y. It won't enlarge your 67 - kernel either. What a deal. Read about it in the Network 65 + If you use SLIP or PPP, you might want to say Y here. It won't 66 + enlarge your kernel. What a deal. Read about it in the Network 68 67 Administrator's Guide, available from 69 68 <http://www.tldp.org/docs.html#guide>. 70 69 ··· 194 195 195 196 config MACSEC 196 197 tristate "IEEE 802.1AE MAC-level encryption (MACsec)" 198 + select CRYPTO 197 199 select CRYPTO_AES 198 200 select CRYPTO_GCM 199 201 ---help---
+5 -29
drivers/net/dsa/mv88e6xxx.c
··· 2181 2181 struct net_device *bridge) 2182 2182 { 2183 2183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2184 - u16 fid; 2185 2184 int i, err; 2186 2185 2187 2186 mutex_lock(&ps->smi_mutex); 2188 - 2189 - /* Get or create the bridge FID and assign it to the port */ 2190 - for (i = 0; i < ps->num_ports; ++i) 2191 - if (ps->ports[i].bridge_dev == bridge) 2192 - break; 2193 - 2194 - if (i < ps->num_ports) 2195 - err = _mv88e6xxx_port_fid_get(ds, i, &fid); 2196 - else 2197 - err = _mv88e6xxx_fid_new(ds, &fid); 2198 - if (err) 2199 - goto unlock; 2200 - 2201 - err = _mv88e6xxx_port_fid_set(ds, port, fid); 2202 - if (err) 2203 - goto unlock; 2204 2187 2205 2188 /* Assign the bridge and remap each port's VLANTable */ 2206 2189 ps->ports[port].bridge_dev = bridge; ··· 2196 2213 } 2197 2214 } 2198 2215 2199 - unlock: 2200 2216 mutex_unlock(&ps->smi_mutex); 2201 2217 2202 2218 return err; ··· 2205 2223 { 2206 2224 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2207 2225 struct net_device *bridge = ps->ports[port].bridge_dev; 2208 - u16 fid; 2209 2226 int i; 2210 2227 2211 2228 mutex_lock(&ps->smi_mutex); 2212 - 2213 - /* Give the port a fresh Filtering Information Database */ 2214 - if (_mv88e6xxx_fid_new(ds, &fid) || 2215 - _mv88e6xxx_port_fid_set(ds, port, fid)) 2216 - netdev_warn(ds->ports[port], "failed to assign a new FID\n"); 2217 2229 2218 2230 /* Unassign the bridge and remap each port's VLANTable */ 2219 2231 ps->ports[port].bridge_dev = NULL; ··· 2452 2476 * the other bits clear. 
2453 2477 */ 2454 2478 reg = 1 << port; 2455 - /* Disable learning for DSA and CPU ports */ 2456 - if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) 2457 - reg = PORT_ASSOC_VECTOR_LOCKED_PORT; 2479 + /* Disable learning for CPU port */ 2480 + if (dsa_is_cpu_port(ds, port)) 2481 + reg = 0; 2458 2482 2459 2483 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg); 2460 2484 if (ret) ··· 2534 2558 if (ret) 2535 2559 goto abort; 2536 2560 2537 - /* Port based VLAN map: give each port its own address 2561 + /* Port based VLAN map: give each port the same default address 2538 2562 * database, and allow bidirectional communication between the 2539 2563 * CPU and DSA port(s), and the other ports. 2540 2564 */ 2541 - ret = _mv88e6xxx_port_fid_set(ds, port, port + 1); 2565 + ret = _mv88e6xxx_port_fid_set(ds, port, 0); 2542 2566 if (ret) 2543 2567 goto abort; 2544 2568
+1 -1
drivers/net/ethernet/atheros/atlx/atl2.c
··· 1412 1412 1413 1413 err = -EIO; 1414 1414 1415 - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX; 1415 + netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX; 1416 1416 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); 1417 1417 1418 1418 /* Init PHY as early as possible due to power saving issue */
+5
drivers/net/ethernet/broadcom/bgmac.c
··· 1572 1572 dev_warn(&core->dev, "Using random MAC: %pM\n", mac); 1573 1573 } 1574 1574 1575 + /* This (reset &) enable is not preset in specs or reference driver but 1576 + * Broadcom does it in arch PCI code when enabling fake PCI device. 1577 + */ 1578 + bcma_core_enable(core, 0); 1579 + 1575 1580 /* Allocation and references */ 1576 1581 net_dev = alloc_etherdev(sizeof(*bgmac)); 1577 1582 if (!net_dev)
+3 -3
drivers/net/ethernet/broadcom/bgmac.h
··· 199 199 #define BGMAC_CMDCFG_TAI 0x00000200 200 200 #define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */ 201 201 #define BGMAC_CMDCFG_HD_SHIFT 10 202 - #define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */ 203 - #define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */ 204 - #define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) 202 + #define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */ 203 + #define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */ 204 + #define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) 205 205 #define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ 206 206 #define BGMAC_CMDCFG_AE 0x00400000 207 207 #define BGMAC_CMDCFG_CFE 0x00800000
+5 -1
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 878 878 else 879 879 p = (char *)priv; 880 880 p += s->stat_offset; 881 - data[i] = *(u32 *)p; 881 + if (sizeof(unsigned long) != sizeof(u32) && 882 + s->stat_sizeof == sizeof(unsigned long)) 883 + data[i] = *(unsigned long *)p; 884 + else 885 + data[i] = *(u32 *)p; 882 886 } 883 887 } 884 888
+3 -2
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 1011 1011 } 1012 1012 1013 1013 lmac++; 1014 - if (lmac == MAX_LMAC_PER_BGX) 1014 + if (lmac == MAX_LMAC_PER_BGX) { 1015 + of_node_put(node); 1015 1016 break; 1017 + } 1016 1018 } 1017 - of_node_put(node); 1018 1019 return 0; 1019 1020 1020 1021 defer:
+3
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 1451 1451 unsigned int mmd, unsigned int reg, u16 *valp); 1452 1452 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 1453 1453 unsigned int mmd, unsigned int reg, u16 val); 1454 + int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 1455 + unsigned int vf, unsigned int iqtype, unsigned int iqid, 1456 + unsigned int fl0id, unsigned int fl1id); 1454 1457 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1455 1458 unsigned int vf, unsigned int iqtype, unsigned int iqid, 1456 1459 unsigned int fl0id, unsigned int fl1id);
+17 -3
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 2981 2981 void t4_free_sge_resources(struct adapter *adap) 2982 2982 { 2983 2983 int i; 2984 - struct sge_eth_rxq *eq = adap->sge.ethrxq; 2985 - struct sge_eth_txq *etq = adap->sge.ethtxq; 2984 + struct sge_eth_rxq *eq; 2985 + struct sge_eth_txq *etq; 2986 + 2987 + /* stop all Rx queues in order to start them draining */ 2988 + for (i = 0; i < adap->sge.ethqsets; i++) { 2989 + eq = &adap->sge.ethrxq[i]; 2990 + if (eq->rspq.desc) 2991 + t4_iq_stop(adap, adap->mbox, adap->pf, 0, 2992 + FW_IQ_TYPE_FL_INT_CAP, 2993 + eq->rspq.cntxt_id, 2994 + eq->fl.size ? eq->fl.cntxt_id : 0xffff, 2995 + 0xffff); 2996 + } 2986 2997 2987 2998 /* clean up Ethernet Tx/Rx queues */ 2988 - for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { 2999 + for (i = 0; i < adap->sge.ethqsets; i++) { 3000 + eq = &adap->sge.ethrxq[i]; 2989 3001 if (eq->rspq.desc) 2990 3002 free_rspq_fl(adap, &eq->rspq, 2991 3003 eq->fl.size ? &eq->fl : NULL); 3004 + 3005 + etq = &adap->sge.ethtxq[i]; 2992 3006 if (etq->q.desc) { 2993 3007 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 2994 3008 etq->q.cntxt_id);
+43
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 2557 2557 } 2558 2558 2559 2559 #define EEPROM_STAT_ADDR 0x7bfc 2560 + #define VPD_SIZE 0x800 2560 2561 #define VPD_BASE 0x400 2561 2562 #define VPD_BASE_OLD 0 2562 2563 #define VPD_LEN 1024 ··· 2594 2593 vpd = vmalloc(VPD_LEN); 2595 2594 if (!vpd) 2596 2595 return -ENOMEM; 2596 + 2597 + /* We have two VPD data structures stored in the adapter VPD area. 2598 + * By default, Linux calculates the size of the VPD area by traversing 2599 + * the first VPD area at offset 0x0, so we need to tell the OS what 2600 + * our real VPD size is. 2601 + */ 2602 + ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); 2603 + if (ret < 0) 2604 + goto out; 2597 2605 2598 2606 /* Card information normally starts at VPD_BASE but early cards had 2599 2607 * it at 0. ··· 6946 6936 FW_VI_ENABLE_CMD_VIID_V(viid)); 6947 6937 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c)); 6948 6938 c.blinkdur = cpu_to_be16(nblinks); 6939 + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6940 + } 6941 + 6942 + /** 6943 + * t4_iq_stop - stop an ingress queue and its FLs 6944 + * @adap: the adapter 6945 + * @mbox: mailbox to use for the FW command 6946 + * @pf: the PF owning the queues 6947 + * @vf: the VF owning the queues 6948 + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 6949 + * @iqid: ingress queue id 6950 + * @fl0id: FL0 queue id or 0xffff if no attached FL0 6951 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 6952 + * 6953 + * Stops an ingress queue and its associated FLs, if any. This causes 6954 + * any current or future data/messages destined for these queues to be 6955 + * tossed. 
6956 + */ 6957 + int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 6958 + unsigned int vf, unsigned int iqtype, unsigned int iqid, 6959 + unsigned int fl0id, unsigned int fl1id) 6960 + { 6961 + struct fw_iq_cmd c; 6962 + 6963 + memset(&c, 0, sizeof(c)); 6964 + c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | 6965 + FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | 6966 + FW_IQ_CMD_VFN_V(vf)); 6967 + c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c)); 6968 + c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); 6969 + c.iqid = cpu_to_be16(iqid); 6970 + c.fl0id = cpu_to_be16(fl0id); 6971 + c.fl1id = cpu_to_be16(fl1id); 6949 6972 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6950 6973 } 6951 6974
+22 -8
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
··· 1223 1223 if (err) 1224 1224 return err; 1225 1225 1226 - /* verify upper 16 bits are zero */ 1227 - if (vid >> 16) 1228 - return FM10K_ERR_PARAM; 1229 - 1230 1226 set = !(vid & FM10K_VLAN_CLEAR); 1231 1227 vid &= ~FM10K_VLAN_CLEAR; 1232 1228 1233 - err = fm10k_iov_select_vid(vf_info, (u16)vid); 1234 - if (err < 0) 1235 - return err; 1229 + /* if the length field has been set, this is a multi-bit 1230 + * update request. For multi-bit requests, simply disallow 1231 + * them when the pf_vid has been set. In this case, the PF 1232 + * should have already cleared the VLAN_TABLE, and if we 1233 + * allowed them, it could allow a rogue VF to receive traffic 1234 + * on a VLAN it was not assigned. In the single-bit case, we 1235 + * need to modify requests for VLAN 0 to use the default PF or 1236 + * SW vid when assigned. 1237 + */ 1236 1238 1237 - vid = err; 1239 + if (vid >> 16) { 1240 + /* prevent multi-bit requests when PF has 1241 + * administratively set the VLAN for this VF 1242 + */ 1243 + if (vf_info->pf_vid) 1244 + return FM10K_ERR_PARAM; 1245 + } else { 1246 + err = fm10k_iov_select_vid(vf_info, (u16)vid); 1247 + if (err < 0) 1248 + return err; 1249 + 1250 + vid = err; 1251 + } 1238 1252 1239 1253 /* update VSI info for VF in regards to VLAN table */ 1240 1254 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
+24 -25
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 2594 2594 } 2595 2595 2596 2596 /** 2597 - * __i40e_chk_linearize - Check if there are more than 8 fragments per packet 2597 + * __i40e_chk_linearize - Check if there are more than 8 buffers per packet 2598 2598 * @skb: send buffer 2599 2599 * 2600 - * Note: Our HW can't scatter-gather more than 8 fragments to build 2601 - * a packet on the wire and so we need to figure out the cases where we 2602 - * need to linearize the skb. 2600 + * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire 2601 + * and so we need to figure out the cases where we need to linearize the skb. 2602 + * 2603 + * For TSO we need to count the TSO header and segment payload separately. 2604 + * As such we need to check cases where we have 7 fragments or more as we 2605 + * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 2606 + * the segment payload in the first descriptor, and another 7 for the 2607 + * fragments. 2603 2608 **/ 2604 2609 bool __i40e_chk_linearize(struct sk_buff *skb) 2605 2610 { 2606 2611 const struct skb_frag_struct *frag, *stale; 2607 - int gso_size, nr_frags, sum; 2612 + int nr_frags, sum; 2608 2613 2609 - /* check to see if TSO is enabled, if so we may get a repreive */ 2610 - gso_size = skb_shinfo(skb)->gso_size; 2611 - if (unlikely(!gso_size)) 2612 - return true; 2613 - 2614 - /* no need to check if number of frags is less than 8 */ 2614 + /* no need to check if number of frags is less than 7 */ 2615 2615 nr_frags = skb_shinfo(skb)->nr_frags; 2616 - if (nr_frags < I40E_MAX_BUFFER_TXD) 2616 + if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) 2617 2617 return false; 2618 2618 2619 2619 /* We need to walk through the list and validate that each group 2620 2620 * of 6 fragments totals at least gso_size. 
However we don't need 2621 - * to perform such validation on the first or last 6 since the first 2622 - * 6 cannot inherit any data from a descriptor before them, and the 2623 - * last 6 cannot inherit any data from a descriptor after them. 2621 + * to perform such validation on the last 6 since the last 6 cannot 2622 + * inherit any data from a descriptor after them. 2624 2623 */ 2625 - nr_frags -= I40E_MAX_BUFFER_TXD - 1; 2624 + nr_frags -= I40E_MAX_BUFFER_TXD - 2; 2626 2625 frag = &skb_shinfo(skb)->frags[0]; 2627 2626 2628 2627 /* Initialize size to the negative value of gso_size minus 1. We ··· 2630 2631 * descriptors for a single transmit as the header and previous 2631 2632 * fragment are already consuming 2 descriptors. 2632 2633 */ 2633 - sum = 1 - gso_size; 2634 + sum = 1 - skb_shinfo(skb)->gso_size; 2634 2635 2635 - /* Add size of frags 1 through 5 to create our initial sum */ 2636 - sum += skb_frag_size(++frag); 2637 - sum += skb_frag_size(++frag); 2638 - sum += skb_frag_size(++frag); 2639 - sum += skb_frag_size(++frag); 2640 - sum += skb_frag_size(++frag); 2636 + /* Add size of frags 0 through 4 to create our initial sum */ 2637 + sum += skb_frag_size(frag++); 2638 + sum += skb_frag_size(frag++); 2639 + sum += skb_frag_size(frag++); 2640 + sum += skb_frag_size(frag++); 2641 + sum += skb_frag_size(frag++); 2641 2642 2642 2643 /* Walk through fragments adding latest fragment, testing it, and 2643 2644 * then removing stale fragments from the sum. 2644 2645 */ 2645 2646 stale = &skb_shinfo(skb)->frags[0]; 2646 2647 for (;;) { 2647 - sum += skb_frag_size(++frag); 2648 + sum += skb_frag_size(frag++); 2648 2649 2649 2650 /* if sum is negative we failed to make sufficient progress */ 2650 2651 if (sum < 0) ··· 2654 2655 if (!--nr_frags) 2655 2656 break; 2656 2657 2657 - sum -= skb_frag_size(++stale); 2658 + sum -= skb_frag_size(stale++); 2658 2659 } 2659 2660 2660 2661 return false;
+7 -3
drivers/net/ethernet/intel/i40e/i40e_txrx.h
··· 413 413 **/ 414 414 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 415 415 { 416 - /* we can only support up to 8 data buffers for a single send */ 417 - if (likely(count <= I40E_MAX_BUFFER_TXD)) 416 + /* Both TSO and single send will work if count is less than 8 */ 417 + if (likely(count < I40E_MAX_BUFFER_TXD)) 418 418 return false; 419 419 420 - return __i40e_chk_linearize(skb); 420 + if (skb_is_gso(skb)) 421 + return __i40e_chk_linearize(skb); 422 + 423 + /* we can support up to 8 data buffers for a single send */ 424 + return count != I40E_MAX_BUFFER_TXD; 421 425 } 422 426 #endif /* _I40E_TXRX_H_ */
+24 -25
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
··· 1796 1796 } 1797 1797 1798 1798 /** 1799 - * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet 1799 + * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet 1800 1800 * @skb: send buffer 1801 1801 * 1802 - * Note: Our HW can't scatter-gather more than 8 fragments to build 1803 - * a packet on the wire and so we need to figure out the cases where we 1804 - * need to linearize the skb. 1802 + * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire 1803 + * and so we need to figure out the cases where we need to linearize the skb. 1804 + * 1805 + * For TSO we need to count the TSO header and segment payload separately. 1806 + * As such we need to check cases where we have 7 fragments or more as we 1807 + * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 1808 + * the segment payload in the first descriptor, and another 7 for the 1809 + * fragments. 1805 1810 **/ 1806 1811 bool __i40evf_chk_linearize(struct sk_buff *skb) 1807 1812 { 1808 1813 const struct skb_frag_struct *frag, *stale; 1809 - int gso_size, nr_frags, sum; 1814 + int nr_frags, sum; 1810 1815 1811 - /* check to see if TSO is enabled, if so we may get a repreive */ 1812 - gso_size = skb_shinfo(skb)->gso_size; 1813 - if (unlikely(!gso_size)) 1814 - return true; 1815 - 1816 - /* no need to check if number of frags is less than 8 */ 1816 + /* no need to check if number of frags is less than 7 */ 1817 1817 nr_frags = skb_shinfo(skb)->nr_frags; 1818 - if (nr_frags < I40E_MAX_BUFFER_TXD) 1818 + if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) 1819 1819 return false; 1820 1820 1821 1821 /* We need to walk through the list and validate that each group 1822 1822 * of 6 fragments totals at least gso_size. 
However we don't need 1823 - * to perform such validation on the first or last 6 since the first 1824 - * 6 cannot inherit any data from a descriptor before them, and the 1825 - * last 6 cannot inherit any data from a descriptor after them. 1823 + * to perform such validation on the last 6 since the last 6 cannot 1824 + * inherit any data from a descriptor after them. 1826 1825 */ 1827 - nr_frags -= I40E_MAX_BUFFER_TXD - 1; 1826 + nr_frags -= I40E_MAX_BUFFER_TXD - 2; 1828 1827 frag = &skb_shinfo(skb)->frags[0]; 1829 1828 1830 1829 /* Initialize size to the negative value of gso_size minus 1. We ··· 1832 1833 * descriptors for a single transmit as the header and previous 1833 1834 * fragment are already consuming 2 descriptors. 1834 1835 */ 1835 - sum = 1 - gso_size; 1836 + sum = 1 - skb_shinfo(skb)->gso_size; 1836 1837 1837 - /* Add size of frags 1 through 5 to create our initial sum */ 1838 - sum += skb_frag_size(++frag); 1839 - sum += skb_frag_size(++frag); 1840 - sum += skb_frag_size(++frag); 1841 - sum += skb_frag_size(++frag); 1842 - sum += skb_frag_size(++frag); 1838 + /* Add size of frags 0 through 4 to create our initial sum */ 1839 + sum += skb_frag_size(frag++); 1840 + sum += skb_frag_size(frag++); 1841 + sum += skb_frag_size(frag++); 1842 + sum += skb_frag_size(frag++); 1843 + sum += skb_frag_size(frag++); 1843 1844 1844 1845 /* Walk through fragments adding latest fragment, testing it, and 1845 1846 * then removing stale fragments from the sum. 1846 1847 */ 1847 1848 stale = &skb_shinfo(skb)->frags[0]; 1848 1849 for (;;) { 1849 - sum += skb_frag_size(++frag); 1850 + sum += skb_frag_size(frag++); 1850 1851 1851 1852 /* if sum is negative we failed to make sufficient progress */ 1852 1853 if (sum < 0) ··· 1856 1857 if (!--nr_frags) 1857 1858 break; 1858 1859 1859 - sum -= skb_frag_size(++stale); 1860 + sum -= skb_frag_size(stale++); 1860 1861 } 1861 1862 1862 1863 return false;
+7 -3
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
··· 395 395 **/ 396 396 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 397 397 { 398 - /* we can only support up to 8 data buffers for a single send */ 399 - if (likely(count <= I40E_MAX_BUFFER_TXD)) 398 + /* Both TSO and single send will work if count is less than 8 */ 399 + if (likely(count < I40E_MAX_BUFFER_TXD)) 400 400 return false; 401 401 402 - return __i40evf_chk_linearize(skb); 402 + if (skb_is_gso(skb)) 403 + return __i40evf_chk_linearize(skb); 404 + 405 + /* we can support up to 8 data buffers for a single send */ 406 + return count != I40E_MAX_BUFFER_TXD; 403 407 } 404 408 #endif /* _I40E_TXRX_H_ */
+4 -1
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 337 337 case ETH_SS_STATS: 338 338 return bitmap_iterator_count(&it) + 339 339 (priv->tx_ring_num * 2) + 340 - (priv->rx_ring_num * 2); 340 + (priv->rx_ring_num * 3); 341 341 case ETH_SS_TEST: 342 342 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags 343 343 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; ··· 404 404 for (i = 0; i < priv->rx_ring_num; i++) { 405 405 data[index++] = priv->rx_ring[i]->packets; 406 406 data[index++] = priv->rx_ring[i]->bytes; 407 + data[index++] = priv->rx_ring[i]->dropped; 407 408 } 408 409 spin_unlock_bh(&priv->stats_lock); 409 410 ··· 478 477 "rx%d_packets", i); 479 478 sprintf(data + (index++) * ETH_GSTRING_LEN, 480 479 "rx%d_bytes", i); 480 + sprintf(data + (index++) * ETH_GSTRING_LEN, 481 + "rx%d_dropped", i); 481 482 } 482 483 break; 483 484 case ETH_SS_PRIV_FLAGS:
+4 -1
drivers/net/ethernet/mellanox/mlx4/en_port.c
··· 158 158 u64 in_mod = reset << 8 | port; 159 159 int err; 160 160 int i, counter_index; 161 + unsigned long sw_rx_dropped = 0; 161 162 162 163 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 163 164 if (IS_ERR(mailbox)) ··· 181 180 for (i = 0; i < priv->rx_ring_num; i++) { 182 181 stats->rx_packets += priv->rx_ring[i]->packets; 183 182 stats->rx_bytes += priv->rx_ring[i]->bytes; 183 + sw_rx_dropped += priv->rx_ring[i]->dropped; 184 184 priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; 185 185 priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; 186 186 priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; ··· 238 236 &mlx4_en_stats->MCAST_prio_1, 239 237 NUM_PRIORITIES); 240 238 stats->collisions = 0; 241 - stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 239 + stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + 240 + sw_rx_dropped; 242 241 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 243 242 stats->rx_over_errors = 0; 244 243 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+8 -4
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 61 61 gfp_t gfp = _gfp; 62 62 63 63 if (order) 64 - gfp |= __GFP_COMP | __GFP_NOWARN; 64 + gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC; 65 65 page = alloc_pages(gfp, order); 66 66 if (likely(page)) 67 67 break; ··· 126 126 dma_unmap_page(priv->ddev, page_alloc[i].dma, 127 127 page_alloc[i].page_size, PCI_DMA_FROMDEVICE); 128 128 page = page_alloc[i].page; 129 - set_page_count(page, 1); 129 + /* Revert changes done by mlx4_alloc_pages */ 130 + page_ref_sub(page, page_alloc[i].page_size / 131 + priv->frag_info[i].frag_stride - 1); 130 132 put_page(page); 131 133 } 132 134 } ··· 178 176 dma_unmap_page(priv->ddev, page_alloc->dma, 179 177 page_alloc->page_size, PCI_DMA_FROMDEVICE); 180 178 page = page_alloc->page; 181 - set_page_count(page, 1); 179 + /* Revert changes done by mlx4_alloc_pages */ 180 + page_ref_sub(page, page_alloc->page_size / 181 + priv->frag_info[i].frag_stride - 1); 182 182 put_page(page); 183 183 page_alloc->page = NULL; 184 184 } ··· 943 939 /* GRO not possible, complete processing here */ 944 940 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); 945 941 if (!skb) { 946 - priv->stats.rx_dropped++; 942 + ring->dropped++; 947 943 goto next; 948 944 } 949 945
+4 -2
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 405 405 u32 packets = 0; 406 406 u32 bytes = 0; 407 407 int factor = priv->cqe_factor; 408 - u64 timestamp = 0; 409 408 int done = 0; 410 409 int budget = priv->tx_work_limit; 411 410 u32 last_nr_txbb; ··· 444 445 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; 445 446 446 447 do { 448 + u64 timestamp = 0; 449 + 447 450 txbbs_skipped += last_nr_txbb; 448 451 ring_index = (ring_index + last_nr_txbb) & size_mask; 449 - if (ring->tx_info[ring_index].ts_requested) 452 + 453 + if (unlikely(ring->tx_info[ring_index].ts_requested)) 450 454 timestamp = mlx4_en_get_cqe_ts(cqe); 451 455 452 456 /* free next descriptor */
+57 -19
drivers/net/ethernet/mellanox/mlx4/main.c
··· 3172 3172 return 0; 3173 3173 } 3174 3174 3175 + static int mlx4_pci_enable_device(struct mlx4_dev *dev) 3176 + { 3177 + struct pci_dev *pdev = dev->persist->pdev; 3178 + int err = 0; 3179 + 3180 + mutex_lock(&dev->persist->pci_status_mutex); 3181 + if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { 3182 + err = pci_enable_device(pdev); 3183 + if (!err) 3184 + dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; 3185 + } 3186 + mutex_unlock(&dev->persist->pci_status_mutex); 3187 + 3188 + return err; 3189 + } 3190 + 3191 + static void mlx4_pci_disable_device(struct mlx4_dev *dev) 3192 + { 3193 + struct pci_dev *pdev = dev->persist->pdev; 3194 + 3195 + mutex_lock(&dev->persist->pci_status_mutex); 3196 + if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { 3197 + pci_disable_device(pdev); 3198 + dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; 3199 + } 3200 + mutex_unlock(&dev->persist->pci_status_mutex); 3201 + } 3202 + 3175 3203 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3176 3204 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3177 3205 int reset_flow) ··· 3610 3582 3611 3583 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3612 3584 3613 - err = pci_enable_device(pdev); 3585 + err = mlx4_pci_enable_device(&priv->dev); 3614 3586 if (err) { 3615 3587 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3616 3588 return err; ··· 3743 3715 pci_release_regions(pdev); 3744 3716 3745 3717 err_disable_pdev: 3746 - pci_disable_device(pdev); 3718 + mlx4_pci_disable_device(&priv->dev); 3747 3719 pci_set_drvdata(pdev, NULL); 3748 3720 return err; 3749 3721 } ··· 3803 3775 priv->pci_dev_data = id->driver_data; 3804 3776 mutex_init(&dev->persist->device_state_mutex); 3805 3777 mutex_init(&dev->persist->interface_state_mutex); 3778 + mutex_init(&dev->persist->pci_status_mutex); 3806 3779 3807 3780 ret = devlink_register(devlink, &pdev->dev); 3808 3781 if (ret) ··· 3952 3923 } 3953 3924 3954 3925 
pci_release_regions(pdev); 3955 - pci_disable_device(pdev); 3926 + mlx4_pci_disable_device(dev); 3956 3927 devlink_unregister(devlink); 3957 3928 kfree(dev->persist); 3958 3929 devlink_free(devlink); ··· 4071 4042 if (state == pci_channel_io_perm_failure) 4072 4043 return PCI_ERS_RESULT_DISCONNECT; 4073 4044 4074 - pci_disable_device(pdev); 4045 + mlx4_pci_disable_device(persist->dev); 4075 4046 return PCI_ERS_RESULT_NEED_RESET; 4076 4047 } 4077 4048 ··· 4079 4050 { 4080 4051 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4081 4052 struct mlx4_dev *dev = persist->dev; 4082 - struct mlx4_priv *priv = mlx4_priv(dev); 4083 - int ret; 4084 - int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4085 - int total_vfs; 4053 + int err; 4086 4054 4087 4055 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 4088 - ret = pci_enable_device(pdev); 4089 - if (ret) { 4090 - mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 4056 + err = mlx4_pci_enable_device(dev); 4057 + if (err) { 4058 + mlx4_err(dev, "Can not re-enable device, err=%d\n", err); 4091 4059 return PCI_ERS_RESULT_DISCONNECT; 4092 4060 } 4093 4061 4094 4062 pci_set_master(pdev); 4095 4063 pci_restore_state(pdev); 4096 4064 pci_save_state(pdev); 4065 + return PCI_ERS_RESULT_RECOVERED; 4066 + } 4097 4067 4068 + static void mlx4_pci_resume(struct pci_dev *pdev) 4069 + { 4070 + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4071 + struct mlx4_dev *dev = persist->dev; 4072 + struct mlx4_priv *priv = mlx4_priv(dev); 4073 + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4074 + int total_vfs; 4075 + int err; 4076 + 4077 + mlx4_err(dev, "%s was called\n", __func__); 4098 4078 total_vfs = dev->persist->num_vfs; 4099 4079 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4100 4080 4101 4081 mutex_lock(&persist->interface_state_mutex); 4102 4082 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4103 - ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4083 + err = 
mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4104 4084 priv, 1); 4105 - if (ret) { 4106 - mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 4107 - __func__, ret); 4085 + if (err) { 4086 + mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n", 4087 + __func__, err); 4108 4088 goto end; 4109 4089 } 4110 4090 4111 - ret = restore_current_port_types(dev, dev->persist-> 4091 + err = restore_current_port_types(dev, dev->persist-> 4112 4092 curr_port_type, dev->persist-> 4113 4093 curr_port_poss_type); 4114 - if (ret) 4115 - mlx4_err(dev, "could not restore original port types (%d)\n", ret); 4094 + if (err) 4095 + mlx4_err(dev, "could not restore original port types (%d)\n", err); 4116 4096 } 4117 4097 end: 4118 4098 mutex_unlock(&persist->interface_state_mutex); 4119 4099 4120 - return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 4121 4100 } 4122 4101 4123 4102 static void mlx4_shutdown(struct pci_dev *pdev) ··· 4142 4105 static const struct pci_error_handlers mlx4_err_handler = { 4143 4106 .error_detected = mlx4_pci_err_detected, 4144 4107 .slot_reset = mlx4_pci_slot_reset, 4108 + .resume = mlx4_pci_resume, 4145 4109 }; 4146 4110 4147 4111 static struct pci_driver mlx4_driver = {
+2
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 586 586 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; 587 587 int init_port_ref[MLX4_MAX_PORTS + 1]; 588 588 u16 max_mtu[MLX4_MAX_PORTS + 1]; 589 + u8 pptx; 590 + u8 pprx; 589 591 int disable_mcast_ref[MLX4_MAX_PORTS + 1]; 590 592 struct mlx4_resource_tracker res_tracker; 591 593 struct workqueue_struct *comm_wq;
+1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 323 323 unsigned long csum_ok; 324 324 unsigned long csum_none; 325 325 unsigned long csum_complete; 326 + unsigned long dropped; 326 327 int hwtstamp_rx_filter; 327 328 cpumask_var_t affinity_mask; 328 329 };
+13
drivers/net/ethernet/mellanox/mlx4/port.c
··· 1317 1317 } 1318 1318 1319 1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 1320 + /* Slave cannot change Global Pause configuration */ 1321 + if (slave != mlx4_master_func_num(dev) && 1322 + ((gen_context->pptx != master->pptx) || 1323 + (gen_context->pprx != master->pprx))) { 1324 + gen_context->pptx = master->pptx; 1325 + gen_context->pprx = master->pprx; 1326 + mlx4_warn(dev, 1327 + "denying Global Pause change for slave:%d\n", 1328 + slave); 1329 + } else { 1330 + master->pptx = gen_context->pptx; 1331 + master->pprx = gen_context->pprx; 1332 + } 1320 1333 break; 1321 1334 case MLX4_SET_PORT_GID_TABLE: 1322 1335 /* change to MULTIPLE entries: number of guest's gids
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 609 609 MLX5E_100GBASE_KR4 = 22, 610 610 MLX5E_100GBASE_LR4 = 23, 611 611 MLX5E_100BASE_TX = 24, 612 - MLX5E_100BASE_T = 25, 612 + MLX5E_1000BASE_T = 25, 613 613 MLX5E_10GBASE_T = 26, 614 614 MLX5E_25GBASE_CR = 27, 615 615 MLX5E_25GBASE_KR = 28,
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 138 138 [MLX5E_100BASE_TX] = { 139 139 .speed = 100, 140 140 }, 141 - [MLX5E_100BASE_T] = { 142 - .supported = SUPPORTED_100baseT_Full, 143 - .advertised = ADVERTISED_100baseT_Full, 144 - .speed = 100, 141 + [MLX5E_1000BASE_T] = { 142 + .supported = SUPPORTED_1000baseT_Full, 143 + .advertised = ADVERTISED_1000baseT_Full, 144 + .speed = 1000, 145 145 }, 146 146 [MLX5E_10GBASE_T] = { 147 147 .supported = SUPPORTED_10000baseT_Full,
+57 -15
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1404 1404 return 0; 1405 1405 } 1406 1406 1407 - static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1407 + static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) 1408 1408 { 1409 - struct mlx5e_priv *priv = netdev_priv(netdev); 1410 1409 struct mlx5_core_dev *mdev = priv->mdev; 1411 - int hw_mtu; 1410 + u16 hw_mtu = MLX5E_SW2HW_MTU(mtu); 1412 1411 int err; 1413 1412 1414 - err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); 1413 + err = mlx5_set_port_mtu(mdev, hw_mtu, 1); 1415 1414 if (err) 1416 1415 return err; 1417 1416 1418 - mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); 1417 + /* Update vport context MTU */ 1418 + mlx5_modify_nic_vport_mtu(mdev, hw_mtu); 1419 + return 0; 1420 + } 1419 1421 1420 - if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) 1421 - netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", 1422 - __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); 1422 + static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) 1423 + { 1424 + struct mlx5_core_dev *mdev = priv->mdev; 1425 + u16 hw_mtu = 0; 1426 + int err; 1423 1427 1424 - netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); 1428 + err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); 1429 + if (err || !hw_mtu) /* fallback to port oper mtu */ 1430 + mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); 1431 + 1432 + *mtu = MLX5E_HW2SW_MTU(hw_mtu); 1433 + } 1434 + 1435 + static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1436 + { 1437 + struct mlx5e_priv *priv = netdev_priv(netdev); 1438 + u16 mtu; 1439 + int err; 1440 + 1441 + err = mlx5e_set_mtu(priv, netdev->mtu); 1442 + if (err) 1443 + return err; 1444 + 1445 + mlx5e_query_mtu(priv, &mtu); 1446 + if (mtu != netdev->mtu) 1447 + netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", 1448 + __func__, mtu, netdev->mtu); 1449 + 1450 + netdev->mtu = mtu; 1425 1451 return 0; 1426 1452 } 1427 1453 ··· 2025 1999 return err; 2026 2000 } 2027 2001 2002 + #define MXL5_HW_MIN_MTU 64 2003 + #define MXL5E_MIN_MTU 
(MXL5_HW_MIN_MTU + ETH_FCS_LEN) 2004 + 2028 2005 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) 2029 2006 { 2030 2007 struct mlx5e_priv *priv = netdev_priv(netdev); 2031 2008 struct mlx5_core_dev *mdev = priv->mdev; 2032 2009 bool was_opened; 2033 - int max_mtu; 2010 + u16 max_mtu; 2011 + u16 min_mtu; 2034 2012 int err = 0; 2035 2013 2036 2014 mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 2037 2015 2038 2016 max_mtu = MLX5E_HW2SW_MTU(max_mtu); 2017 + min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU); 2039 2018 2040 - if (new_mtu > max_mtu) { 2019 + if (new_mtu > max_mtu || new_mtu < min_mtu) { 2041 2020 netdev_err(netdev, 2042 - "%s: Bad MTU (%d) > (%d) Max\n", 2043 - __func__, new_mtu, max_mtu); 2021 + "%s: Bad MTU (%d), valid range is: [%d..%d]\n", 2022 + __func__, new_mtu, min_mtu, max_mtu); 2044 2023 return -EINVAL; 2045 2024 } 2046 2025 ··· 2633 2602 schedule_work(&priv->set_rx_mode_work); 2634 2603 mlx5e_disable_async_events(priv); 2635 2604 flush_scheduled_work(); 2636 - unregister_netdev(netdev); 2605 + if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { 2606 + netif_device_detach(netdev); 2607 + mutex_lock(&priv->state_lock); 2608 + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 2609 + mlx5e_close_locked(netdev); 2610 + mutex_unlock(&priv->state_lock); 2611 + } else { 2612 + unregister_netdev(netdev); 2613 + } 2614 + 2637 2615 mlx5e_tc_cleanup(priv); 2638 2616 mlx5e_vxlan_cleanup(priv); 2639 2617 mlx5e_destroy_flow_tables(priv); ··· 2655 2615 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); 2656 2616 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 2657 2617 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 2658 - free_netdev(netdev); 2618 + 2619 + if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) 2620 + free_netdev(netdev); 2659 2621 } 2660 2622 2661 2623 static void *mlx5e_get_netdev(void *vpriv)
+18 -30
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1065 1065 return rule; 1066 1066 } 1067 1067 1068 - static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft, 1069 - u8 match_criteria_enable, 1070 - u32 *match_criteria, 1071 - u32 *match_value, 1072 - u8 action, 1073 - u32 flow_tag, 1074 - struct mlx5_flow_destination *dest) 1075 - { 1076 - struct mlx5_flow_rule *rule; 1077 - struct mlx5_flow_group *g; 1078 - 1079 - g = create_autogroup(ft, match_criteria_enable, match_criteria); 1080 - if (IS_ERR(g)) 1081 - return (void *)g; 1082 - 1083 - rule = add_rule_fg(g, match_value, 1084 - action, flow_tag, dest); 1085 - if (IS_ERR(rule)) { 1086 - /* Remove assumes refcount > 0 and autogroup creates a group 1087 - * with a refcount = 0. 1088 - */ 1089 - tree_get_node(&g->node); 1090 - tree_remove_node(&g->node); 1091 - } 1092 - return rule; 1093 - } 1094 - 1095 1068 static struct mlx5_flow_rule * 1096 1069 _mlx5_add_flow_rule(struct mlx5_flow_table *ft, 1097 1070 u8 match_criteria_enable, ··· 1092 1119 goto unlock; 1093 1120 } 1094 1121 1095 - rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, 1096 - match_value, action, flow_tag, dest); 1122 + g = create_autogroup(ft, match_criteria_enable, match_criteria); 1123 + if (IS_ERR(g)) { 1124 + rule = (void *)g; 1125 + goto unlock; 1126 + } 1127 + 1128 + rule = add_rule_fg(g, match_value, 1129 + action, flow_tag, dest); 1130 + if (IS_ERR(rule)) { 1131 + /* Remove assumes refcount > 0 and autogroup creates a group 1132 + * with a refcount = 0. 1133 + */ 1134 + unlock_ref_node(&ft->node); 1135 + tree_get_node(&g->node); 1136 + tree_remove_node(&g->node); 1137 + return rule; 1138 + } 1097 1139 unlock: 1098 1140 unlock_ref_node(&ft->node); 1099 1141 return rule; ··· 1276 1288 { 1277 1289 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; 1278 1290 int prio; 1279 - static struct fs_prio *fs_prio; 1291 + struct fs_prio *fs_prio; 1280 1292 struct mlx5_flow_namespace *ns; 1281 1293 1282 1294 if (!root_ns)
+21 -4
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 966 966 int err; 967 967 968 968 mutex_lock(&dev->intf_state_mutex); 969 - if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { 969 + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { 970 970 dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", 971 971 __func__); 972 972 goto out; ··· 1133 1133 if (err) 1134 1134 pr_info("failed request module on %s\n", MLX5_IB_MOD); 1135 1135 1136 - dev->interface_state = MLX5_INTERFACE_STATE_UP; 1136 + clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); 1137 + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1137 1138 out: 1138 1139 mutex_unlock(&dev->intf_state_mutex); 1139 1140 ··· 1208 1207 } 1209 1208 1210 1209 mutex_lock(&dev->intf_state_mutex); 1211 - if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { 1210 + if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { 1212 1211 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", 1213 1212 __func__); 1214 1213 goto out; ··· 1242 1241 mlx5_cmd_cleanup(dev); 1243 1242 1244 1243 out: 1245 - dev->interface_state = MLX5_INTERFACE_STATE_DOWN; 1244 + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1245 + set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); 1246 1246 mutex_unlock(&dev->intf_state_mutex); 1247 1247 return err; 1248 1248 } ··· 1454 1452 .resume = mlx5_pci_resume 1455 1453 }; 1456 1454 1455 + static void shutdown(struct pci_dev *pdev) 1456 + { 1457 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1458 + struct mlx5_priv *priv = &dev->priv; 1459 + 1460 + dev_info(&pdev->dev, "Shutdown was called\n"); 1461 + /* Notify mlx5 clients that the kernel is being shut down */ 1462 + set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); 1463 + mlx5_unload_one(dev, priv); 1464 + mlx5_pci_disable_device(dev); 1465 + } 1466 + 1457 1467 static const struct pci_device_id mlx5_core_pci_table[] = { 1458 1468 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ 1459 1469 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB 
VF */ ··· 1473 1459 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ 1474 1460 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ 1475 1461 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ 1462 + { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */ 1463 + { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ 1476 1464 { 0, } 1477 1465 }; 1478 1466 ··· 1485 1469 .id_table = mlx5_core_pci_table, 1486 1470 .probe = init_one, 1487 1471 .remove = remove_one, 1472 + .shutdown = shutdown, 1488 1473 .err_handler = &mlx5_err_handler, 1489 1474 .sriov_configure = mlx5_core_sriov_configure, 1490 1475 };
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 247 247 } 248 248 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); 249 249 250 - static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, 251 - int *max_mtu, int *oper_mtu, u8 port) 250 + static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, 251 + u16 *max_mtu, u16 *oper_mtu, u8 port) 252 252 { 253 253 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 254 254 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; ··· 268 268 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); 269 269 } 270 270 271 - int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) 271 + int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port) 272 272 { 273 273 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 274 274 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; ··· 283 283 } 284 284 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); 285 285 286 - void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, 286 + void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, 287 287 u8 port) 288 288 { 289 289 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); 290 290 } 291 291 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); 292 292 293 - void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, 293 + void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, 294 294 u8 port) 295 295 { 296 296 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+40
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 196 196 } 197 197 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); 198 198 199 + int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu) 200 + { 201 + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); 202 + u32 *out; 203 + int err; 204 + 205 + out = mlx5_vzalloc(outlen); 206 + if (!out) 207 + return -ENOMEM; 208 + 209 + err = mlx5_query_nic_vport_context(mdev, 0, out, outlen); 210 + if (!err) 211 + *mtu = MLX5_GET(query_nic_vport_context_out, out, 212 + nic_vport_context.mtu); 213 + 214 + kvfree(out); 215 + return err; 216 + } 217 + EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu); 218 + 219 + int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) 220 + { 221 + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); 222 + void *in; 223 + int err; 224 + 225 + in = mlx5_vzalloc(inlen); 226 + if (!in) 227 + return -ENOMEM; 228 + 229 + MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1); 230 + MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu); 231 + 232 + err = mlx5_modify_nic_vport_context(mdev, in, inlen); 233 + 234 + kvfree(in); 235 + return err; 236 + } 237 + EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu); 238 + 199 239 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, 200 240 u32 vport, 201 241 enum mlx5_list_type list_type,
+101 -58
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 750 750 return false; 751 751 } 752 752 753 + static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) 754 + { 755 + qed_chain_consume(&rxq->rx_bd_ring); 756 + rxq->sw_rx_cons++; 757 + } 758 + 753 759 /* This function reuses the buffer(from an offset) from 754 760 * consumer index to producer index in the bd ring 755 761 */ ··· 779 773 curr_cons->data = NULL; 780 774 } 781 775 776 + /* In case of allocation failures reuse buffers 777 + * from consumer index to produce buffers for firmware 778 + */ 779 + static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, 780 + struct qede_dev *edev, u8 count) 781 + { 782 + struct sw_rx_data *curr_cons; 783 + 784 + for (; count > 0; count--) { 785 + curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; 786 + qede_reuse_page(edev, rxq, curr_cons); 787 + qede_rx_bd_ring_consume(rxq); 788 + } 789 + } 790 + 782 791 static inline int qede_realloc_rx_buffer(struct qede_dev *edev, 783 792 struct qede_rx_queue *rxq, 784 793 struct sw_rx_data *curr_cons) ··· 802 781 curr_cons->page_offset += rxq->rx_buf_seg_size; 803 782 804 783 if (curr_cons->page_offset == PAGE_SIZE) { 805 - if (unlikely(qede_alloc_rx_buffer(edev, rxq))) 784 + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) { 785 + /* Since we failed to allocate new buffer 786 + * current buffer can be used again. 787 + */ 788 + curr_cons->page_offset -= rxq->rx_buf_seg_size; 789 + 806 790 return -ENOMEM; 791 + } 807 792 808 793 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, 809 794 PAGE_SIZE, DMA_FROM_DEVICE); ··· 928 901 len_on_bd); 929 902 930 903 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { 931 - tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 904 + /* Incr page ref count to reuse on allocation failure 905 + * so that it doesn't get freed while freeing SKB. 
906 + */ 907 + atomic_inc(&current_bd->data->_count); 932 908 goto out; 933 909 } 934 910 ··· 945 915 return 0; 946 916 947 917 out: 918 + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 919 + qede_recycle_rx_bd_ring(rxq, edev, 1); 948 920 return -ENOMEM; 949 921 } 950 922 ··· 998 966 tpa_info->skb = netdev_alloc_skb(edev->ndev, 999 967 le16_to_cpu(cqe->len_on_first_bd)); 1000 968 if (unlikely(!tpa_info->skb)) { 969 + DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); 1001 970 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 1002 - return; 971 + goto cons_buf; 1003 972 } 1004 973 1005 974 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); ··· 1023 990 /* This is needed in order to enable forwarding support */ 1024 991 qede_set_gro_params(edev, tpa_info->skb, cqe); 1025 992 993 + cons_buf: /* We still need to handle bd_len_list to consume buffers */ 1026 994 if (likely(cqe->ext_bd_len_list[0])) 1027 995 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, 1028 996 le16_to_cpu(cqe->ext_bd_len_list[0])); ··· 1041 1007 const struct iphdr *iph = ip_hdr(skb); 1042 1008 struct tcphdr *th; 1043 1009 1044 - skb_set_network_header(skb, 0); 1045 1010 skb_set_transport_header(skb, sizeof(struct iphdr)); 1046 1011 th = tcp_hdr(skb); 1047 1012 ··· 1055 1022 struct ipv6hdr *iph = ipv6_hdr(skb); 1056 1023 struct tcphdr *th; 1057 1024 1058 - skb_set_network_header(skb, 0); 1059 1025 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 1060 1026 th = tcp_hdr(skb); 1061 1027 ··· 1069 1037 struct sk_buff *skb, 1070 1038 u16 vlan_tag) 1071 1039 { 1040 + /* FW can send a single MTU sized packet from gro flow 1041 + * due to aggregation timeout/last segment etc. which 1042 + * is not expected to be a gro packet. If a skb has zero 1043 + * frags then simply push it in the stack as non gso skb. 
1044 + */ 1045 + if (unlikely(!skb->data_len)) { 1046 + skb_shinfo(skb)->gso_type = 0; 1047 + skb_shinfo(skb)->gso_size = 0; 1048 + goto send_skb; 1049 + } 1050 + 1072 1051 #ifdef CONFIG_INET 1073 1052 if (skb_shinfo(skb)->gso_size) { 1053 + skb_set_network_header(skb, 0); 1054 + 1074 1055 switch (skb->protocol) { 1075 1056 case htons(ETH_P_IP): 1076 1057 qede_gro_ip_csum(skb); ··· 1098 1053 } 1099 1054 } 1100 1055 #endif 1056 + 1057 + send_skb: 1101 1058 skb_record_rx_queue(skb, fp->rss_id); 1102 1059 qede_skb_receive(edev, fp, skb, vlan_tag); 1103 1060 } ··· 1291 1244 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", 1292 1245 sw_comp_cons, parse_flag); 1293 1246 rxq->rx_hw_errors++; 1294 - qede_reuse_page(edev, rxq, sw_rx_data); 1295 - goto next_rx; 1247 + qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); 1248 + goto next_cqe; 1296 1249 } 1297 1250 1298 1251 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); 1299 1252 if (unlikely(!skb)) { 1300 1253 DP_NOTICE(edev, 1301 1254 "Build_skb failed, dropping incoming packet\n"); 1302 - qede_reuse_page(edev, rxq, sw_rx_data); 1255 + qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); 1303 1256 rxq->rx_alloc_errors++; 1304 - goto next_rx; 1257 + goto next_cqe; 1305 1258 } 1306 1259 1307 1260 /* Copy data into SKB */ ··· 1335 1288 if (unlikely(qede_realloc_rx_buffer(edev, rxq, 1336 1289 sw_rx_data))) { 1337 1290 DP_ERR(edev, "Failed to allocate rx buffer\n"); 1291 + /* Incr page ref count to reuse on allocation 1292 + * failure so that it doesn't get freed while 1293 + * freeing SKB. 
1294 + */ 1295 + 1296 + atomic_inc(&sw_rx_data->data->_count); 1338 1297 rxq->rx_alloc_errors++; 1298 + qede_recycle_rx_bd_ring(rxq, edev, 1299 + fp_cqe->bd_num); 1300 + dev_kfree_skb_any(skb); 1339 1301 goto next_cqe; 1340 1302 } 1341 1303 } 1304 + 1305 + qede_rx_bd_ring_consume(rxq); 1342 1306 1343 1307 if (fp_cqe->bd_num != 1) { 1344 1308 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); ··· 1361 1303 num_frags--) { 1362 1304 u16 cur_size = pkt_len > rxq->rx_buf_size ? 1363 1305 rxq->rx_buf_size : pkt_len; 1364 - 1365 - WARN_ONCE(!cur_size, 1366 - "Still got %d BDs for mapping jumbo, but length became 0\n", 1367 - num_frags); 1368 - 1369 - if (unlikely(qede_alloc_rx_buffer(edev, rxq))) 1306 + if (unlikely(!cur_size)) { 1307 + DP_ERR(edev, 1308 + "Still got %d BDs for mapping jumbo, but length became 0\n", 1309 + num_frags); 1310 + qede_recycle_rx_bd_ring(rxq, edev, 1311 + num_frags); 1312 + dev_kfree_skb_any(skb); 1370 1313 goto next_cqe; 1314 + } 1371 1315 1372 - rxq->sw_rx_cons++; 1316 + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) { 1317 + qede_recycle_rx_bd_ring(rxq, edev, 1318 + num_frags); 1319 + dev_kfree_skb_any(skb); 1320 + goto next_cqe; 1321 + } 1322 + 1373 1323 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; 1374 1324 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; 1375 - qed_chain_consume(&rxq->rx_bd_ring); 1325 + qede_rx_bd_ring_consume(rxq); 1326 + 1376 1327 dma_unmap_page(&edev->pdev->dev, 1377 1328 sw_rx_data->mapping, 1378 1329 PAGE_SIZE, DMA_FROM_DEVICE); ··· 1397 1330 pkt_len -= cur_size; 1398 1331 } 1399 1332 1400 - if (pkt_len) 1333 + if (unlikely(pkt_len)) 1401 1334 DP_ERR(edev, 1402 1335 "Mapped all BDs of jumbo, but still have %d bytes\n", 1403 1336 pkt_len); ··· 1416 1349 skb_record_rx_queue(skb, fp->rss_id); 1417 1350 1418 1351 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); 1419 - 1420 - qed_chain_consume(&rxq->rx_bd_ring); 1421 - next_rx: 1422 - rxq->sw_rx_cons++; 1423 1352 next_rx_only: 1424 1353 rx_pkt++; 1425 1354 ··· 2320 
2257 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 2321 2258 struct sw_rx_data *replace_buf = &tpa_info->replace_buf; 2322 2259 2323 - if (replace_buf) { 2260 + if (replace_buf->data) { 2324 2261 dma_unmap_page(&edev->pdev->dev, 2325 2262 dma_unmap_addr(replace_buf, mapping), 2326 2263 PAGE_SIZE, DMA_FROM_DEVICE); ··· 2440 2377 static int qede_alloc_mem_rxq(struct qede_dev *edev, 2441 2378 struct qede_rx_queue *rxq) 2442 2379 { 2443 - int i, rc, size, num_allocated; 2380 + int i, rc, size; 2444 2381 2445 2382 rxq->num_rx_buffers = edev->q_num_rx_buffers; 2446 2383 ··· 2457 2394 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); 2458 2395 if (!rxq->sw_rx_ring) { 2459 2396 DP_ERR(edev, "Rx buffers ring allocation failed\n"); 2397 + rc = -ENOMEM; 2460 2398 goto err; 2461 2399 } 2462 2400 ··· 2485 2421 /* Allocate buffers for the Rx ring */ 2486 2422 for (i = 0; i < rxq->num_rx_buffers; i++) { 2487 2423 rc = qede_alloc_rx_buffer(edev, rxq); 2488 - if (rc) 2489 - break; 2490 - } 2491 - num_allocated = i; 2492 - if (!num_allocated) { 2493 - DP_ERR(edev, "Rx buffers allocation failed\n"); 2494 - goto err; 2495 - } else if (num_allocated < rxq->num_rx_buffers) { 2496 - DP_NOTICE(edev, 2497 - "Allocated less buffers than desired (%d allocated)\n", 2498 - num_allocated); 2424 + if (rc) { 2425 + DP_ERR(edev, 2426 + "Rx buffers allocation failed at index %d\n", i); 2427 + goto err; 2428 + } 2499 2429 } 2500 2430 2501 - qede_alloc_sge_mem(edev, rxq); 2502 - 2503 - return 0; 2504 - 2431 + rc = qede_alloc_sge_mem(edev, rxq); 2505 2432 err: 2506 - qede_free_mem_rxq(edev, rxq); 2507 - return -ENOMEM; 2433 + return rc; 2508 2434 } 2509 2435 2510 2436 static void qede_free_mem_txq(struct qede_dev *edev, ··· 2577 2523 } 2578 2524 2579 2525 return 0; 2580 - 2581 2526 err: 2582 - qede_free_mem_fp(edev, fp); 2583 - return -ENOMEM; 2527 + return rc; 2584 2528 } 2585 2529 2586 2530 static void qede_free_mem_load(struct qede_dev *edev) ··· 2601 2549 struct qede_fastpath *fp = 
&edev->fp_array[rss_id]; 2602 2550 2603 2551 rc = qede_alloc_mem_fp(edev, fp); 2604 - if (rc) 2605 - break; 2606 - } 2607 - 2608 - if (rss_id != QEDE_RSS_CNT(edev)) { 2609 - /* Failed allocating memory for all the queues */ 2610 - if (!rss_id) { 2552 + if (rc) { 2611 2553 DP_ERR(edev, 2612 - "Failed to allocate memory for the leading queue\n"); 2613 - rc = -ENOMEM; 2614 - } else { 2615 - DP_NOTICE(edev, 2616 - "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n", 2617 - QEDE_RSS_CNT(edev), rss_id); 2554 + "Failed to allocate memory for fastpath - rss id = %d\n", 2555 + rss_id); 2556 + qede_free_mem_load(edev); 2557 + return rc; 2618 2558 } 2619 - edev->num_rss = rss_id; 2620 2559 } 2621 2560 2622 2561 return 0;
+2 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
··· 37 37 38 38 #define _QLCNIC_LINUX_MAJOR 5 39 39 #define _QLCNIC_LINUX_MINOR 3 40 - #define _QLCNIC_LINUX_SUBVERSION 63 41 - #define QLCNIC_LINUX_VERSIONID "5.3.63" 40 + #define _QLCNIC_LINUX_SUBVERSION 64 41 + #define QLCNIC_LINUX_VERSIONID "5.3.64" 42 42 #define QLCNIC_DRV_IDC_VER 0x01 43 43 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
+3
drivers/net/ethernet/renesas/ravb_main.c
··· 1691 1691 rate = clk_get_rate(clk); 1692 1692 clk_put(clk); 1693 1693 1694 + if (!rate) 1695 + return -EINVAL; 1696 + 1694 1697 inc = 1000000000ULL << 20; 1695 1698 do_div(inc, rate); 1696 1699
+1 -5
drivers/net/ethernet/renesas/sh_eth.c
··· 2194 2194 __func__); 2195 2195 return ret; 2196 2196 } 2197 - ret = sh_eth_dev_init(ndev, false); 2197 + ret = sh_eth_dev_init(ndev, true); 2198 2198 if (ret < 0) { 2199 2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", 2200 2200 __func__); 2201 2201 return ret; 2202 2202 } 2203 2203 2204 - mdp->irq_enabled = true; 2205 - sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 2206 - /* Setting the Rx mode will start the Rx process. */ 2207 - sh_eth_write(ndev, EDRRR_R, EDRRR); 2208 2204 netif_device_attach(ndev); 2209 2205 } 2210 2206
+29 -37
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
··· 34 34 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 35 35 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 36 36 37 + #define SYSMGR_FPGAGRP_MODULE_REG 0x00000028 38 + #define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004 39 + 37 40 #define EMAC_SPLITTER_CTRL_REG 0x0 38 41 #define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 39 42 #define EMAC_SPLITTER_CTRL_SPEED_10 0x2 ··· 49 46 u32 reg_shift; 50 47 struct device *dev; 51 48 struct regmap *sys_mgr_base_addr; 52 - struct reset_control *stmmac_rst; 53 49 void __iomem *splitter_base; 54 50 bool f2h_ptp_ref_clk; 55 51 }; ··· 90 88 int ret; 91 89 struct device_node *np_splitter; 92 90 struct resource res_splitter; 93 - 94 - dwmac->stmmac_rst = devm_reset_control_get(dev, 95 - STMMAC_RESOURCE_NAME); 96 - if (IS_ERR(dwmac->stmmac_rst)) { 97 - dev_info(dev, "Could not get reset control!\n"); 98 - if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER) 99 - return -EPROBE_DEFER; 100 - dwmac->stmmac_rst = NULL; 101 - } 102 91 103 92 dwmac->interface = of_get_phy_mode(np); 104 93 ··· 141 148 int phymode = dwmac->interface; 142 149 u32 reg_offset = dwmac->reg_offset; 143 150 u32 reg_shift = dwmac->reg_shift; 144 - u32 ctrl, val; 151 + u32 ctrl, val, module; 145 152 146 153 switch (phymode) { 147 154 case PHY_INTERFACE_MODE_RGMII: ··· 168 175 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); 169 176 ctrl |= val << reg_shift; 170 177 171 - if (dwmac->f2h_ptp_ref_clk) 178 + if (dwmac->f2h_ptp_ref_clk) { 172 179 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); 173 - else 180 + regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, 181 + &module); 182 + module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2)); 183 + regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, 184 + module); 185 + } else { 174 186 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); 187 + } 175 188 176 189 regmap_write(sys_mgr_base_addr, reg_offset, ctrl); 190 + 177 191 return 0; 178 - } 179 - 180 - static void 
socfpga_dwmac_exit(struct platform_device *pdev, void *priv) 181 - { 182 - struct socfpga_dwmac *dwmac = priv; 183 - 184 - /* On socfpga platform exit, assert and hold reset to the 185 - * enet controller - the default state after a hard reset. 186 - */ 187 - if (dwmac->stmmac_rst) 188 - reset_control_assert(dwmac->stmmac_rst); 189 192 } 190 193 191 194 static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) 192 195 { 193 - struct socfpga_dwmac *dwmac = priv; 196 + struct socfpga_dwmac *dwmac = priv; 194 197 struct net_device *ndev = platform_get_drvdata(pdev); 195 198 struct stmmac_priv *stpriv = NULL; 196 199 int ret = 0; 197 200 198 - if (ndev) 199 - stpriv = netdev_priv(ndev); 201 + if (!ndev) 202 + return -EINVAL; 203 + 204 + stpriv = netdev_priv(ndev); 205 + if (!stpriv) 206 + return -EINVAL; 200 207 201 208 /* Assert reset to the enet controller before changing the phy mode */ 202 - if (dwmac->stmmac_rst) 203 - reset_control_assert(dwmac->stmmac_rst); 209 + if (stpriv->stmmac_rst) 210 + reset_control_assert(stpriv->stmmac_rst); 204 211 205 212 /* Setup the phy mode in the system manager registers according to 206 213 * devicetree configuration ··· 210 217 /* Deassert reset for the phy configuration to be sampled by 211 218 * the enet controller, and operation to start in requested mode 212 219 */ 213 - if (dwmac->stmmac_rst) 214 - reset_control_deassert(dwmac->stmmac_rst); 220 + if (stpriv->stmmac_rst) 221 + reset_control_deassert(stpriv->stmmac_rst); 215 222 216 223 /* Before the enet controller is suspended, the phy is suspended. 217 224 * This causes the phy clock to be gated. The enet controller is ··· 228 235 * control register 0, and can be modified by the phy driver 229 236 * framework. 
230 237 */ 231 - if (stpriv && stpriv->phydev) 238 + if (stpriv->phydev) 232 239 phy_resume(stpriv->phydev); 233 240 234 241 return ret; ··· 268 275 269 276 plat_dat->bsp_priv = dwmac; 270 277 plat_dat->init = socfpga_dwmac_init; 271 - plat_dat->exit = socfpga_dwmac_exit; 272 278 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; 273 279 274 - ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv); 275 - if (ret) 276 - return ret; 280 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 281 + if (!ret) 282 + ret = socfpga_dwmac_init(pdev, dwmac); 277 283 278 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 284 + return ret; 279 285 } 280 286 281 287 static const struct of_device_id socfpga_dwmac_match[] = {
+2 -2
drivers/net/ethernet/ti/cpsw.c
··· 1251 1251 int i, ret; 1252 1252 u32 reg; 1253 1253 1254 + pm_runtime_get_sync(&priv->pdev->dev); 1255 + 1254 1256 if (!cpsw_common_res_usage_state(priv)) 1255 1257 cpsw_intr_disable(priv); 1256 1258 netif_carrier_off(ndev); 1257 - 1258 - pm_runtime_get_sync(&priv->pdev->dev); 1259 1259 1260 1260 reg = priv->version; 1261 1261
+1 -2
drivers/net/ethernet/ti/davinci_emac.c
··· 1878 1878 pdata->hw_ram_addr = auxdata->hw_ram_addr; 1879 1879 } 1880 1880 1881 - pdev->dev.platform_data = pdata; 1882 - 1883 1881 return pdata; 1884 1882 } 1885 1883 ··· 2099 2101 cpdma_ctlr_destroy(priv->dma); 2100 2102 2101 2103 unregister_netdev(ndev); 2104 + pm_runtime_disable(&pdev->dev); 2102 2105 free_netdev(ndev); 2103 2106 2104 2107 return 0;
+42 -23
drivers/net/macsec.c
··· 880 880 macsec_skb_cb(skb)->valid = false; 881 881 skb = skb_share_check(skb, GFP_ATOMIC); 882 882 if (!skb) 883 - return NULL; 883 + return ERR_PTR(-ENOMEM); 884 884 885 885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); 886 886 if (!req) { 887 887 kfree_skb(skb); 888 - return NULL; 888 + return ERR_PTR(-ENOMEM); 889 889 } 890 890 891 891 hdr = (struct macsec_eth_header *)skb->data; ··· 905 905 skb = skb_unshare(skb, GFP_ATOMIC); 906 906 if (!skb) { 907 907 aead_request_free(req); 908 - return NULL; 908 + return ERR_PTR(-ENOMEM); 909 909 } 910 910 } else { 911 911 /* integrity only: all headers + data authenticated */ ··· 921 921 dev_hold(dev); 922 922 ret = crypto_aead_decrypt(req); 923 923 if (ret == -EINPROGRESS) { 924 - return NULL; 924 + return ERR_PTR(ret); 925 925 } else if (ret != 0) { 926 926 /* decryption/authentication failed 927 927 * 10.6 if validateFrames is disabled, deliver anyway 928 928 */ 929 929 if (ret != -EBADMSG) { 930 930 kfree_skb(skb); 931 - skb = NULL; 931 + skb = ERR_PTR(ret); 932 932 } 933 933 } else { 934 934 macsec_skb_cb(skb)->valid = true; ··· 1146 1146 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1147 1147 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1148 1148 1149 - if (!skb) { 1150 - macsec_rxsa_put(rx_sa); 1149 + if (IS_ERR(skb)) { 1150 + /* the decrypt callback needs the reference */ 1151 + if (PTR_ERR(skb) != -EINPROGRESS) 1152 + macsec_rxsa_put(rx_sa); 1151 1153 rcu_read_unlock(); 1152 1154 *pskb = NULL; 1153 1155 return RX_HANDLER_CONSUMED; ··· 1163 1161 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1164 1162 macsec_reset_skb(skb, secy->netdev); 1165 1163 1166 - macsec_rxsa_put(rx_sa); 1164 + if (rx_sa) 1165 + macsec_rxsa_put(rx_sa); 1167 1166 count_rx(dev, skb->len); 1168 1167 1169 1168 rcu_read_unlock(); ··· 1625 1622 } 1626 1623 1627 1624 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1628 - if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len, 1629 - secy->icv_len)) { 1625 + if 
(!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1626 + secy->key_len, secy->icv_len)) { 1627 + kfree(rx_sa); 1630 1628 rtnl_unlock(); 1631 1629 return -ENOMEM; 1632 1630 } ··· 1772 1768 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 1773 1769 if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1774 1770 secy->key_len, secy->icv_len)) { 1771 + kfree(tx_sa); 1775 1772 rtnl_unlock(); 1776 1773 return -ENOMEM; 1777 1774 } ··· 2232 2227 return 1; 2233 2228 2234 2229 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || 2235 - nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 2230 + nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 2231 + MACSEC_DEFAULT_CIPHER_ID) || 2236 2232 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2237 2233 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2238 2234 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || ··· 2274 2268 if (!hdr) 2275 2269 return -EMSGSIZE; 2276 2270 2277 - rtnl_lock(); 2271 + genl_dump_check_consistent(cb, hdr, &macsec_fam); 2278 2272 2279 2273 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2280 2274 goto nla_put_failure; ··· 2435 2429 2436 2430 nla_nest_end(skb, rxsc_list); 2437 2431 2438 - rtnl_unlock(); 2439 - 2440 2432 genlmsg_end(skb, hdr); 2441 2433 2442 2434 return 0; 2443 2435 2444 2436 nla_put_failure: 2445 - rtnl_unlock(); 2446 2437 genlmsg_cancel(skb, hdr); 2447 2438 return -EMSGSIZE; 2448 2439 } 2440 + 2441 + static int macsec_generation = 1; /* protected by RTNL */ 2449 2442 2450 2443 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2451 2444 { ··· 2455 2450 dev_idx = cb->args[0]; 2456 2451 2457 2452 d = 0; 2453 + rtnl_lock(); 2454 + 2455 + cb->seq = macsec_generation; 2456 + 2458 2457 for_each_netdev(net, dev) { 2459 2458 struct macsec_secy *secy; 2460 2459 ··· 2476 2467 } 2477 2468 2478 2469 done: 2470 + rtnl_unlock(); 2479 2471 cb->args[0] = d; 2480 2472 return skb->len; 
2481 2473 } ··· 2930 2920 struct net_device *real_dev = macsec->real_dev; 2931 2921 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 2932 2922 2923 + macsec_generation++; 2924 + 2933 2925 unregister_netdevice_queue(dev, head); 2934 2926 list_del_rcu(&macsec->secys); 2935 - if (list_empty(&rxd->secys)) 2927 + if (list_empty(&rxd->secys)) { 2936 2928 netdev_rx_handler_unregister(real_dev); 2929 + kfree(rxd); 2930 + } 2937 2931 2938 2932 macsec_del_dev(macsec); 2939 2933 } ··· 2959 2945 2960 2946 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 2961 2947 rxd); 2962 - if (err < 0) 2948 + if (err < 0) { 2949 + kfree(rxd); 2963 2950 return err; 2951 + } 2964 2952 } 2965 2953 2966 2954 list_add_tail_rcu(&macsec->secys, &rxd->secys); ··· 3082 3066 if (err < 0) 3083 3067 goto del_dev; 3084 3068 3069 + macsec_generation++; 3070 + 3085 3071 dev_hold(real_dev); 3086 3072 3087 3073 return 0; ··· 3097 3079 3098 3080 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) 3099 3081 { 3100 - u64 csid = DEFAULT_CIPHER_ID; 3082 + u64 csid = MACSEC_DEFAULT_CIPHER_ID; 3101 3083 u8 icv_len = DEFAULT_ICV_LEN; 3102 3084 int flag; 3103 3085 bool es, scb, sci; ··· 3112 3094 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3113 3095 3114 3096 switch (csid) { 3115 - case DEFAULT_CIPHER_ID: 3116 - case DEFAULT_CIPHER_ALT: 3097 + case MACSEC_DEFAULT_CIPHER_ID: 3098 + case MACSEC_DEFAULT_CIPHER_ALT: 3117 3099 if (icv_len < MACSEC_MIN_ICV_LEN || 3118 3100 icv_len > MACSEC_MAX_ICV_LEN) 3119 3101 return -EINVAL; ··· 3147 3129 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3148 3130 return -EINVAL; 3149 3131 3150 - if ((data[IFLA_MACSEC_PROTECT] && 3151 - nla_get_u8(data[IFLA_MACSEC_PROTECT])) && 3132 + if ((data[IFLA_MACSEC_REPLAY_PROTECT] && 3133 + nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && 3152 3134 !data[IFLA_MACSEC_WINDOW]) 3153 3135 return -EINVAL; 3154 3136 ··· 3186 3168 3187 3169 if (nla_put_sci(skb, IFLA_MACSEC_SCI, 
secy->sci) || 3188 3170 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3189 - nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 3171 + nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, 3172 + MACSEC_DEFAULT_CIPHER_ID) || 3190 3173 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3191 3174 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3192 3175 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
+1 -1
drivers/net/phy/spi_ks8995.c
··· 441 441 return -ENOMEM; 442 442 443 443 mutex_init(&ks->lock); 444 - ks->spi = spi_dev_get(spi); 444 + ks->spi = spi; 445 445 ks->chip = &ks8995_chip[variant]; 446 446 447 447 if (ks->spi->dev.of_node) {
+7 -2
drivers/net/usb/cdc_mbim.c
··· 617 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 618 618 .driver_info = (unsigned long)&cdc_mbim_info, 619 619 }, 620 - /* Huawei E3372 fails unless NDP comes after the IP packets */ 621 - { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 620 + 621 + /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372 622 + * (12d1:157d), are known to fail unless the NDP is placed 623 + * after the IP packets. Applying the quirk to all Huawei 624 + * devices is broader than necessary, but harmless. 625 + */ 626 + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 622 627 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, 623 628 }, 624 629 /* default entry */
+8 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 1152 1152 union Vmxnet3_GenericDesc *gdesc) 1153 1153 { 1154 1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { 1155 - /* typical case: TCP/UDP over IP and both csums are correct */ 1156 - if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == 1157 - VMXNET3_RCD_CSUM_OK) { 1155 + if (gdesc->rcd.v4 && 1156 + (le32_to_cpu(gdesc->dword[3]) & 1157 + VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { 1158 1158 skb->ip_summed = CHECKSUM_UNNECESSARY; 1159 1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1160 - BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6)); 1160 + BUG_ON(gdesc->rcd.frg); 1161 + } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & 1162 + (1 << VMXNET3_RCD_TUC_SHIFT))) { 1163 + skb->ip_summed = CHECKSUM_UNNECESSARY; 1164 + BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1161 1165 BUG_ON(gdesc->rcd.frg); 1162 1166 } else { 1163 1167 if (gdesc->rcd.csum) {
+2 -2
drivers/net/vmxnet3/vmxnet3_int.h
··· 69 69 /* 70 70 * Version numbers 71 71 */ 72 - #define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k" 72 + #define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k" 73 73 74 74 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 75 - #define VMXNET3_DRIVER_VERSION_NUM 0x01040600 75 + #define VMXNET3_DRIVER_VERSION_NUM 0x01040700 76 76 77 77 #if defined(CONFIG_PCI_MSI) 78 78 /* RSS only makes sense if MSI-X is supported. */
+16 -161
drivers/net/vrf.c
··· 60 60 struct u64_stats_sync syncp; 61 61 }; 62 62 63 - static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie) 64 - { 65 - return dst; 66 - } 67 - 68 - static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) 69 - { 70 - return ip_local_out(net, sk, skb); 71 - } 72 - 73 - static unsigned int vrf_v4_mtu(const struct dst_entry *dst) 74 - { 75 - /* TO-DO: return max ethernet size? */ 76 - return dst->dev->mtu; 77 - } 78 - 79 - static void vrf_dst_destroy(struct dst_entry *dst) 80 - { 81 - /* our dst lives forever - or until the device is closed */ 82 - } 83 - 84 - static unsigned int vrf_default_advmss(const struct dst_entry *dst) 85 - { 86 - return 65535 - 40; 87 - } 88 - 89 - static struct dst_ops vrf_dst_ops = { 90 - .family = AF_INET, 91 - .local_out = vrf_ip_local_out, 92 - .check = vrf_ip_check, 93 - .mtu = vrf_v4_mtu, 94 - .destroy = vrf_dst_destroy, 95 - .default_advmss = vrf_default_advmss, 96 - }; 97 - 98 63 /* neighbor handling is done with actual device; do not want 99 64 * to flip skb->dev for those ndisc packets. This really fails 100 65 * for multiple next protocols (e.g., NEXTHDR_HOP). 
But it is ··· 314 349 } 315 350 316 351 #if IS_ENABLED(CONFIG_IPV6) 317 - static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie) 318 - { 319 - return dst; 320 - } 321 - 322 - static struct dst_ops vrf_dst_ops6 = { 323 - .family = AF_INET6, 324 - .local_out = ip6_local_out, 325 - .check = vrf_ip6_check, 326 - .mtu = vrf_v4_mtu, 327 - .destroy = vrf_dst_destroy, 328 - .default_advmss = vrf_default_advmss, 329 - }; 330 - 331 - static int init_dst_ops6_kmem_cachep(void) 332 - { 333 - vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache", 334 - sizeof(struct rt6_info), 335 - 0, 336 - SLAB_HWCACHE_ALIGN, 337 - NULL); 338 - 339 - if (!vrf_dst_ops6.kmem_cachep) 340 - return -ENOMEM; 341 - 342 - return 0; 343 - } 344 - 345 - static void free_dst_ops6_kmem_cachep(void) 346 - { 347 - kmem_cache_destroy(vrf_dst_ops6.kmem_cachep); 348 - } 349 - 350 - static int vrf_input6(struct sk_buff *skb) 351 - { 352 - skb->dev->stats.rx_errors++; 353 - kfree_skb(skb); 354 - return 0; 355 - } 356 - 357 352 /* modelled after ip6_finish_output2 */ 358 353 static int vrf_finish_output6(struct net *net, struct sock *sk, 359 354 struct sk_buff *skb) ··· 354 429 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 355 430 } 356 431 357 - static void vrf_rt6_destroy(struct net_vrf *vrf) 432 + static void vrf_rt6_release(struct net_vrf *vrf) 358 433 { 359 - dst_destroy(&vrf->rt6->dst); 360 - free_percpu(vrf->rt6->rt6i_pcpu); 434 + dst_release(&vrf->rt6->dst); 361 435 vrf->rt6 = NULL; 362 436 } 363 437 364 438 static int vrf_rt6_create(struct net_device *dev) 365 439 { 366 440 struct net_vrf *vrf = netdev_priv(dev); 367 - struct dst_entry *dst; 441 + struct net *net = dev_net(dev); 368 442 struct rt6_info *rt6; 369 - int cpu; 370 443 int rc = -ENOMEM; 371 444 372 - rt6 = dst_alloc(&vrf_dst_ops6, dev, 0, 373 - DST_OBSOLETE_NONE, 374 - (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); 445 + rt6 = ip6_dst_alloc(net, dev, 446 + DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE); 375 447 if 
(!rt6) 376 448 goto out; 377 449 378 - dst = &rt6->dst; 379 - 380 - rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL); 381 - if (!rt6->rt6i_pcpu) { 382 - dst_destroy(dst); 383 - goto out; 384 - } 385 - for_each_possible_cpu(cpu) { 386 - struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu); 387 - *p = NULL; 388 - } 389 - 390 - memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst)); 391 - 392 - INIT_LIST_HEAD(&rt6->rt6i_siblings); 393 - INIT_LIST_HEAD(&rt6->rt6i_uncached); 394 - 395 - rt6->dst.input = vrf_input6; 396 450 rt6->dst.output = vrf_output6; 397 - 398 - rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id); 399 - 400 - atomic_set(&rt6->dst.__refcnt, 2); 401 - 451 + rt6->rt6i_table = fib6_get_table(net, vrf->tb_id); 452 + dst_hold(&rt6->dst); 402 453 vrf->rt6 = rt6; 403 454 rc = 0; 404 455 out: 405 456 return rc; 406 457 } 407 458 #else 408 - static int init_dst_ops6_kmem_cachep(void) 409 - { 410 - return 0; 411 - } 412 - 413 - static void free_dst_ops6_kmem_cachep(void) 414 - { 415 - } 416 - 417 - static void vrf_rt6_destroy(struct net_vrf *vrf) 459 + static void vrf_rt6_release(struct net_vrf *vrf) 418 460 { 419 461 } 420 462 ··· 449 557 !(IPCB(skb)->flags & IPSKB_REROUTED)); 450 558 } 451 559 452 - static void vrf_rtable_destroy(struct net_vrf *vrf) 560 + static void vrf_rtable_release(struct net_vrf *vrf) 453 561 { 454 562 struct dst_entry *dst = (struct dst_entry *)vrf->rth; 455 563 456 - dst_destroy(dst); 564 + dst_release(dst); 457 565 vrf->rth = NULL; 458 566 } 459 567 ··· 462 570 struct net_vrf *vrf = netdev_priv(dev); 463 571 struct rtable *rth; 464 572 465 - rth = dst_alloc(&vrf_dst_ops, dev, 2, 466 - DST_OBSOLETE_NONE, 467 - (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); 573 + rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0); 468 574 if (rth) { 469 575 rth->dst.output = vrf_output; 470 - rth->rt_genid = rt_genid_ipv4(dev_net(dev)); 471 - rth->rt_flags = 0; 472 - rth->rt_type = RTN_UNICAST; 473 - rth->rt_is_input = 0; 474 - rth->rt_iif = 
0; 475 - rth->rt_pmtu = 0; 476 - rth->rt_gateway = 0; 477 - rth->rt_uses_gateway = 0; 478 576 rth->rt_table_id = vrf->tb_id; 479 - INIT_LIST_HEAD(&rth->rt_uncached); 480 - rth->rt_uncached_list = NULL; 481 577 } 482 578 483 579 return rth; ··· 553 673 struct net_device *port_dev; 554 674 struct list_head *iter; 555 675 556 - vrf_rtable_destroy(vrf); 557 - vrf_rt6_destroy(vrf); 676 + vrf_rtable_release(vrf); 677 + vrf_rt6_release(vrf); 558 678 559 679 netdev_for_each_lower_dev(dev, port_dev, iter) 560 680 vrf_del_slave(dev, port_dev); ··· 584 704 return 0; 585 705 586 706 out_rth: 587 - vrf_rtable_destroy(vrf); 707 + vrf_rtable_release(vrf); 588 708 out_stats: 589 709 free_percpu(dev->dstats); 590 710 dev->dstats = NULL; ··· 617 737 struct net_vrf *vrf = netdev_priv(dev); 618 738 619 739 rth = vrf->rth; 620 - atomic_inc(&rth->dst.__refcnt); 740 + dst_hold(&rth->dst); 621 741 } 622 742 623 743 return rth; ··· 668 788 struct net_vrf *vrf = netdev_priv(dev); 669 789 670 790 rt = vrf->rt6; 671 - atomic_inc(&rt->dst.__refcnt); 791 + dst_hold(&rt->dst); 672 792 } 673 793 674 794 return (struct dst_entry *)rt; ··· 826 946 { 827 947 int rc; 828 948 829 - vrf_dst_ops.kmem_cachep = 830 - kmem_cache_create("vrf_ip_dst_cache", 831 - sizeof(struct rtable), 0, 832 - SLAB_HWCACHE_ALIGN, 833 - NULL); 834 - 835 - if (!vrf_dst_ops.kmem_cachep) 836 - return -ENOMEM; 837 - 838 - rc = init_dst_ops6_kmem_cachep(); 839 - if (rc != 0) 840 - goto error2; 841 - 842 949 register_netdevice_notifier(&vrf_notifier_block); 843 950 844 951 rc = rtnl_link_register(&vrf_link_ops); ··· 836 969 837 970 error: 838 971 unregister_netdevice_notifier(&vrf_notifier_block); 839 - free_dst_ops6_kmem_cachep(); 840 - error2: 841 - kmem_cache_destroy(vrf_dst_ops.kmem_cachep); 842 972 return rc; 843 973 } 844 974 845 - static void __exit vrf_cleanup_module(void) 846 - { 847 - rtnl_link_unregister(&vrf_link_ops); 848 - unregister_netdevice_notifier(&vrf_notifier_block); 849 - 
kmem_cache_destroy(vrf_dst_ops.kmem_cachep); 850 - free_dst_ops6_kmem_cachep(); 851 - } 852 - 853 975 module_init(vrf_init_module); 854 - module_exit(vrf_cleanup_module); 855 976 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); 856 977 MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); 857 978 MODULE_LICENSE("GPL");
+4 -2
drivers/net/wireless/broadcom/b43/main.c
··· 5680 5680 INIT_WORK(&wl->firmware_load, b43_request_firmware); 5681 5681 schedule_work(&wl->firmware_load); 5682 5682 5683 - bcma_out: 5684 5683 return err; 5685 5684 5686 5685 bcma_err_wireless_exit: 5687 5686 ieee80211_free_hw(wl->hw); 5687 + bcma_out: 5688 + kfree(dev); 5688 5689 return err; 5689 5690 } 5690 5691 ··· 5713 5712 b43_rng_exit(wl); 5714 5713 5715 5714 b43_leds_unregister(wl); 5716 - 5717 5715 ieee80211_free_hw(wl->hw); 5716 + kfree(wldev->dev); 5718 5717 } 5719 5718 5720 5719 static struct bcma_driver b43_bcma_driver = { ··· 5797 5796 5798 5797 b43_leds_unregister(wl); 5799 5798 b43_wireless_exit(dev, wl); 5799 + kfree(dev); 5800 5800 } 5801 5801 5802 5802 static struct ssb_driver b43_ssb_driver = {
+2
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 1147 1147 /* the fw is stopped, the aux sta is dead: clean up driver state */ 1148 1148 iwl_mvm_del_aux_sta(mvm); 1149 1149 1150 + iwl_free_fw_paging(mvm); 1151 + 1150 1152 /* 1151 1153 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() 1152 1154 * won't be called in this case).
-2
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
··· 761 761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) 762 762 kfree(mvm->nvm_sections[i].data); 763 763 764 - iwl_free_fw_paging(mvm); 765 - 766 764 iwl_mvm_tof_clean(mvm); 767 765 768 766 ieee80211_free_hw(mvm->hw);
+2 -2
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 732 732 */ 733 733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); 734 734 if (val & (BIT(1) | BIT(17))) { 735 - IWL_INFO(trans, 736 - "can't access the RSA semaphore it is write protected\n"); 735 + IWL_DEBUG_INFO(trans, 736 + "can't access the RSA semaphore it is write protected\n"); 737 737 return 0; 738 738 } 739 739
+3 -3
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
··· 2488 2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) 2489 2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p]; 2490 2490 2491 - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 2492 - "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", 2493 - rtldm->thermalvalue, thermal_value); 2491 + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 2492 + "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", 2493 + rtldm->thermalvalue, thermal_value); 2494 2494 /*Record last Power Tracking Thermal Value*/ 2495 2495 rtldm->thermalvalue = thermal_value; 2496 2496 }
+42
drivers/pci/access.c
··· 275 275 } 276 276 EXPORT_SYMBOL(pci_write_vpd); 277 277 278 + /** 279 + * pci_set_vpd_size - Set size of Vital Product Data space 280 + * @dev: pci device struct 281 + * @len: size of vpd space 282 + */ 283 + int pci_set_vpd_size(struct pci_dev *dev, size_t len) 284 + { 285 + if (!dev->vpd || !dev->vpd->ops) 286 + return -ENODEV; 287 + return dev->vpd->ops->set_size(dev, len); 288 + } 289 + EXPORT_SYMBOL(pci_set_vpd_size); 290 + 278 291 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1) 279 292 280 293 /** ··· 511 498 return ret ? ret : count; 512 499 } 513 500 501 + static int pci_vpd_set_size(struct pci_dev *dev, size_t len) 502 + { 503 + struct pci_vpd *vpd = dev->vpd; 504 + 505 + if (len == 0 || len > PCI_VPD_MAX_SIZE) 506 + return -EIO; 507 + 508 + vpd->valid = 1; 509 + vpd->len = len; 510 + 511 + return 0; 512 + } 513 + 514 514 static const struct pci_vpd_ops pci_vpd_ops = { 515 515 .read = pci_vpd_read, 516 516 .write = pci_vpd_write, 517 + .set_size = pci_vpd_set_size, 517 518 }; 518 519 519 520 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, ··· 560 533 return ret; 561 534 } 562 535 536 + static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len) 537 + { 538 + struct pci_dev *tdev = pci_get_slot(dev->bus, 539 + PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); 540 + int ret; 541 + 542 + if (!tdev) 543 + return -ENODEV; 544 + 545 + ret = pci_set_vpd_size(tdev, len); 546 + pci_dev_put(tdev); 547 + return ret; 548 + } 549 + 563 550 static const struct pci_vpd_ops pci_vpd_f0_ops = { 564 551 .read = pci_vpd_f0_read, 565 552 .write = pci_vpd_f0_write, 553 + .set_size = pci_vpd_f0_set_size, 566 554 }; 567 555 568 556 int pci_vpd_init(struct pci_dev *dev)
+14 -6
drivers/pci/host/pci-imx6.c
··· 32 32 #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) 33 33 34 34 struct imx6_pcie { 35 - struct gpio_desc *reset_gpio; 35 + int reset_gpio; 36 36 struct clk *pcie_bus; 37 37 struct clk *pcie_phy; 38 38 struct clk *pcie; ··· 309 309 usleep_range(200, 500); 310 310 311 311 /* Some boards don't have PCIe reset GPIO. */ 312 - if (imx6_pcie->reset_gpio) { 313 - gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0); 312 + if (gpio_is_valid(imx6_pcie->reset_gpio)) { 313 + gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0); 314 314 msleep(100); 315 - gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1); 315 + gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1); 316 316 } 317 317 return 0; 318 318 ··· 523 523 { 524 524 struct imx6_pcie *imx6_pcie; 525 525 struct pcie_port *pp; 526 + struct device_node *np = pdev->dev.of_node; 526 527 struct resource *dbi_base; 527 528 struct device_node *node = pdev->dev.of_node; 528 529 int ret; ··· 545 544 return PTR_ERR(pp->dbi_base); 546 545 547 546 /* Fetch GPIOs */ 548 - imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", 549 - GPIOD_OUT_LOW); 547 + imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); 548 + if (gpio_is_valid(imx6_pcie->reset_gpio)) { 549 + ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio, 550 + GPIOF_OUT_INIT_LOW, "PCIe reset"); 551 + if (ret) { 552 + dev_err(&pdev->dev, "unable to get reset gpio\n"); 553 + return ret; 554 + } 555 + } 550 556 551 557 /* Fetch clocks */ 552 558 imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+1
drivers/pci/pci.h
··· 97 97 struct pci_vpd_ops { 98 98 ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 99 99 ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 100 + int (*set_size)(struct pci_dev *dev, size_t len); 100 101 }; 101 102 102 103 struct pci_vpd {
+13 -2
drivers/perf/arm_pmu.c
··· 737 737 break; 738 738 case CPU_PM_EXIT: 739 739 case CPU_PM_ENTER_FAILED: 740 - /* Restore and enable the counter */ 741 - armpmu_start(event, PERF_EF_RELOAD); 740 + /* 741 + * Restore and enable the counter. 742 + * armpmu_start() indirectly calls 743 + * 744 + * perf_event_update_userpage() 745 + * 746 + * that requires RCU read locking to be functional, 747 + * wrap the call within RCU_NONIDLE to make the 748 + * RCU subsystem aware this cpu is not idle from 749 + * an RCU perspective for the armpmu_start() call 750 + * duration. 751 + */ 752 + RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD)); 742 753 break; 743 754 default: 744 755 break;
+5 -2
drivers/phy/phy-rockchip-dp.c
··· 86 86 if (!np) 87 87 return -ENODEV; 88 88 89 + if (!dev->parent || !dev->parent->of_node) 90 + return -ENODEV; 91 + 89 92 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 90 93 if (IS_ERR(dp)) 91 94 return -ENOMEM; ··· 107 104 return ret; 108 105 } 109 106 110 - dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); 107 + dp->grf = syscon_node_to_regmap(dev->parent->of_node); 111 108 if (IS_ERR(dp->grf)) { 112 - dev_err(dev, "rk3288-dp needs rockchip,grf property\n"); 109 + dev_err(dev, "rk3288-dp needs the General Register Files syscon\n"); 113 110 return PTR_ERR(dp->grf); 114 111 } 115 112
+4 -1
drivers/phy/phy-rockchip-emmc.c
··· 176 176 struct regmap *grf; 177 177 unsigned int reg_offset; 178 178 179 - grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); 179 + if (!dev->parent || !dev->parent->of_node) 180 + return -ENODEV; 181 + 182 + grf = syscon_node_to_regmap(dev->parent->of_node); 180 183 if (IS_ERR(grf)) { 181 184 dev_err(dev, "Missing rockchip,grf property\n"); 182 185 return PTR_ERR(grf);
+1
drivers/pinctrl/freescale/Kconfig
··· 2 2 bool 3 3 select PINMUX 4 4 select PINCONF 5 + select REGMAP 5 6 6 7 config PINCTRL_IMX1_CORE 7 8 bool
+5 -4
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
··· 1004 1004 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent); 1005 1005 int eint_num, virq, eint_offset; 1006 1006 unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc; 1007 - static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256}; 1007 + static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000, 1008 + 128000, 256000}; 1008 1009 const struct mtk_desc_pin *pin; 1009 1010 struct irq_data *d; 1010 1011 ··· 1023 1022 if (!mtk_eint_can_en_debounce(pctl, eint_num)) 1024 1023 return -ENOSYS; 1025 1024 1026 - dbnc = ARRAY_SIZE(dbnc_arr); 1027 - for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) { 1028 - if (debounce <= dbnc_arr[i]) { 1025 + dbnc = ARRAY_SIZE(debounce_time); 1026 + for (i = 0; i < ARRAY_SIZE(debounce_time); i++) { 1027 + if (debounce <= debounce_time[i]) { 1029 1028 dbnc = i; 1030 1029 break; 1031 1030 }
+3 -3
drivers/pinctrl/pinctrl-single.c
··· 1280 1280 1281 1281 /* Parse pins in each row from LSB */ 1282 1282 while (mask) { 1283 - bit_pos = ffs(mask); 1283 + bit_pos = __ffs(mask); 1284 1284 pin_num_from_lsb = bit_pos / pcs->bits_per_pin; 1285 - mask_pos = ((pcs->fmask) << (bit_pos - 1)); 1285 + mask_pos = ((pcs->fmask) << bit_pos); 1286 1286 val_pos = val & mask_pos; 1287 1287 submask = mask & mask_pos; 1288 1288 ··· 1852 1852 ret = of_property_read_u32(np, "pinctrl-single,function-mask", 1853 1853 &pcs->fmask); 1854 1854 if (!ret) { 1855 - pcs->fshift = ffs(pcs->fmask) - 1; 1855 + pcs->fshift = __ffs(pcs->fmask); 1856 1856 pcs->fmax = pcs->fmask >> pcs->fshift; 1857 1857 } else { 1858 1858 /* If mask property doesn't exist, function mux is invalid. */
+5 -1
drivers/platform/x86/hp_accel.c
··· 127 127 arg0.integer.value = reg; 128 128 129 129 status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret); 130 + if (ACPI_FAILURE(status)) 131 + return -EINVAL; 130 132 *ret = lret; 131 - return (status != AE_OK) ? -EINVAL : 0; 133 + return 0; 132 134 } 133 135 134 136 /** ··· 175 173 DEFINE_CONV(normal, 1, 2, 3); 176 174 DEFINE_CONV(y_inverted, 1, -2, 3); 177 175 DEFINE_CONV(x_inverted, -1, 2, 3); 176 + DEFINE_CONV(x_inverted_usd, -1, 2, -3); 178 177 DEFINE_CONV(z_inverted, 1, 2, -3); 179 178 DEFINE_CONV(xy_swap, 2, 1, 3); 180 179 DEFINE_CONV(xy_rotated_left, -2, 1, 3); ··· 239 236 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), 240 237 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), 241 238 AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), 239 + AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd), 242 240 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), 243 241 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), 244 242 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+2
drivers/platform/x86/intel-hid.c
··· 91 91 } 92 92 93 93 static const struct dev_pm_ops intel_hid_pl_pm_ops = { 94 + .freeze = intel_hid_pl_suspend_handler, 95 + .restore = intel_hid_pl_resume_handler, 94 96 .suspend = intel_hid_pl_suspend_handler, 95 97 .resume = intel_hid_pl_resume_handler, 96 98 };
+22 -26
drivers/platform/x86/intel_pmc_ipc.c
··· 687 687 ipcdev.acpi_io_size = size; 688 688 dev_info(&pdev->dev, "io res: %pR\n", res); 689 689 690 - /* This is index 0 to cover BIOS data register */ 691 690 punit_res = punit_res_array; 691 + /* This is index 0 to cover BIOS data register */ 692 692 res = platform_get_resource(pdev, IORESOURCE_MEM, 693 693 PLAT_RESOURCE_BIOS_DATA_INDEX); 694 694 if (!res) { ··· 698 698 *punit_res = *res; 699 699 dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res); 700 700 701 + /* This is index 1 to cover BIOS interface register */ 701 702 res = platform_get_resource(pdev, IORESOURCE_MEM, 702 703 PLAT_RESOURCE_BIOS_IFACE_INDEX); 703 704 if (!res) { 704 705 dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n"); 705 706 return -ENXIO; 706 707 } 707 - /* This is index 1 to cover BIOS interface register */ 708 708 *++punit_res = *res; 709 709 dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res); 710 710 711 + /* This is index 2 to cover ISP data register, optional */ 711 712 res = platform_get_resource(pdev, IORESOURCE_MEM, 712 713 PLAT_RESOURCE_ISP_DATA_INDEX); 713 - if (!res) { 714 - dev_err(&pdev->dev, "Failed to get res of punit ISP data\n"); 715 - return -ENXIO; 714 + ++punit_res; 715 + if (res) { 716 + *punit_res = *res; 717 + dev_info(&pdev->dev, "punit ISP data res: %pR\n", res); 716 718 } 717 - /* This is index 2 to cover ISP data register */ 718 - *++punit_res = *res; 719 - dev_info(&pdev->dev, "punit ISP data res: %pR\n", res); 720 719 720 + /* This is index 3 to cover ISP interface register, optional */ 721 721 res = platform_get_resource(pdev, IORESOURCE_MEM, 722 722 PLAT_RESOURCE_ISP_IFACE_INDEX); 723 - if (!res) { 724 - dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n"); 725 - return -ENXIO; 723 + ++punit_res; 724 + if (res) { 725 + *punit_res = *res; 726 + dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res); 726 727 } 727 - /* This is index 3 to cover ISP interface register */ 728 - *++punit_res = *res; 729 - 
dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res); 730 728 729 + /* This is index 4 to cover GTD data register, optional */ 731 730 res = platform_get_resource(pdev, IORESOURCE_MEM, 732 731 PLAT_RESOURCE_GTD_DATA_INDEX); 733 - if (!res) { 734 - dev_err(&pdev->dev, "Failed to get res of punit GTD data\n"); 735 - return -ENXIO; 732 + ++punit_res; 733 + if (res) { 734 + *punit_res = *res; 735 + dev_info(&pdev->dev, "punit GTD data res: %pR\n", res); 736 736 } 737 - /* This is index 4 to cover GTD data register */ 738 - *++punit_res = *res; 739 - dev_info(&pdev->dev, "punit GTD data res: %pR\n", res); 740 737 738 + /* This is index 5 to cover GTD interface register, optional */ 741 739 res = platform_get_resource(pdev, IORESOURCE_MEM, 742 740 PLAT_RESOURCE_GTD_IFACE_INDEX); 743 - if (!res) { 744 - dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n"); 745 - return -ENXIO; 741 + ++punit_res; 742 + if (res) { 743 + *punit_res = *res; 744 + dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res); 746 745 } 747 - /* This is index 5 to cover GTD interface register */ 748 - *++punit_res = *res; 749 - dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res); 750 746 751 747 res = platform_get_resource(pdev, IORESOURCE_MEM, 752 748 PLAT_RESOURCE_IPC_INDEX);
+32 -16
drivers/platform/x86/intel_punit_ipc.c
··· 227 227 struct resource *res; 228 228 void __iomem *addr; 229 229 230 + /* 231 + * The following resources are required 232 + * - BIOS_IPC BASE_DATA 233 + * - BIOS_IPC BASE_IFACE 234 + */ 230 235 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 231 236 addr = devm_ioremap_resource(&pdev->dev, res); 232 237 if (IS_ERR(addr)) ··· 244 239 return PTR_ERR(addr); 245 240 punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr; 246 241 242 + /* 243 + * The following resources are optional 244 + * - ISPDRIVER_IPC BASE_DATA 245 + * - ISPDRIVER_IPC BASE_IFACE 246 + * - GTDRIVER_IPC BASE_DATA 247 + * - GTDRIVER_IPC BASE_IFACE 248 + */ 247 249 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 248 - addr = devm_ioremap_resource(&pdev->dev, res); 249 - if (IS_ERR(addr)) 250 - return PTR_ERR(addr); 251 - punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; 250 + if (res) { 251 + addr = devm_ioremap_resource(&pdev->dev, res); 252 + if (!IS_ERR(addr)) 253 + punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; 254 + } 252 255 253 256 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 254 - addr = devm_ioremap_resource(&pdev->dev, res); 255 - if (IS_ERR(addr)) 256 - return PTR_ERR(addr); 257 - punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; 257 + if (res) { 258 + addr = devm_ioremap_resource(&pdev->dev, res); 259 + if (!IS_ERR(addr)) 260 + punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; 261 + } 258 262 259 263 res = platform_get_resource(pdev, IORESOURCE_MEM, 4); 260 - addr = devm_ioremap_resource(&pdev->dev, res); 261 - if (IS_ERR(addr)) 262 - return PTR_ERR(addr); 263 - punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; 264 + if (res) { 265 + addr = devm_ioremap_resource(&pdev->dev, res); 266 + if (!IS_ERR(addr)) 267 + punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; 268 + } 264 269 265 270 res = platform_get_resource(pdev, IORESOURCE_MEM, 5); 266 - addr = devm_ioremap_resource(&pdev->dev, res); 267 - if (IS_ERR(addr)) 268 - return PTR_ERR(addr); 269 - 
punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; 271 + if (res) { 272 + addr = devm_ioremap_resource(&pdev->dev, res); 273 + if (!IS_ERR(addr)) 274 + punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; 275 + } 270 276 271 277 return 0; 272 278 }
+1 -1
drivers/platform/x86/intel_telemetry_pltdrv.c
··· 659 659 static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period) 660 660 { 661 661 u32 telem_ctrl = 0; 662 - int ret; 662 + int ret = 0; 663 663 664 664 mutex_lock(&(telm_conf->telem_lock)); 665 665 if (ioss_period) {
+3 -1
drivers/platform/x86/thinkpad_acpi.c
··· 7972 7972 fan_update_desired_level(s); 7973 7973 mutex_unlock(&fan_mutex); 7974 7974 7975 + if (rc) 7976 + return rc; 7975 7977 if (status) 7976 7978 *status = s; 7977 7979 7978 - return rc; 7980 + return 0; 7979 7981 } 7980 7982 7981 7983 static int fan_get_speed(unsigned int *speed)
+1 -1
drivers/platform/x86/toshiba_acpi.c
··· 135 135 /* Field definitions */ 136 136 #define HCI_ACCEL_MASK 0x7fff 137 137 #define HCI_HOTKEY_DISABLE 0x0b 138 - #define HCI_HOTKEY_ENABLE 0x01 138 + #define HCI_HOTKEY_ENABLE 0x09 139 139 #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10 140 140 #define HCI_LCD_BRIGHTNESS_BITS 3 141 141 #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
+2 -2
drivers/rapidio/devices/rio_mport_cdev.c
··· 2669 2669 2670 2670 /* Create device class needed by udev */ 2671 2671 dev_class = class_create(THIS_MODULE, DRV_NAME); 2672 - if (!dev_class) { 2672 + if (IS_ERR(dev_class)) { 2673 2673 rmcd_error("Unable to create " DRV_NAME " class"); 2674 - return -EINVAL; 2674 + return PTR_ERR(dev_class); 2675 2675 } 2676 2676 2677 2677 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
+3 -3
drivers/rtc/rtc-ds1307.c
··· 863 863 * A user-initiated temperature conversion is not started by this function, 864 864 * so the temperature is updated once every 64 seconds. 865 865 */ 866 - static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC) 866 + static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC) 867 867 { 868 868 struct ds1307 *ds1307 = dev_get_drvdata(dev); 869 869 u8 temp_buf[2]; ··· 892 892 struct device_attribute *attr, char *buf) 893 893 { 894 894 int ret; 895 - s16 temp; 895 + s32 temp; 896 896 897 897 ret = ds3231_hwmon_read_temp(dev, &temp); 898 898 if (ret) ··· 1531 1531 return PTR_ERR(ds1307->rtc); 1532 1532 } 1533 1533 1534 - if (ds1307_can_wakeup_device) { 1534 + if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) { 1535 1535 /* Disable request for an IRQ */ 1536 1536 want_irq = false; 1537 1537 dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
+3 -2
drivers/s390/block/dcssblk.c
··· 756 756 blk_cleanup_queue(dev_info->dcssblk_queue); 757 757 dev_info->gd->queue = NULL; 758 758 put_disk(dev_info->gd); 759 - device_unregister(&dev_info->dev); 760 759 761 760 /* unload all related segments */ 762 761 list_for_each_entry(entry, &dev_info->seg_list, lh) 763 762 segment_unload(entry->segment_name); 764 763 765 - put_device(&dev_info->dev); 766 764 up_write(&dcssblk_devices_sem); 765 + 766 + device_unregister(&dev_info->dev); 767 + put_device(&dev_info->dev); 767 768 768 769 rc = count; 769 770 out_buf:
+1 -1
drivers/s390/block/scm_blk.c
··· 303 303 if (req->cmd_type != REQ_TYPE_FS) { 304 304 blk_start_request(req); 305 305 blk_dump_rq_flags(req, KMSG_COMPONENT " bad request"); 306 - blk_end_request_all(req, -EIO); 306 + __blk_end_request_all(req, -EIO); 307 307 continue; 308 308 } 309 309
+7 -5
drivers/s390/char/sclp_ctl.c
··· 56 56 { 57 57 struct sclp_ctl_sccb ctl_sccb; 58 58 struct sccb_header *sccb; 59 + unsigned long copied; 59 60 int rc; 60 61 61 62 if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb))) ··· 66 65 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 67 66 if (!sccb) 68 67 return -ENOMEM; 69 - if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) { 68 + copied = PAGE_SIZE - 69 + copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE); 70 + if (offsetof(struct sccb_header, length) + 71 + sizeof(sccb->length) > copied || sccb->length > copied) { 70 72 rc = -EFAULT; 71 73 goto out_free; 72 74 } 73 - if (sccb->length > PAGE_SIZE || sccb->length < 8) 74 - return -EINVAL; 75 - if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) { 76 - rc = -EFAULT; 75 + if (sccb->length < 8) { 76 + rc = -EINVAL; 77 77 goto out_free; 78 78 } 79 79 rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
+1
drivers/scsi/cxgbi/libcxgbi.c
··· 688 688 { 689 689 struct flowi6 fl; 690 690 691 + memset(&fl, 0, sizeof(fl)); 691 692 if (saddr) 692 693 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); 693 694 if (daddr)
+6 -5
drivers/soc/mediatek/mtk-scpsys.c
··· 491 491 genpd->dev_ops.active_wakeup = scpsys_active_wakeup; 492 492 493 493 /* 494 - * With CONFIG_PM disabled turn on all domains to make the 495 - * hardware usable. 494 + * Initially turn on all domains to make the domains usable 495 + * with !CONFIG_PM and to get the hardware in sync with the 496 + * software. The unused domains will be switched off during 497 + * late_init time. 496 498 */ 497 - if (!IS_ENABLED(CONFIG_PM)) 498 - genpd->power_on(genpd); 499 + genpd->power_on(genpd); 499 500 500 - pm_genpd_init(genpd, NULL, true); 501 + pm_genpd_init(genpd, NULL, false); 501 502 } 502 503 503 504 /*
+34 -20
drivers/staging/media/davinci_vpfe/vpfe_video.c
··· 172 172 static int vpfe_update_pipe_state(struct vpfe_video_device *video) 173 173 { 174 174 struct vpfe_pipeline *pipe = &video->pipe; 175 + int ret; 175 176 176 - if (vpfe_prepare_pipeline(video)) 177 - return vpfe_prepare_pipeline(video); 177 + ret = vpfe_prepare_pipeline(video); 178 + if (ret) 179 + return ret; 178 180 179 181 /* 180 182 * Find out if there is any input video ··· 184 182 */ 185 183 if (pipe->input_num == 0) { 186 184 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS; 187 - if (vpfe_update_current_ext_subdev(video)) { 185 + ret = vpfe_update_current_ext_subdev(video); 186 + if (ret) { 188 187 pr_err("Invalid external subdev\n"); 189 - return vpfe_update_current_ext_subdev(video); 188 + return ret; 190 189 } 191 190 } else { 192 191 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT; ··· 670 667 struct v4l2_subdev *subdev; 671 668 struct v4l2_format format; 672 669 struct media_pad *remote; 670 + int ret; 673 671 674 672 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n"); 675 673 ··· 699 695 sd_fmt.pad = remote->index; 700 696 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; 701 697 /* get output format of remote subdev */ 702 - if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) { 698 + ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); 699 + if (ret) { 703 700 v4l2_err(&vpfe_dev->v4l2_dev, 704 701 "invalid remote subdev for video node\n"); 705 - return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); 702 + return ret; 706 703 } 707 704 /* convert to pix format */ 708 705 mbus.code = sd_fmt.format.code; ··· 730 725 struct vpfe_video_device *video = video_drvdata(file); 731 726 struct vpfe_device *vpfe_dev = video->vpfe_dev; 732 727 struct v4l2_format format; 728 + int ret; 733 729 734 730 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n"); 735 731 /* If streaming is started, return error */ ··· 739 733 return -EBUSY; 740 734 } 741 735 /* get adjacent subdev's output pad format */ 742 - if (__vpfe_video_get_format(video, 
&format)) 743 - return __vpfe_video_get_format(video, &format); 736 + ret = __vpfe_video_get_format(video, &format); 737 + if (ret) 738 + return ret; 744 739 *fmt = format; 745 740 video->fmt = *fmt; 746 741 return 0; ··· 764 757 struct vpfe_video_device *video = video_drvdata(file); 765 758 struct vpfe_device *vpfe_dev = video->vpfe_dev; 766 759 struct v4l2_format format; 760 + int ret; 767 761 768 762 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n"); 769 763 /* get adjacent subdev's output pad format */ 770 - if (__vpfe_video_get_format(video, &format)) 771 - return __vpfe_video_get_format(video, &format); 764 + ret = __vpfe_video_get_format(video, &format); 765 + if (ret) 766 + return ret; 772 767 773 768 *fmt = format; 774 769 return 0; ··· 847 838 848 839 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); 849 840 850 - if (mutex_lock_interruptible(&video->lock)) 851 - return mutex_lock_interruptible(&video->lock); 841 + ret = mutex_lock_interruptible(&video->lock); 842 + if (ret) 843 + return ret; 852 844 /* 853 845 * If streaming is started return device busy 854 846 * error ··· 950 940 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); 951 941 952 942 /* Call decoder driver function to set the standard */ 953 - if (mutex_lock_interruptible(&video->lock)) 954 - return mutex_lock_interruptible(&video->lock); 943 + ret = mutex_lock_interruptible(&video->lock); 944 + if (ret) 945 + return ret; 955 946 sdinfo = video->current_ext_subdev; 956 947 /* If streaming is started, return device busy error */ 957 948 if (video->started) { ··· 1338 1327 return -EINVAL; 1339 1328 } 1340 1329 1341 - if (mutex_lock_interruptible(&video->lock)) 1342 - return mutex_lock_interruptible(&video->lock); 1330 + ret = mutex_lock_interruptible(&video->lock); 1331 + if (ret) 1332 + return ret; 1343 1333 1344 1334 if (video->io_usrs != 0) { 1345 1335 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); ··· 1366 1354 q->buf_struct_size = sizeof(struct 
vpfe_cap_buffer); 1367 1355 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1368 1356 1369 - if (vb2_queue_init(q)) { 1357 + ret = vb2_queue_init(q); 1358 + if (ret) { 1370 1359 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); 1371 1360 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); 1372 - return vb2_queue_init(q); 1361 + return ret; 1373 1362 } 1374 1363 1375 1364 fh->io_allowed = 1; ··· 1546 1533 return -EINVAL; 1547 1534 } 1548 1535 1549 - if (mutex_lock_interruptible(&video->lock)) 1550 - return mutex_lock_interruptible(&video->lock); 1536 + ret = mutex_lock_interruptible(&video->lock); 1537 + if (ret) 1538 + return ret; 1551 1539 1552 1540 vpfe_stop_capture(video); 1553 1541 ret = vb2_streamoff(&video->buffer_queue, buf_type);
+1 -1
drivers/staging/rdma/hfi1/TODO
··· 3 3 - Remove unneeded file entries in sysfs 4 4 - Remove software processing of IB protocol and place in library for use 5 5 by qib, ipath (if still present), hfi1, and eventually soft-roce 6 - 6 + - Replace incorrect uAPI
+35 -56
drivers/staging/rdma/hfi1/file_ops.c
··· 49 49 #include <linux/vmalloc.h> 50 50 #include <linux/io.h> 51 51 52 + #include <rdma/ib.h> 53 + 52 54 #include "hfi.h" 53 55 #include "pio.h" 54 56 #include "device.h" ··· 191 189 __u64 user_val = 0; 192 190 int uctxt_required = 1; 193 191 int must_be_root = 0; 192 + 193 + /* FIXME: This interface cannot continue out of staging */ 194 + if (WARN_ON_ONCE(!ib_safe_file_access(fp))) 195 + return -EACCES; 194 196 195 197 if (count < sizeof(cmd)) { 196 198 ret = -EINVAL; ··· 797 791 spin_unlock_irqrestore(&dd->uctxt_lock, flags); 798 792 799 793 dd->rcd[uctxt->ctxt] = NULL; 794 + 795 + hfi1_user_exp_rcv_free(fdata); 796 + hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); 797 + 800 798 uctxt->rcvwait_to = 0; 801 799 uctxt->piowait_to = 0; 802 800 uctxt->rcvnowait = 0; 803 801 uctxt->pionowait = 0; 804 802 uctxt->event_flags = 0; 805 - 806 - hfi1_user_exp_rcv_free(fdata); 807 - hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); 808 803 809 804 hfi1_stats.sps_ctxts--; 810 805 if (++dd->freectxts == dd->num_user_contexts) ··· 1134 1127 1135 1128 static int user_init(struct file *fp) 1136 1129 { 1137 - int ret; 1138 1130 unsigned int rcvctrl_ops = 0; 1139 1131 struct hfi1_filedata *fd = fp->private_data; 1140 1132 struct hfi1_ctxtdata *uctxt = fd->uctxt; 1141 1133 1142 1134 /* make sure that the context has already been setup */ 1143 - if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) { 1144 - ret = -EFAULT; 1145 - goto done; 1146 - } 1147 - 1148 - /* 1149 - * Subctxts don't need to initialize anything since master 1150 - * has done it. 1151 - */ 1152 - if (fd->subctxt) { 1153 - ret = wait_event_interruptible(uctxt->wait, !test_bit( 1154 - HFI1_CTXT_MASTER_UNINIT, 1155 - &uctxt->event_flags)); 1156 - goto expected; 1157 - } 1135 + if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) 1136 + return -EFAULT; 1158 1137 1159 1138 /* initialize poll variables... 
*/ 1160 1139 uctxt->urgent = 0; ··· 1195 1202 wake_up(&uctxt->wait); 1196 1203 } 1197 1204 1198 - expected: 1199 - /* 1200 - * Expected receive has to be setup for all processes (including 1201 - * shared contexts). However, it has to be done after the master 1202 - * context has been fully configured as it depends on the 1203 - * eager/expected split of the RcvArray entries. 1204 - * Setting it up here ensures that the subcontexts will be waiting 1205 - * (due to the above wait_event_interruptible() until the master 1206 - * is setup. 1207 - */ 1208 - ret = hfi1_user_exp_rcv_init(fp); 1209 - done: 1210 - return ret; 1205 + return 0; 1211 1206 } 1212 1207 1213 1208 static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) ··· 1242 1261 int ret = 0; 1243 1262 1244 1263 /* 1245 - * Context should be set up only once (including allocation and 1264 + * Context should be set up only once, including allocation and 1246 1265 * programming of eager buffers. This is done if context sharing 1247 1266 * is not requested or by the master process. 1248 1267 */ ··· 1263 1282 if (ret) 1264 1283 goto done; 1265 1284 } 1285 + } else { 1286 + ret = wait_event_interruptible(uctxt->wait, !test_bit( 1287 + HFI1_CTXT_MASTER_UNINIT, 1288 + &uctxt->event_flags)); 1289 + if (ret) 1290 + goto done; 1266 1291 } 1292 + 1267 1293 ret = hfi1_user_sdma_alloc_queues(uctxt, fp); 1294 + if (ret) 1295 + goto done; 1296 + /* 1297 + * Expected receive has to be setup for all processes (including 1298 + * shared contexts). However, it has to be done after the master 1299 + * context has been fully configured as it depends on the 1300 + * eager/expected split of the RcvArray entries. 1301 + * Setting it up here ensures that the subcontexts will be waiting 1302 + * (due to the above wait_event_interruptible() until the master 1303 + * is setup. 
1304 + */ 1305 + ret = hfi1_user_exp_rcv_init(fp); 1268 1306 if (ret) 1269 1307 goto done; 1270 1308 ··· 1565 1565 { 1566 1566 struct hfi1_devdata *dd = filp->private_data; 1567 1567 1568 - switch (whence) { 1569 - case SEEK_SET: 1570 - break; 1571 - case SEEK_CUR: 1572 - offset += filp->f_pos; 1573 - break; 1574 - case SEEK_END: 1575 - offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) - 1576 - offset; 1577 - break; 1578 - default: 1579 - return -EINVAL; 1580 - } 1581 - 1582 - if (offset < 0) 1583 - return -EINVAL; 1584 - 1585 - if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) 1586 - return -EINVAL; 1587 - 1588 - filp->f_pos = offset; 1589 - 1590 - return filp->f_pos; 1568 + return fixed_size_llseek(filp, offset, whence, 1569 + (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE); 1591 1570 } 1592 1571 1593 1572 /* NOTE: assumes unsigned long is 8 bytes */
+25 -15
drivers/staging/rdma/hfi1/mmu_rb.c
··· 71 71 struct mm_struct *, 72 72 unsigned long, unsigned long); 73 73 static void mmu_notifier_mem_invalidate(struct mmu_notifier *, 74 + struct mm_struct *, 74 75 unsigned long, unsigned long); 75 76 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, 76 77 unsigned long, unsigned long); ··· 138 137 rbnode = rb_entry(node, struct mmu_rb_node, node); 139 138 rb_erase(node, root); 140 139 if (handler->ops->remove) 141 - handler->ops->remove(root, rbnode, false); 140 + handler->ops->remove(root, rbnode, NULL); 142 141 } 143 142 } 144 143 ··· 177 176 return ret; 178 177 } 179 178 180 - /* Caller must host handler lock */ 179 + /* Caller must hold handler lock */ 181 180 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, 182 181 unsigned long addr, 183 182 unsigned long len) ··· 201 200 return node; 202 201 } 203 202 203 + /* Caller must *not* hold handler lock. */ 204 204 static void __mmu_rb_remove(struct mmu_rb_handler *handler, 205 - struct mmu_rb_node *node, bool arg) 205 + struct mmu_rb_node *node, struct mm_struct *mm) 206 206 { 207 + unsigned long flags; 208 + 207 209 /* Validity of handler and node pointers has been checked by caller. 
*/ 208 210 hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, 209 211 node->len); 212 + spin_lock_irqsave(&handler->lock, flags); 210 213 __mmu_int_rb_remove(node, handler->root); 214 + spin_unlock_irqrestore(&handler->lock, flags); 215 + 211 216 if (handler->ops->remove) 212 - handler->ops->remove(handler->root, node, arg); 217 + handler->ops->remove(handler->root, node, mm); 213 218 } 214 219 215 220 struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, ··· 238 231 void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) 239 232 { 240 233 struct mmu_rb_handler *handler = find_mmu_handler(root); 241 - unsigned long flags; 242 234 243 235 if (!handler || !node) 244 236 return; 245 237 246 - spin_lock_irqsave(&handler->lock, flags); 247 - __mmu_rb_remove(handler, node, false); 248 - spin_unlock_irqrestore(&handler->lock, flags); 238 + __mmu_rb_remove(handler, node, NULL); 249 239 } 250 240 251 241 static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) ··· 264 260 static inline void mmu_notifier_page(struct mmu_notifier *mn, 265 261 struct mm_struct *mm, unsigned long addr) 266 262 { 267 - mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE); 263 + mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE); 268 264 } 269 265 270 266 static inline void mmu_notifier_range_start(struct mmu_notifier *mn, ··· 272 268 unsigned long start, 273 269 unsigned long end) 274 270 { 275 - mmu_notifier_mem_invalidate(mn, start, end); 271 + mmu_notifier_mem_invalidate(mn, mm, start, end); 276 272 } 277 273 278 274 static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, 275 + struct mm_struct *mm, 279 276 unsigned long start, unsigned long end) 280 277 { 281 278 struct mmu_rb_handler *handler = 282 279 container_of(mn, struct mmu_rb_handler, mn); 283 280 struct rb_root *root = handler->root; 284 - struct mmu_rb_node *node; 281 + struct mmu_rb_node *node, *ptr = NULL; 285 282 unsigned long flags; 286 283 
287 284 spin_lock_irqsave(&handler->lock, flags); 288 - for (node = __mmu_int_rb_iter_first(root, start, end - 1); node; 289 - node = __mmu_int_rb_iter_next(node, start, end - 1)) { 285 + for (node = __mmu_int_rb_iter_first(root, start, end - 1); 286 + node; node = ptr) { 287 + /* Guard against node removal. */ 288 + ptr = __mmu_int_rb_iter_next(node, start, end - 1); 290 289 hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", 291 290 node->addr, node->len); 292 - if (handler->ops->invalidate(root, node)) 293 - __mmu_rb_remove(handler, node, true); 291 + if (handler->ops->invalidate(root, node)) { 292 + spin_unlock_irqrestore(&handler->lock, flags); 293 + __mmu_rb_remove(handler, node, mm); 294 + spin_lock_irqsave(&handler->lock, flags); 295 + } 294 296 } 295 297 spin_unlock_irqrestore(&handler->lock, flags); 296 298 }
+2 -1
drivers/staging/rdma/hfi1/mmu_rb.h
··· 59 59 struct mmu_rb_ops { 60 60 bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long); 61 61 int (*insert)(struct rb_root *, struct mmu_rb_node *); 62 - void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); 62 + void (*remove)(struct rb_root *, struct mmu_rb_node *, 63 + struct mm_struct *); 63 64 int (*invalidate)(struct rb_root *, struct mmu_rb_node *); 64 65 }; 65 66
+2
drivers/staging/rdma/hfi1/qp.c
··· 519 519 * do the flush work until that QP's 520 520 * sdma work has finished. 521 521 */ 522 + spin_lock(&qp->s_lock); 522 523 if (qp->s_flags & RVT_S_WAIT_DMA) { 523 524 qp->s_flags &= ~RVT_S_WAIT_DMA; 524 525 hfi1_schedule_send(qp); 525 526 } 527 + spin_unlock(&qp->s_lock); 526 528 } 527 529 528 530 /**
+7 -4
drivers/staging/rdma/hfi1/user_exp_rcv.c
··· 87 87 static int set_rcvarray_entry(struct file *, unsigned long, u32, 88 88 struct tid_group *, struct page **, unsigned); 89 89 static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); 90 - static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); 90 + static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, 91 + struct mm_struct *); 91 92 static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); 92 93 static int program_rcvarray(struct file *, unsigned long, struct tid_group *, 93 94 struct tid_pageset *, unsigned, u16, struct page **, ··· 255 254 struct hfi1_ctxtdata *uctxt = fd->uctxt; 256 255 struct tid_group *grp, *gptr; 257 256 257 + if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) 258 + return 0; 258 259 /* 259 260 * The notifier would have been removed when the process'es mm 260 261 * was freed. ··· 902 899 if (!node || node->rcventry != (uctxt->expected_base + rcventry)) 903 900 return -EBADF; 904 901 if (HFI1_CAP_IS_USET(TID_UNMAP)) 905 - mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false); 902 + mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL); 906 903 else 907 904 hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); 908 905 ··· 968 965 continue; 969 966 if (HFI1_CAP_IS_USET(TID_UNMAP)) 970 967 mmu_rb_remove(&fd->tid_rb_root, 971 - &node->mmu, false); 968 + &node->mmu, NULL); 972 969 else 973 970 hfi1_mmu_rb_remove(&fd->tid_rb_root, 974 971 &node->mmu); ··· 1035 1032 } 1036 1033 1037 1034 static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node, 1038 - bool notifier) 1035 + struct mm_struct *mm) 1039 1036 { 1040 1037 struct hfi1_filedata *fdata = 1041 1038 container_of(root, struct hfi1_filedata, tid_rb_root);
+22 -11
drivers/staging/rdma/hfi1/user_sdma.c
··· 278 278 static void user_sdma_free_request(struct user_sdma_request *, bool); 279 279 static int pin_vector_pages(struct user_sdma_request *, 280 280 struct user_sdma_iovec *); 281 - static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned); 281 + static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned, 282 + unsigned); 282 283 static int check_header_template(struct user_sdma_request *, 283 284 struct hfi1_pkt_header *, u32, u32); 284 285 static int set_txreq_header(struct user_sdma_request *, ··· 300 299 static void activate_packet_queue(struct iowait *, int); 301 300 static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); 302 301 static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *); 303 - static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); 302 + static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, 303 + struct mm_struct *); 304 304 static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *); 305 305 306 306 static struct mmu_rb_ops sdma_rb_ops = { ··· 1065 1063 rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root, 1066 1064 (unsigned long)iovec->iov.iov_base, 1067 1065 iovec->iov.iov_len); 1068 - if (rb_node) 1066 + if (rb_node && !IS_ERR(rb_node)) 1069 1067 node = container_of(rb_node, struct sdma_mmu_node, rb); 1068 + else 1069 + rb_node = NULL; 1070 1070 1071 1071 if (!node) { 1072 1072 node = kzalloc(sizeof(*node), GFP_KERNEL); ··· 1111 1107 goto bail; 1112 1108 } 1113 1109 if (pinned != npages) { 1114 - unpin_vector_pages(current->mm, pages, pinned); 1110 + unpin_vector_pages(current->mm, pages, node->npages, 1111 + pinned); 1115 1112 ret = -EFAULT; 1116 1113 goto bail; 1117 1114 } ··· 1152 1147 } 1153 1148 1154 1149 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, 1155 - unsigned npages) 1150 + unsigned start, unsigned npages) 1156 1151 { 1157 - hfi1_release_user_pages(mm, pages, npages, 0); 1152 + 
hfi1_release_user_pages(mm, pages + start, npages, 0); 1158 1153 kfree(pages); 1159 1154 } 1160 1155 ··· 1507 1502 &req->pq->sdma_rb_root, 1508 1503 (unsigned long)req->iovs[i].iov.iov_base, 1509 1504 req->iovs[i].iov.iov_len); 1510 - if (!mnode) 1505 + if (!mnode || IS_ERR(mnode)) 1511 1506 continue; 1512 1507 1513 1508 node = container_of(mnode, struct sdma_mmu_node, rb); ··· 1552 1547 } 1553 1548 1554 1549 static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, 1555 - bool notifier) 1550 + struct mm_struct *mm) 1556 1551 { 1557 1552 struct sdma_mmu_node *node = 1558 1553 container_of(mnode, struct sdma_mmu_node, rb); ··· 1562 1557 node->pq->n_locked -= node->npages; 1563 1558 spin_unlock(&node->pq->evict_lock); 1564 1559 1565 - unpin_vector_pages(notifier ? NULL : current->mm, node->pages, 1560 + /* 1561 + * If mm is set, we are being called by the MMU notifier and we 1562 + * should not pass a mm_struct to unpin_vector_page(). This is to 1563 + * prevent a deadlock when hfi1_release_user_pages() attempts to 1564 + * take the mmap_sem, which the MMU notifier has already taken. 1565 + */ 1566 + unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0, 1566 1567 node->npages); 1567 1568 /* 1568 1569 * If called by the MMU notifier, we have to adjust the pinned 1569 1570 * page count ourselves. 1570 1571 */ 1571 - if (notifier) 1572 - current->mm->pinned_vm -= node->npages; 1572 + if (mm) 1573 + mm->pinned_vm -= node->npages; 1573 1574 kfree(node); 1574 1575 } 1575 1576
+2
drivers/thermal/Kconfig
··· 376 376 tristate "Temperature sensor driver for mediatek SoCs" 377 377 depends on ARCH_MEDIATEK || COMPILE_TEST 378 378 depends on HAS_IOMEM 379 + depends on NVMEM || NVMEM=n 380 + depends on RESET_CONTROLLER 379 381 default y 380 382 help 381 383 Enable this option if you want to have support for thermal management
+2 -2
drivers/thermal/hisi_thermal.c
··· 68 68 * Every step equals (1 * 200) / 255 celsius, and finally 69 69 * need convert to millicelsius. 70 70 */ 71 - return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000; 71 + return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255)); 72 72 } 73 73 74 74 static inline long _temp_to_step(long temp) 75 75 { 76 - return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200); 76 + return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000; 77 77 } 78 78 79 79 static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
+1 -2
drivers/thermal/mtk_thermal.c
··· 27 27 #include <linux/thermal.h> 28 28 #include <linux/reset.h> 29 29 #include <linux/types.h> 30 - #include <linux/nvmem-consumer.h> 31 30 32 31 /* AUXADC Registers */ 33 32 #define AUXADC_CON0_V 0x000 ··· 618 619 619 620 module_platform_driver(mtk_thermal_driver); 620 621 621 - MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); 622 + MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); 622 623 MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>"); 623 624 MODULE_DESCRIPTION("Mediatek thermal driver"); 624 625 MODULE_LICENSE("GPL v2");
+2 -2
drivers/thermal/of-thermal.c
··· 803 803 * otherwise, it returns a corresponding ERR_PTR(). Caller must 804 804 * check the return value with help of IS_ERR() helper. 805 805 */ 806 - static struct __thermal_zone * 807 - thermal_of_build_thermal_zone(struct device_node *np) 806 + static struct __thermal_zone 807 + __init *thermal_of_build_thermal_zone(struct device_node *np) 808 808 { 809 809 struct device_node *child = NULL, *gchild; 810 810 struct __thermal_zone *tz;
+1 -1
drivers/thermal/power_allocator.c
··· 301 301 capped_extra_power = 0; 302 302 extra_power = 0; 303 303 for (i = 0; i < num_actors; i++) { 304 - u64 req_range = req_power[i] * power_range; 304 + u64 req_range = (u64)req_power[i] * power_range; 305 305 306 306 granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range, 307 307 total_req_power);
+5 -5
drivers/thermal/thermal_core.c
··· 688 688 { 689 689 struct thermal_zone_device *tz = to_thermal_zone(dev); 690 690 int trip, ret; 691 - unsigned long temperature; 691 + int temperature; 692 692 693 693 if (!tz->ops->set_trip_temp) 694 694 return -EPERM; ··· 696 696 if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) 697 697 return -EINVAL; 698 698 699 - if (kstrtoul(buf, 10, &temperature)) 699 + if (kstrtoint(buf, 10, &temperature)) 700 700 return -EINVAL; 701 701 702 702 ret = tz->ops->set_trip_temp(tz, trip, temperature); ··· 899 899 { 900 900 struct thermal_zone_device *tz = to_thermal_zone(dev); 901 901 int ret = 0; 902 - unsigned long temperature; 902 + int temperature; 903 903 904 - if (kstrtoul(buf, 10, &temperature)) 904 + if (kstrtoint(buf, 10, &temperature)) 905 905 return -EINVAL; 906 906 907 907 if (!tz->ops->set_emul_temp) { ··· 959 959 struct thermal_zone_device *tz = to_thermal_zone(dev); \ 960 960 \ 961 961 if (tz->tzp) \ 962 - return sprintf(buf, "%u\n", tz->tzp->name); \ 962 + return sprintf(buf, "%d\n", tz->tzp->name); \ 963 963 else \ 964 964 return -EIO; \ 965 965 } \
+37 -42
drivers/tty/pty.c
··· 626 626 */ 627 627 628 628 static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, 629 - struct inode *ptm_inode, int idx) 629 + struct file *file, int idx) 630 630 { 631 631 /* Master must be open via /dev/ptmx */ 632 632 return ERR_PTR(-EIO); ··· 642 642 */ 643 643 644 644 static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver, 645 - struct inode *pts_inode, int idx) 645 + struct file *file, int idx) 646 646 { 647 647 struct tty_struct *tty; 648 648 649 649 mutex_lock(&devpts_mutex); 650 - tty = devpts_get_priv(pts_inode); 650 + tty = devpts_get_priv(file->f_path.dentry); 651 651 mutex_unlock(&devpts_mutex); 652 652 /* Master must be open before slave */ 653 653 if (!tty) ··· 663 663 /* this is called once with whichever end is closed last */ 664 664 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) 665 665 { 666 - struct inode *ptmx_inode; 666 + struct pts_fs_info *fsi; 667 667 668 668 if (tty->driver->subtype == PTY_TYPE_MASTER) 669 - ptmx_inode = tty->driver_data; 669 + fsi = tty->driver_data; 670 670 else 671 - ptmx_inode = tty->link->driver_data; 672 - devpts_kill_index(ptmx_inode, tty->index); 673 - devpts_del_ref(ptmx_inode); 671 + fsi = tty->link->driver_data; 672 + devpts_kill_index(fsi, tty->index); 673 + devpts_put_ref(fsi); 674 674 } 675 675 676 676 static const struct tty_operations ptm_unix98_ops = { ··· 720 720 721 721 static int ptmx_open(struct inode *inode, struct file *filp) 722 722 { 723 + struct pts_fs_info *fsi; 723 724 struct tty_struct *tty; 724 - struct inode *slave_inode; 725 + struct dentry *dentry; 725 726 int retval; 726 727 int index; 727 728 ··· 735 734 if (retval) 736 735 return retval; 737 736 737 + fsi = devpts_get_ref(inode, filp); 738 + retval = -ENODEV; 739 + if (!fsi) 740 + goto out_free_file; 741 + 738 742 /* find a device that is not in use. 
*/ 739 743 mutex_lock(&devpts_mutex); 740 - index = devpts_new_index(inode); 741 - if (index < 0) { 742 - retval = index; 743 - mutex_unlock(&devpts_mutex); 744 - goto err_file; 745 - } 746 - 744 + index = devpts_new_index(fsi); 747 745 mutex_unlock(&devpts_mutex); 746 + 747 + retval = index; 748 + if (index < 0) 749 + goto out_put_ref; 750 + 748 751 749 752 mutex_lock(&tty_mutex); 750 753 tty = tty_init_dev(ptm_driver, index); 751 - 752 - if (IS_ERR(tty)) { 753 - retval = PTR_ERR(tty); 754 - goto out; 755 - } 756 - 757 754 /* The tty returned here is locked so we can safely 758 755 drop the mutex */ 759 756 mutex_unlock(&tty_mutex); 760 757 761 - set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 762 - tty->driver_data = inode; 758 + retval = PTR_ERR(tty); 759 + if (IS_ERR(tty)) 760 + goto out; 763 761 764 762 /* 765 - * In the case where all references to ptmx inode are dropped and we 766 - * still have /dev/tty opened pointing to the master/slave pair (ptmx 767 - * is closed/released before /dev/tty), we must make sure that the inode 768 - * is still valid when we call the final pty_unix98_shutdown, thus we 769 - * hold an additional reference to the ptmx inode. For the same /dev/tty 770 - * last close case, we also need to make sure the super_block isn't 771 - * destroyed (devpts instance unmounted), before /dev/tty is closed and 772 - * on its release devpts_kill_index is called. 
763 + * From here on out, the tty is "live", and the index and 764 + * fsi will be killed/put by the tty_release() 773 765 */ 774 - devpts_add_ref(inode); 766 + set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 767 + tty->driver_data = fsi; 775 768 776 769 tty_add_file(tty, filp); 777 770 778 - slave_inode = devpts_pty_new(inode, 779 - MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index, 780 - tty->link); 781 - if (IS_ERR(slave_inode)) { 782 - retval = PTR_ERR(slave_inode); 771 + dentry = devpts_pty_new(fsi, index, tty->link); 772 + if (IS_ERR(dentry)) { 773 + retval = PTR_ERR(dentry); 783 774 goto err_release; 784 775 } 785 - tty->link->driver_data = slave_inode; 776 + tty->link->driver_data = dentry; 786 777 787 778 retval = ptm_driver->ops->open(tty, filp); 788 779 if (retval) ··· 786 793 return 0; 787 794 err_release: 788 795 tty_unlock(tty); 796 + // This will also put-ref the fsi 789 797 tty_release(inode, filp); 790 798 return retval; 791 799 out: 792 - mutex_unlock(&tty_mutex); 793 - devpts_kill_index(inode, index); 794 - err_file: 800 + devpts_kill_index(fsi, index); 801 + out_put_ref: 802 + devpts_put_ref(fsi); 803 + out_free_file: 795 804 tty_free_file(filp); 796 805 return retval; 797 806 }
+10 -1
drivers/tty/serial/8250/8250_port.c
··· 1403 1403 /* 1404 1404 * Empty the RX FIFO, we are not interested in anything 1405 1405 * received during the half-duplex transmission. 1406 + * Enable previously disabled RX interrupts. 1406 1407 */ 1407 - if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) 1408 + if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { 1408 1409 serial8250_clear_fifos(p); 1410 + 1411 + serial8250_rpm_get(p); 1412 + 1413 + p->ier |= UART_IER_RLSI | UART_IER_RDI; 1414 + serial_port_out(&p->port, UART_IER, p->ier); 1415 + 1416 + serial8250_rpm_put(p); 1417 + } 1409 1418 } 1410 1419 1411 1420 static void serial8250_em485_handle_stop_tx(unsigned long arg)
-1
drivers/tty/serial/8250/Kconfig
··· 324 324 config SERIAL_8250_RT288X 325 325 bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support" 326 326 depends on SERIAL_8250 327 - depends on MIPS || COMPILE_TEST 328 327 default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620 329 328 help 330 329 Selecting this option will add support for the alternate register
+4 -4
drivers/tty/serial/uartlite.c
··· 72 72 iowrite32be(val, addr); 73 73 } 74 74 75 - static const struct uartlite_reg_ops uartlite_be = { 75 + static struct uartlite_reg_ops uartlite_be = { 76 76 .in = uartlite_inbe32, 77 77 .out = uartlite_outbe32, 78 78 }; ··· 87 87 iowrite32(val, addr); 88 88 } 89 89 90 - static const struct uartlite_reg_ops uartlite_le = { 90 + static struct uartlite_reg_ops uartlite_le = { 91 91 .in = uartlite_inle32, 92 92 .out = uartlite_outle32, 93 93 }; 94 94 95 95 static inline u32 uart_in32(u32 offset, struct uart_port *port) 96 96 { 97 - const struct uartlite_reg_ops *reg_ops = port->private_data; 97 + struct uartlite_reg_ops *reg_ops = port->private_data; 98 98 99 99 return reg_ops->in(port->membase + offset); 100 100 } 101 101 102 102 static inline void uart_out32(u32 val, u32 offset, struct uart_port *port) 103 103 { 104 - const struct uartlite_reg_ops *reg_ops = port->private_data; 104 + struct uartlite_reg_ops *reg_ops = port->private_data; 105 105 106 106 reg_ops->out(val, port->membase + offset); 107 107 }
+3 -3
drivers/tty/tty_io.c
··· 1367 1367 * Locking: tty_mutex must be held. If the tty is found, bump the tty kref. 1368 1368 */ 1369 1369 static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, 1370 - struct inode *inode, int idx) 1370 + struct file *file, int idx) 1371 1371 { 1372 1372 struct tty_struct *tty; 1373 1373 1374 1374 if (driver->ops->lookup) 1375 - tty = driver->ops->lookup(driver, inode, idx); 1375 + tty = driver->ops->lookup(driver, file, idx); 1376 1376 else 1377 1377 tty = driver->ttys[idx]; 1378 1378 ··· 2040 2040 } 2041 2041 2042 2042 /* check whether we're reopening an existing tty */ 2043 - tty = tty_driver_lookup_tty(driver, inode, index); 2043 + tty = tty_driver_lookup_tty(driver, filp, index); 2044 2044 if (IS_ERR(tty)) { 2045 2045 mutex_unlock(&tty_mutex); 2046 2046 goto out;
+22 -1
drivers/usb/dwc3/core.c
··· 1150 1150 phy_exit(dwc->usb2_generic_phy); 1151 1151 phy_exit(dwc->usb3_generic_phy); 1152 1152 1153 + usb_phy_set_suspend(dwc->usb2_phy, 1); 1154 + usb_phy_set_suspend(dwc->usb3_phy, 1); 1155 + WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0); 1156 + WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0); 1157 + 1153 1158 pinctrl_pm_select_sleep_state(dev); 1154 1159 1155 1160 return 0; ··· 1168 1163 1169 1164 pinctrl_pm_select_default_state(dev); 1170 1165 1166 + usb_phy_set_suspend(dwc->usb2_phy, 0); 1167 + usb_phy_set_suspend(dwc->usb3_phy, 0); 1168 + ret = phy_power_on(dwc->usb2_generic_phy); 1169 + if (ret < 0) 1170 + return ret; 1171 + 1172 + ret = phy_power_on(dwc->usb3_generic_phy); 1173 + if (ret < 0) 1174 + goto err_usb2phy_power; 1175 + 1171 1176 usb_phy_init(dwc->usb3_phy); 1172 1177 usb_phy_init(dwc->usb2_phy); 1173 1178 ret = phy_init(dwc->usb2_generic_phy); 1174 1179 if (ret < 0) 1175 - return ret; 1180 + goto err_usb3phy_power; 1176 1181 1177 1182 ret = phy_init(dwc->usb3_generic_phy); 1178 1183 if (ret < 0) ··· 1214 1199 1215 1200 err_usb2phy_init: 1216 1201 phy_exit(dwc->usb2_generic_phy); 1202 + 1203 + err_usb3phy_power: 1204 + phy_power_off(dwc->usb3_generic_phy); 1205 + 1206 + err_usb2phy_power: 1207 + phy_power_off(dwc->usb2_generic_phy); 1217 1208 1218 1209 return ret; 1219 1210 }
+8 -5
drivers/usb/dwc3/debugfs.c
··· 645 645 file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset); 646 646 if (!file) { 647 647 ret = -ENOMEM; 648 - goto err1; 648 + goto err2; 649 649 } 650 650 651 651 if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) { ··· 653 653 dwc, &dwc3_mode_fops); 654 654 if (!file) { 655 655 ret = -ENOMEM; 656 - goto err1; 656 + goto err2; 657 657 } 658 658 } 659 659 ··· 663 663 dwc, &dwc3_testmode_fops); 664 664 if (!file) { 665 665 ret = -ENOMEM; 666 - goto err1; 666 + goto err2; 667 667 } 668 668 669 669 file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, 670 670 dwc, &dwc3_link_state_fops); 671 671 if (!file) { 672 672 ret = -ENOMEM; 673 - goto err1; 673 + goto err2; 674 674 } 675 675 } 676 676 677 677 return 0; 678 + 679 + err2: 680 + kfree(dwc->regset); 678 681 679 682 err1: 680 683 debugfs_remove_recursive(root); ··· 689 686 void dwc3_debugfs_exit(struct dwc3 *dwc) 690 687 { 691 688 debugfs_remove_recursive(dwc->root); 692 - dwc->root = NULL; 689 + kfree(dwc->regset); 693 690 }
+4 -8
drivers/usb/dwc3/dwc3-omap.c
··· 496 496 ret = pm_runtime_get_sync(dev); 497 497 if (ret < 0) { 498 498 dev_err(dev, "get_sync failed with err %d\n", ret); 499 - goto err0; 499 + goto err1; 500 500 } 501 501 502 502 dwc3_omap_map_offset(omap); ··· 516 516 517 517 ret = dwc3_omap_extcon_register(omap); 518 518 if (ret < 0) 519 - goto err2; 519 + goto err1; 520 520 521 521 ret = of_platform_populate(node, NULL, NULL, dev); 522 522 if (ret) { 523 523 dev_err(&pdev->dev, "failed to create dwc3 core\n"); 524 - goto err3; 524 + goto err2; 525 525 } 526 526 527 527 dwc3_omap_enable_irqs(omap); 528 528 529 529 return 0; 530 530 531 - err3: 531 + err2: 532 532 extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb); 533 533 extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb); 534 - err2: 535 - dwc3_omap_disable_irqs(omap); 536 534 537 535 err1: 538 536 pm_runtime_put_sync(dev); 539 - 540 - err0: 541 537 pm_runtime_disable(dev); 542 538 543 539 return ret;
+6
drivers/usb/dwc3/gadget.c
··· 2936 2936 2937 2937 int dwc3_gadget_suspend(struct dwc3 *dwc) 2938 2938 { 2939 + if (!dwc->gadget_driver) 2940 + return 0; 2941 + 2939 2942 if (dwc->pullups_connected) { 2940 2943 dwc3_gadget_disable_irq(dwc); 2941 2944 dwc3_gadget_run_stop(dwc, true, true); ··· 2956 2953 { 2957 2954 struct dwc3_ep *dep; 2958 2955 int ret; 2956 + 2957 + if (!dwc->gadget_driver) 2958 + return 0; 2959 2959 2960 2960 /* Start with SuperSpeed Default */ 2961 2961 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+2
drivers/usb/gadget/composite.c
··· 651 651 ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1); 652 652 ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; 653 653 ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE; 654 + ssp_cap->bReserved = 0; 655 + ssp_cap->wReserved = 0; 654 656 655 657 /* SSAC = 1 (2 attributes) */ 656 658 ssp_cap->bmAttributes = cpu_to_le32(1);
+2 -3
drivers/usb/gadget/function/f_fs.c
··· 646 646 work); 647 647 int ret = io_data->req->status ? io_data->req->status : 648 648 io_data->req->actual; 649 + bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD; 649 650 650 651 if (io_data->read && ret > 0) { 651 652 use_mm(io_data->mm); ··· 658 657 659 658 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret); 660 659 661 - if (io_data->ffs->ffs_eventfd && 662 - !(io_data->kiocb->ki_flags & IOCB_EVENTFD)) 660 + if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd) 663 661 eventfd_signal(io_data->ffs->ffs_eventfd, 1); 664 662 665 663 usb_ep_free_request(io_data->ep, io_data->req); 666 664 667 - io_data->kiocb->private = NULL; 668 665 if (io_data->read) 669 666 kfree(io_data->to_free); 670 667 kfree(io_data->buf);
+8 -7
drivers/video/fbdev/amba-clcd.c
··· 440 440 fb->off_ienb = CLCD_PL111_IENB; 441 441 fb->off_cntl = CLCD_PL111_CNTL; 442 442 } else { 443 - #ifdef CONFIG_ARCH_VERSATILE 444 - fb->off_ienb = CLCD_PL111_IENB; 445 - fb->off_cntl = CLCD_PL111_CNTL; 446 - #else 447 - fb->off_ienb = CLCD_PL110_IENB; 448 - fb->off_cntl = CLCD_PL110_CNTL; 449 - #endif 443 + if (of_machine_is_compatible("arm,versatile-ab") || 444 + of_machine_is_compatible("arm,versatile-pb")) { 445 + fb->off_ienb = CLCD_PL111_IENB; 446 + fb->off_cntl = CLCD_PL111_CNTL; 447 + } else { 448 + fb->off_ienb = CLCD_PL110_IENB; 449 + fb->off_cntl = CLCD_PL110_CNTL; 450 + } 450 451 } 451 452 452 453 fb->clk = clk_get(&fb->dev->dev, NULL);
+4 -8
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
··· 200 200 static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, 201 201 char *desc, struct gpio_desc **gpiod) 202 202 { 203 - struct gpio_desc *gd; 204 203 int r; 205 204 206 - *gpiod = NULL; 207 - 208 205 r = devm_gpio_request_one(dev, gpio, flags, desc); 209 - if (r) 206 + if (r) { 207 + *gpiod = NULL; 210 208 return r == -ENOENT ? 0 : r; 209 + } 211 210 212 - gd = gpio_to_desc(gpio); 213 - if (IS_ERR(gd)) 214 - return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd); 211 + *gpiod = gpio_to_desc(gpio); 215 212 216 - *gpiod = gd; 217 213 return 0; 218 214 } 219 215
+2 -4
fs/ceph/mds_client.c
··· 386 386 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); 387 387 if (atomic_dec_and_test(&s->s_ref)) { 388 388 if (s->s_auth.authorizer) 389 - ceph_auth_destroy_authorizer( 390 - s->s_mdsc->fsc->client->monc.auth, 391 - s->s_auth.authorizer); 389 + ceph_auth_destroy_authorizer(s->s_auth.authorizer); 392 390 kfree(s); 393 391 } 394 392 } ··· 3898 3900 struct ceph_auth_handshake *auth = &s->s_auth; 3899 3901 3900 3902 if (force_new && auth->authorizer) { 3901 - ceph_auth_destroy_authorizer(ac, auth->authorizer); 3903 + ceph_auth_destroy_authorizer(auth->authorizer); 3902 3904 auth->authorizer = NULL; 3903 3905 } 3904 3906 if (!auth->authorizer) {
+35 -65
fs/devpts/inode.c
··· 128 128 struct pts_fs_info { 129 129 struct ida allocated_ptys; 130 130 struct pts_mount_opts mount_opts; 131 + struct super_block *sb; 131 132 struct dentry *ptmx_dentry; 132 133 }; 133 134 ··· 359 358 .show_options = devpts_show_options, 360 359 }; 361 360 362 - static void *new_pts_fs_info(void) 361 + static void *new_pts_fs_info(struct super_block *sb) 363 362 { 364 363 struct pts_fs_info *fsi; 365 364 ··· 370 369 ida_init(&fsi->allocated_ptys); 371 370 fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; 372 371 fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; 372 + fsi->sb = sb; 373 373 374 374 return fsi; 375 375 } ··· 386 384 s->s_op = &devpts_sops; 387 385 s->s_time_gran = 1; 388 386 389 - s->s_fs_info = new_pts_fs_info(); 387 + s->s_fs_info = new_pts_fs_info(s); 390 388 if (!s->s_fs_info) 391 389 goto fail; 392 390 ··· 526 524 * to the System V naming convention 527 525 */ 528 526 529 - int devpts_new_index(struct inode *ptmx_inode) 527 + int devpts_new_index(struct pts_fs_info *fsi) 530 528 { 531 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 532 - struct pts_fs_info *fsi; 533 529 int index; 534 530 int ida_ret; 535 531 536 - if (!sb) 532 + if (!fsi) 537 533 return -ENODEV; 538 534 539 - fsi = DEVPTS_SB(sb); 540 535 retry: 541 536 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) 542 537 return -ENOMEM; ··· 563 564 return index; 564 565 } 565 566 566 - void devpts_kill_index(struct inode *ptmx_inode, int idx) 567 + void devpts_kill_index(struct pts_fs_info *fsi, int idx) 567 568 { 568 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 569 - struct pts_fs_info *fsi = DEVPTS_SB(sb); 570 - 571 569 mutex_lock(&allocated_ptys_lock); 572 570 ida_remove(&fsi->allocated_ptys, idx); 573 571 pty_count--; ··· 574 578 /* 575 579 * pty code needs to hold extra references in case of last /dev/tty close 576 580 */ 577 - 578 - void devpts_add_ref(struct inode *ptmx_inode) 581 + struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file 
*file) 579 582 { 580 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 583 + struct super_block *sb; 584 + struct pts_fs_info *fsi; 585 + 586 + sb = pts_sb_from_inode(ptmx_inode); 587 + if (!sb) 588 + return NULL; 589 + fsi = DEVPTS_SB(sb); 590 + if (!fsi) 591 + return NULL; 581 592 582 593 atomic_inc(&sb->s_active); 583 - ihold(ptmx_inode); 594 + return fsi; 584 595 } 585 596 586 - void devpts_del_ref(struct inode *ptmx_inode) 597 + void devpts_put_ref(struct pts_fs_info *fsi) 587 598 { 588 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 589 - 590 - iput(ptmx_inode); 591 - deactivate_super(sb); 599 + deactivate_super(fsi->sb); 592 600 } 593 601 594 602 /** ··· 604 604 * 605 605 * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill. 606 606 */ 607 - struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, 608 - void *priv) 607 + struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) 609 608 { 610 609 struct dentry *dentry; 611 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 610 + struct super_block *sb; 612 611 struct inode *inode; 613 612 struct dentry *root; 614 - struct pts_fs_info *fsi; 615 613 struct pts_mount_opts *opts; 616 614 char s[12]; 617 615 618 - if (!sb) 616 + if (!fsi) 619 617 return ERR_PTR(-ENODEV); 620 618 619 + sb = fsi->sb; 621 620 root = sb->s_root; 622 - fsi = DEVPTS_SB(sb); 623 621 opts = &fsi->mount_opts; 624 622 625 623 inode = new_inode(sb); ··· 628 630 inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); 629 631 inode->i_gid = opts->setgid ? 
opts->gid : current_fsgid(); 630 632 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 631 - init_special_inode(inode, S_IFCHR|opts->mode, device); 632 - inode->i_private = priv; 633 + init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index)); 633 634 634 635 sprintf(s, "%d", index); 635 636 636 - inode_lock(d_inode(root)); 637 - 638 637 dentry = d_alloc_name(root, s); 639 638 if (dentry) { 639 + dentry->d_fsdata = priv; 640 640 d_add(dentry, inode); 641 641 fsnotify_create(d_inode(root), dentry); 642 642 } else { 643 643 iput(inode); 644 - inode = ERR_PTR(-ENOMEM); 644 + dentry = ERR_PTR(-ENOMEM); 645 645 } 646 646 647 - inode_unlock(d_inode(root)); 648 - 649 - return inode; 647 + return dentry; 650 648 } 651 649 652 650 /** ··· 651 657 * 652 658 * Returns whatever was passed as priv in devpts_pty_new for a given inode. 653 659 */ 654 - void *devpts_get_priv(struct inode *pts_inode) 660 + void *devpts_get_priv(struct dentry *dentry) 655 661 { 656 - struct dentry *dentry; 657 - void *priv = NULL; 658 - 659 - BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); 660 - 661 - /* Ensure dentry has not been deleted by devpts_pty_kill() */ 662 - dentry = d_find_alias(pts_inode); 663 - if (!dentry) 664 - return NULL; 665 - 666 - if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) 667 - priv = pts_inode->i_private; 668 - 669 - dput(dentry); 670 - 671 - return priv; 662 + WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); 663 + return dentry->d_fsdata; 672 664 } 673 665 674 666 /** ··· 663 683 * 664 684 * This is an inverse operation of devpts_pty_new. 
665 685 */ 666 - void devpts_pty_kill(struct inode *inode) 686 + void devpts_pty_kill(struct dentry *dentry) 667 687 { 668 - struct super_block *sb = pts_sb_from_inode(inode); 669 - struct dentry *root = sb->s_root; 670 - struct dentry *dentry; 688 + WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); 671 689 672 - BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); 673 - 674 - inode_lock(d_inode(root)); 675 - 676 - dentry = d_find_alias(inode); 677 - 678 - drop_nlink(inode); 690 + dentry->d_fsdata = NULL; 691 + drop_nlink(dentry->d_inode); 679 692 d_delete(dentry); 680 693 dput(dentry); /* d_alloc_name() in devpts_pty_new() */ 681 - dput(dentry); /* d_find_alias above */ 682 - 683 - inode_unlock(d_inode(root)); 684 694 } 685 695 686 696 static int __init init_devpts_fs(void)
+2
fs/ocfs2/dlm/dlmmaster.c
··· 2455 2455 2456 2456 spin_unlock(&dlm->spinlock); 2457 2457 2458 + ret = 0; 2459 + 2458 2460 done: 2459 2461 dlm_put(dlm); 2460 2462 return ret;
+30 -3
fs/proc/task_mmu.c
··· 1518 1518 return page; 1519 1519 } 1520 1520 1521 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1522 + static struct page *can_gather_numa_stats_pmd(pmd_t pmd, 1523 + struct vm_area_struct *vma, 1524 + unsigned long addr) 1525 + { 1526 + struct page *page; 1527 + int nid; 1528 + 1529 + if (!pmd_present(pmd)) 1530 + return NULL; 1531 + 1532 + page = vm_normal_page_pmd(vma, addr, pmd); 1533 + if (!page) 1534 + return NULL; 1535 + 1536 + if (PageReserved(page)) 1537 + return NULL; 1538 + 1539 + nid = page_to_nid(page); 1540 + if (!node_isset(nid, node_states[N_MEMORY])) 1541 + return NULL; 1542 + 1543 + return page; 1544 + } 1545 + #endif 1546 + 1521 1547 static int gather_pte_stats(pmd_t *pmd, unsigned long addr, 1522 1548 unsigned long end, struct mm_walk *walk) 1523 1549 { ··· 1553 1527 pte_t *orig_pte; 1554 1528 pte_t *pte; 1555 1529 1530 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1556 1531 ptl = pmd_trans_huge_lock(pmd, vma); 1557 1532 if (ptl) { 1558 - pte_t huge_pte = *(pte_t *)pmd; 1559 1533 struct page *page; 1560 1534 1561 - page = can_gather_numa_stats(huge_pte, vma, addr); 1535 + page = can_gather_numa_stats_pmd(*pmd, vma, addr); 1562 1536 if (page) 1563 - gather_stats(page, md, pte_dirty(huge_pte), 1537 + gather_stats(page, md, pmd_dirty(*pmd), 1564 1538 HPAGE_PMD_SIZE/PAGE_SIZE); 1565 1539 spin_unlock(ptl); 1566 1540 return 0; ··· 1568 1542 1569 1543 if (pmd_trans_unstable(pmd)) 1570 1544 return 0; 1545 + #endif 1571 1546 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 1572 1547 do { 1573 1548 struct page *page = can_gather_numa_stats(*pte, vma, addr);
+6 -2
include/asm-generic/futex.h
··· 108 108 u32 val; 109 109 110 110 preempt_disable(); 111 - if (unlikely(get_user(val, uaddr) != 0)) 111 + if (unlikely(get_user(val, uaddr) != 0)) { 112 + preempt_enable(); 112 113 return -EFAULT; 114 + } 113 115 114 - if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) 116 + if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) { 117 + preempt_enable(); 115 118 return -EFAULT; 119 + } 116 120 117 121 *uval = val; 118 122 preempt_enable();
+2
include/drm/drm_cache.h
··· 39 39 { 40 40 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) 41 41 return false; 42 + #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3) 43 + return false; 42 44 #else 43 45 return true; 44 46 #endif
+5 -5
include/linux/ceph/auth.h
··· 12 12 */ 13 13 14 14 struct ceph_auth_client; 15 - struct ceph_authorizer; 16 15 struct ceph_msg; 16 + 17 + struct ceph_authorizer { 18 + void (*destroy)(struct ceph_authorizer *); 19 + }; 17 20 18 21 struct ceph_auth_handshake { 19 22 struct ceph_authorizer *authorizer; ··· 65 62 struct ceph_auth_handshake *auth); 66 63 int (*verify_authorizer_reply)(struct ceph_auth_client *ac, 67 64 struct ceph_authorizer *a, size_t len); 68 - void (*destroy_authorizer)(struct ceph_auth_client *ac, 69 - struct ceph_authorizer *a); 70 65 void (*invalidate_authorizer)(struct ceph_auth_client *ac, 71 66 int peer_type); 72 67 ··· 113 112 extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, 114 113 int peer_type, 115 114 struct ceph_auth_handshake *auth); 116 - extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 117 - struct ceph_authorizer *a); 115 + void ceph_auth_destroy_authorizer(struct ceph_authorizer *a); 118 116 extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, 119 117 int peer_type, 120 118 struct ceph_auth_handshake *a);
-1
include/linux/ceph/osd_client.h
··· 16 16 struct ceph_snap_context; 17 17 struct ceph_osd_request; 18 18 struct ceph_osd_client; 19 - struct ceph_authorizer; 20 19 21 20 /* 22 21 * completion callback for async writepages
+1
include/linux/cgroup-defs.h
··· 444 444 int (*can_attach)(struct cgroup_taskset *tset); 445 445 void (*cancel_attach)(struct cgroup_taskset *tset); 446 446 void (*attach)(struct cgroup_taskset *tset); 447 + void (*post_attach)(void); 447 448 int (*can_fork)(struct task_struct *task); 448 449 void (*cancel_fork)(struct task_struct *task); 449 450 void (*fork)(struct task_struct *task);
-6
include/linux/cpuset.h
··· 137 137 task_unlock(current); 138 138 } 139 139 140 - extern void cpuset_post_attach_flush(void); 141 - 142 140 #else /* !CONFIG_CPUSETS */ 143 141 144 142 static inline bool cpusets_enabled(void) { return false; } ··· 241 243 static inline bool read_mems_allowed_retry(unsigned int seq) 242 244 { 243 245 return false; 244 - } 245 - 246 - static inline void cpuset_post_attach_flush(void) 247 - { 248 246 } 249 247 250 248 #endif /* !CONFIG_CPUSETS */
+12 -26
include/linux/devpts_fs.h
··· 15 15 16 16 #include <linux/errno.h> 17 17 18 + struct pts_fs_info; 19 + 18 20 #ifdef CONFIG_UNIX98_PTYS 19 21 20 - int devpts_new_index(struct inode *ptmx_inode); 21 - void devpts_kill_index(struct inode *ptmx_inode, int idx); 22 - void devpts_add_ref(struct inode *ptmx_inode); 23 - void devpts_del_ref(struct inode *ptmx_inode); 22 + /* Look up a pts fs info and get a ref to it */ 23 + struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); 24 + void devpts_put_ref(struct pts_fs_info *); 25 + 26 + int devpts_new_index(struct pts_fs_info *); 27 + void devpts_kill_index(struct pts_fs_info *, int); 28 + 24 29 /* mknod in devpts */ 25 - struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, 26 - void *priv); 30 + struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *); 27 31 /* get private structure */ 28 - void *devpts_get_priv(struct inode *pts_inode); 32 + void *devpts_get_priv(struct dentry *); 29 33 /* unlink */ 30 - void devpts_pty_kill(struct inode *inode); 31 - 32 - #else 33 - 34 - /* Dummy stubs in the no-pty case */ 35 - static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; } 36 - static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { } 37 - static inline void devpts_add_ref(struct inode *ptmx_inode) { } 38 - static inline void devpts_del_ref(struct inode *ptmx_inode) { } 39 - static inline struct inode *devpts_pty_new(struct inode *ptmx_inode, 40 - dev_t device, int index, void *priv) 41 - { 42 - return ERR_PTR(-EINVAL); 43 - } 44 - static inline void *devpts_get_priv(struct inode *pts_inode) 45 - { 46 - return NULL; 47 - } 48 - static inline void devpts_pty_kill(struct inode *inode) { } 34 + void devpts_pty_kill(struct dentry *); 49 35 50 36 #endif 51 37
+5
include/linux/huge_mm.h
··· 152 152 } 153 153 154 154 struct page *get_huge_zero_page(void); 155 + void put_huge_zero_page(void); 155 156 156 157 #else /* CONFIG_TRANSPARENT_HUGEPAGE */ 157 158 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) ··· 209 208 return false; 210 209 } 211 210 211 + static inline void put_huge_zero_page(void) 212 + { 213 + BUILD_BUG(); 214 + } 212 215 213 216 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, 214 217 unsigned long addr, pmd_t *pmd, int flags)
+5 -3
include/linux/lockdep.h
··· 196 196 * We record lock dependency chains, so that we can cache them: 197 197 */ 198 198 struct lock_chain { 199 - u8 irq_context; 200 - u8 depth; 201 - u16 base; 199 + /* see BUILD_BUG_ON()s in lookup_chain_cache() */ 200 + unsigned int irq_context : 2, 201 + depth : 6, 202 + base : 24; 203 + /* 4 byte hole */ 202 204 struct hlist_node entry; 203 205 u64 chain_key; 204 206 };
+7
include/linux/mlx4/device.h
··· 828 828 u8 n_ports; 829 829 }; 830 830 831 + enum mlx4_pci_status { 832 + MLX4_PCI_STATUS_DISABLED, 833 + MLX4_PCI_STATUS_ENABLED, 834 + }; 835 + 831 836 struct mlx4_dev_persistent { 832 837 struct pci_dev *pdev; 833 838 struct mlx4_dev *dev; ··· 846 841 u8 state; 847 842 struct mutex interface_state_mutex; /* protect SW state */ 848 843 u8 interface_state; 844 + struct mutex pci_status_mutex; /* sync pci state */ 845 + enum mlx4_pci_status pci_status; 849 846 }; 850 847 851 848 struct mlx4_dev {
+11
include/linux/mlx5/device.h
··· 392 392 MLX5_CAP_OFF_CMDIF_CSUM = 46, 393 393 }; 394 394 395 + enum { 396 + /* 397 + * Max wqe size for rdma read is 512 bytes, so this 398 + * limits our max_sge_rd as the wqe needs to fit: 399 + * - ctrl segment (16 bytes) 400 + * - rdma segment (16 bytes) 401 + * - scatter elements (16 bytes each) 402 + */ 403 + MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 404 + }; 405 + 395 406 struct mlx5_inbox_hdr { 396 407 __be16 opcode; 397 408 u8 rsvd[4];
+4 -3
include/linux/mlx5/driver.h
··· 519 519 }; 520 520 521 521 enum mlx5_interface_state { 522 - MLX5_INTERFACE_STATE_DOWN, 523 - MLX5_INTERFACE_STATE_UP, 522 + MLX5_INTERFACE_STATE_DOWN = BIT(0), 523 + MLX5_INTERFACE_STATE_UP = BIT(1), 524 + MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), 524 525 }; 525 526 526 527 enum mlx5_pci_status { ··· 545 544 enum mlx5_device_state state; 546 545 /* sync interface state */ 547 546 struct mutex intf_state_mutex; 548 - enum mlx5_interface_state interface_state; 547 + unsigned long intf_state; 549 548 void (*event) (struct mlx5_core_dev *dev, 550 549 enum mlx5_dev_event event, 551 550 unsigned long param);
+3 -3
include/linux/mlx5/port.h
··· 54 54 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, 55 55 enum mlx5_port_status *status); 56 56 57 - int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); 58 - void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); 59 - void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, 57 + int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); 58 + void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); 59 + void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, 60 60 u8 port); 61 61 62 62 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+2
include/linux/mlx5/vport.h
··· 45 45 u16 vport, u8 *addr); 46 46 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 47 47 u16 vport, u8 *addr); 48 + int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); 49 + int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); 48 50 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 49 51 u64 *system_image_guid); 50 52 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+4
include/linux/mm.h
··· 1031 1031 page = compound_head(page); 1032 1032 if (atomic_read(compound_mapcount_ptr(page)) >= 0) 1033 1033 return true; 1034 + if (PageHuge(page)) 1035 + return false; 1034 1036 for (i = 0; i < hpage_nr_pages(page); i++) { 1035 1037 if (atomic_read(&page[i]._mapcount) >= 0) 1036 1038 return true; ··· 1140 1138 1141 1139 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 1142 1140 pte_t pte); 1141 + struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, 1142 + pmd_t pmd); 1143 1143 1144 1144 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1145 1145 unsigned long size);
+1
include/linux/pci.h
··· 1111 1111 /* Vital product data routines */ 1112 1112 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1113 1113 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1114 + int pci_set_vpd_size(struct pci_dev *dev, size_t len); 1114 1115 1115 1116 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 1116 1117 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
+39
include/linux/rculist_nulls.h
··· 98 98 if (!is_a_nulls(first)) 99 99 first->pprev = &n->next; 100 100 } 101 + 102 + /** 103 + * hlist_nulls_add_tail_rcu 104 + * @n: the element to add to the hash list. 105 + * @h: the list to add to. 106 + * 107 + * Description: 108 + * Adds the specified element to the end of the specified hlist_nulls, 109 + * while permitting racing traversals. NOTE: tail insertion requires 110 + * list traversal. 111 + * 112 + * The caller must take whatever precautions are necessary 113 + * (such as holding appropriate locks) to avoid racing 114 + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() 115 + * or hlist_nulls_del_rcu(), running on this same list. 116 + * However, it is perfectly legal to run concurrently with 117 + * the _rcu list-traversal primitives, such as 118 + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency 119 + * problems on Alpha CPUs. Regardless of the type of CPU, the 120 + * list-traversal primitive must be guarded by rcu_read_lock(). 121 + */ 122 + static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, 123 + struct hlist_nulls_head *h) 124 + { 125 + struct hlist_nulls_node *i, *last = NULL; 126 + 127 + for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i); 128 + i = hlist_nulls_next_rcu(i)) 129 + last = i; 130 + 131 + if (last) { 132 + n->next = last->next; 133 + n->pprev = &last->next; 134 + rcu_assign_pointer(hlist_nulls_next_rcu(last), n); 135 + } else { 136 + hlist_nulls_add_head_rcu(n, h); 137 + } 138 + } 139 + 101 140 /** 102 141 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type 103 142 * @tpos: the type * to use as a loop cursor.
+2 -2
include/linux/thermal.h
··· 352 352 353 353 struct thermal_trip { 354 354 struct device_node *np; 355 - unsigned long int temperature; 356 - unsigned long int hysteresis; 355 + int temperature; 356 + int hysteresis; 357 357 enum thermal_trip_type type; 358 358 }; 359 359
+2 -2
include/linux/tty_driver.h
··· 7 7 * defined; unless noted otherwise, they are optional, and can be 8 8 * filled in with a null pointer. 9 9 * 10 - * struct tty_struct * (*lookup)(struct tty_driver *self, int idx) 10 + * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx) 11 11 * 12 12 * Return the tty device corresponding to idx, NULL if there is not 13 13 * one currently in use and an ERR_PTR value on error. Called under ··· 250 250 251 251 struct tty_operations { 252 252 struct tty_struct * (*lookup)(struct tty_driver *driver, 253 - struct inode *inode, int idx); 253 + struct file *filp, int idx); 254 254 int (*install)(struct tty_driver *driver, struct tty_struct *tty); 255 255 void (*remove)(struct tty_driver *driver, struct tty_struct *tty); 256 256 int (*open)(struct tty_struct * tty, struct file * filp);
+8
include/media/videobuf2-core.h
··· 375 375 /** 376 376 * struct vb2_ops - driver-specific callbacks 377 377 * 378 + * @verify_planes_array: Verify that a given user space structure contains 379 + * enough planes for the buffer. This is called 380 + * for each dequeued buffer. 378 381 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. 379 382 * For V4L2 this is a struct v4l2_buffer. 380 383 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. ··· 387 384 * the vb2_buffer struct. 388 385 */ 389 386 struct vb2_buf_ops { 387 + int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); 390 388 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); 391 389 int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, 392 390 struct vb2_plane *planes); ··· 404 400 * @fileio_read_once: report EOF after reading the first buffer 405 401 * @fileio_write_immediately: queue buffer after each write() call 406 402 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver 403 + * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF 404 + * has not been called. This is a vb1 idiom that has been adopted 405 + * also by vb2. 407 406 * @lock: pointer to a mutex that protects the vb2_queue struct. The 408 407 * driver can set this to a mutex to let the v4l2 core serialize 409 408 * the queuing ioctls. If the driver wants to handle locking ··· 470 463 unsigned fileio_read_once:1; 471 464 unsigned fileio_write_immediately:1; 472 465 unsigned allow_zero_bytesused:1; 466 + unsigned quirk_poll_must_check_waiting_for_buffers:1; 473 467 474 468 struct mutex *lock; 475 469 void *owner;
+5 -2
include/net/cls_cgroup.h
··· 17 17 #include <linux/hardirq.h> 18 18 #include <linux/rcupdate.h> 19 19 #include <net/sock.h> 20 + #include <net/inet_sock.h> 20 21 21 22 #ifdef CONFIG_CGROUP_NET_CLASSID 22 23 struct cgroup_cls_state { ··· 64 63 * softirqs always disables bh. 65 64 */ 66 65 if (in_serving_softirq()) { 66 + struct sock *sk = skb_to_full_sk(skb); 67 + 67 68 /* If there is an sock_cgroup_classid we'll use that. */ 68 - if (!skb->sk) 69 + if (!sk || !sk_fullsock(sk)) 69 70 return 0; 70 71 71 - classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data); 72 + classid = sock_cgroup_classid(&sk->sk_cgrp_data); 72 73 } 73 74 74 75 return classid;
+3
include/net/ip6_route.h
··· 101 101 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, 102 102 const struct in6_addr *addr, bool anycast); 103 103 104 + struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, 105 + int flags); 106 + 104 107 /* 105 108 * support functions for ND 106 109 *
+2
include/net/ipv6.h
··· 959 959 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 960 960 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, 961 961 int addr_len); 962 + int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr); 963 + void ip6_datagram_release_cb(struct sock *sk); 962 964 963 965 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, 964 966 int *addr_len);
+3
include/net/route.h
··· 209 209 void ip_rt_multicast_event(struct in_device *); 210 210 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); 211 211 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); 212 + struct rtable *rt_dst_alloc(struct net_device *dev, 213 + unsigned int flags, u16 type, 214 + bool nopolicy, bool noxfrm, bool will_cache); 212 215 213 216 struct in_ifaddr; 214 217 void fib_add_ifaddr(struct in_ifaddr *);
+7 -1
include/net/sctp/structs.h
··· 847 847 */ 848 848 ktime_t last_time_heard; 849 849 850 + /* When was the last time that we sent a chunk using this 851 + * transport? We use this to check for idle transports 852 + */ 853 + unsigned long last_time_sent; 854 + 850 855 /* Last time(in jiffies) when cwnd is reduced due to the congestion 851 856 * indication based on ECNE chunk. 852 857 */ ··· 957 952 struct sctp_sock *); 958 953 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); 959 954 void sctp_transport_free(struct sctp_transport *); 960 - void sctp_transport_reset_timers(struct sctp_transport *); 955 + void sctp_transport_reset_t3_rtx(struct sctp_transport *); 956 + void sctp_transport_reset_hb_timer(struct sctp_transport *); 961 957 int sctp_transport_hold(struct sctp_transport *); 962 958 void sctp_transport_put(struct sctp_transport *); 963 959 void sctp_transport_update_rto(struct sctp_transport *, __u32);
+5 -1
include/net/sock.h
··· 630 630 631 631 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 632 632 { 633 - hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 633 + if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && 634 + sk->sk_family == AF_INET6) 635 + hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); 636 + else 637 + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 634 638 } 635 639 636 640 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+4
include/net/switchdev.h
··· 54 54 struct net_device *orig_dev; 55 55 enum switchdev_attr_id id; 56 56 u32 flags; 57 + void *complete_priv; 58 + void (*complete)(struct net_device *dev, int err, void *priv); 57 59 union { 58 60 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ 59 61 u8 stp_state; /* PORT_STP_STATE */ ··· 77 75 struct net_device *orig_dev; 78 76 enum switchdev_obj_id id; 79 77 u32 flags; 78 + void *complete_priv; 79 + void (*complete)(struct net_device *dev, int err, void *priv); 80 80 }; 81 81 82 82 /* SWITCHDEV_OBJ_ID_PORT_VLAN */
+2
include/net/tcp.h
··· 552 552 void tcp_send_delayed_ack(struct sock *sk); 553 553 void tcp_send_loss_probe(struct sock *sk); 554 554 bool tcp_schedule_loss_probe(struct sock *sk); 555 + void tcp_skb_collapse_tstamp(struct sk_buff *skb, 556 + const struct sk_buff *next_skb); 555 557 556 558 /* tcp_input.c */ 557 559 void tcp_resume_early_retransmit(struct sock *sk);
+16
include/rdma/ib.h
··· 34 34 #define _RDMA_IB_H 35 35 36 36 #include <linux/types.h> 37 + #include <linux/sched.h> 37 38 38 39 struct ib_addr { 39 40 union { ··· 86 85 __be64 sib_sid_mask; 87 86 __u64 sib_scope_id; 88 87 }; 88 + 89 + /* 90 + * The IB interfaces that use write() as bi-directional ioctl() are 91 + * fundamentally unsafe, since there are lots of ways to trigger "write()" 92 + * calls from various contexts with elevated privileges. That includes the 93 + * traditional suid executable error message writes, but also various kernel 94 + * interfaces that can write to file descriptors. 95 + * 96 + * This function provides protection for the legacy API by restricting the 97 + * calling context. 98 + */ 99 + static inline bool ib_safe_file_access(struct file *filp) 100 + { 101 + return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS); 102 + } 89 103 90 104 #endif /* _RDMA_IB_H */
+2 -3
include/sound/hda_i915.h
··· 9 9 #ifdef CONFIG_SND_HDA_I915 10 10 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); 11 11 int snd_hdac_display_power(struct hdac_bus *bus, bool enable); 12 - int snd_hdac_get_display_clk(struct hdac_bus *bus); 12 + void snd_hdac_i915_set_bclk(struct hdac_bus *bus); 13 13 int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); 14 14 int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, 15 15 bool *audio_enabled, char *buffer, int max_bytes); ··· 25 25 { 26 26 return 0; 27 27 } 28 - static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) 28 + static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus) 29 29 { 30 - return 0; 31 30 } 32 31 static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, 33 32 int rate)
+2
include/sound/hda_regmap.h
··· 17 17 unsigned int verb); 18 18 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, 19 19 unsigned int *val); 20 + int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, 21 + unsigned int reg, unsigned int *val); 20 22 int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, 21 23 unsigned int val); 22 24 int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
+5 -1
include/uapi/asm-generic/unistd.h
··· 717 717 __SYSCALL(__NR_mlock2, sys_mlock2) 718 718 #define __NR_copy_file_range 285 719 719 __SYSCALL(__NR_copy_file_range, sys_copy_file_range) 720 + #define __NR_preadv2 286 721 + __SYSCALL(__NR_preadv2, sys_preadv2) 722 + #define __NR_pwritev2 287 723 + __SYSCALL(__NR_pwritev2, sys_pwritev2) 720 724 721 725 #undef __NR_syscalls 722 - #define __NR_syscalls 286 726 + #define __NR_syscalls 288 723 727 724 728 /* 725 729 * All syscalls below here should go away really,
+1
include/uapi/linux/Kbuild
··· 96 96 header-y += cycx_cfm.h 97 97 header-y += dcbnl.h 98 98 header-y += dccp.h 99 + header-y += devlink.h 99 100 header-y += dlmconstants.h 100 101 header-y += dlm_device.h 101 102 header-y += dlm.h
+2 -2
include/uapi/linux/if_macsec.h
··· 19 19 20 20 #define MACSEC_MAX_KEY_LEN 128 21 21 22 - #define DEFAULT_CIPHER_ID 0x0080020001000001ULL 23 - #define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL 22 + #define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL 23 + #define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL 24 24 25 25 #define MACSEC_MIN_ICV_LEN 8 26 26 #define MACSEC_MAX_ICV_LEN 32
+20 -10
include/uapi/linux/v4l2-dv-timings.h
··· 183 183 184 184 #define V4L2_DV_BT_CEA_3840X2160P24 { \ 185 185 .type = V4L2_DV_BT_656_1120, \ 186 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 186 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ 187 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 187 188 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \ 188 189 V4L2_DV_BT_STD_CEA861, \ 189 190 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ ··· 192 191 193 192 #define V4L2_DV_BT_CEA_3840X2160P25 { \ 194 193 .type = V4L2_DV_BT_656_1120, \ 195 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 194 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ 195 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 196 196 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 197 197 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 198 198 } 199 199 200 200 #define V4L2_DV_BT_CEA_3840X2160P30 { \ 201 201 .type = V4L2_DV_BT_656_1120, \ 202 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 202 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ 203 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 203 204 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ 204 205 V4L2_DV_BT_STD_CEA861, \ 205 206 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ ··· 209 206 210 207 #define V4L2_DV_BT_CEA_3840X2160P50 { \ 211 208 .type = V4L2_DV_BT_656_1120, \ 212 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 209 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ 210 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 213 211 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 214 212 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 215 213 } 216 214 217 215 #define V4L2_DV_BT_CEA_3840X2160P60 { \ 218 216 .type = V4L2_DV_BT_656_1120, \ 219 - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 217 + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ 218 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 220 219 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ 221 220 V4L2_DV_BT_STD_CEA861, \ 222 221 V4L2_DV_FL_CAN_REDUCE_FPS | 
V4L2_DV_FL_IS_CE_VIDEO) \ ··· 226 221 227 222 #define V4L2_DV_BT_CEA_4096X2160P24 { \ 228 223 .type = V4L2_DV_BT_656_1120, \ 229 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 224 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ 225 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 230 226 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \ 231 227 V4L2_DV_BT_STD_CEA861, \ 232 228 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ ··· 235 229 236 230 #define V4L2_DV_BT_CEA_4096X2160P25 { \ 237 231 .type = V4L2_DV_BT_656_1120, \ 238 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 232 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ 233 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 239 234 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ 240 235 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 241 236 } 242 237 243 238 #define V4L2_DV_BT_CEA_4096X2160P30 { \ 244 239 .type = V4L2_DV_BT_656_1120, \ 245 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 240 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ 241 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 246 242 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ 247 243 V4L2_DV_BT_STD_CEA861, \ 248 244 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ ··· 252 244 253 245 #define V4L2_DV_BT_CEA_4096X2160P50 { \ 254 246 .type = V4L2_DV_BT_656_1120, \ 255 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 247 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ 248 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 256 249 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ 257 250 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 258 251 } 259 252 260 253 #define V4L2_DV_BT_CEA_4096X2160P60 { \ 261 254 .type = V4L2_DV_BT_656_1120, \ 262 - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 255 + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ 256 + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 263 257 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ 264 258 V4L2_DV_BT_STD_CEA861, \ 265 259 
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+1 -1
kernel/bpf/verifier.c
··· 1374 1374 } 1375 1375 1376 1376 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 1377 + BPF_SIZE(insn->code) == BPF_DW || 1377 1378 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 1378 1379 verbose("BPF_LD_ABS uses reserved fields\n"); 1379 1380 return -EINVAL; ··· 2030 2029 if (IS_ERR(map)) { 2031 2030 verbose("fd %d is not pointing to valid bpf_map\n", 2032 2031 insn->imm); 2033 - fdput(f); 2034 2032 return PTR_ERR(map); 2035 2033 } 2036 2034
+5 -2
kernel/cgroup.c
··· 2825 2825 size_t nbytes, loff_t off, bool threadgroup) 2826 2826 { 2827 2827 struct task_struct *tsk; 2828 + struct cgroup_subsys *ss; 2828 2829 struct cgroup *cgrp; 2829 2830 pid_t pid; 2830 - int ret; 2831 + int ssid, ret; 2831 2832 2832 2833 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 2833 2834 return -EINVAL; ··· 2876 2875 rcu_read_unlock(); 2877 2876 out_unlock_threadgroup: 2878 2877 percpu_up_write(&cgroup_threadgroup_rwsem); 2878 + for_each_subsys(ss, ssid) 2879 + if (ss->post_attach) 2880 + ss->post_attach(); 2879 2881 cgroup_kn_unlock(of->kn); 2880 - cpuset_post_attach_flush(); 2881 2882 return ret ?: nbytes; 2882 2883 } 2883 2884
+26 -7
kernel/cpu.c
··· 36 36 * @target: The target state 37 37 * @thread: Pointer to the hotplug thread 38 38 * @should_run: Thread should execute 39 + * @rollback: Perform a rollback 39 40 * @cb_stat: The state for a single callback (install/uninstall) 40 41 * @cb: Single callback function (install/uninstall) 41 42 * @result: Result of the operation ··· 48 47 #ifdef CONFIG_SMP 49 48 struct task_struct *thread; 50 49 bool should_run; 50 + bool rollback; 51 51 enum cpuhp_state cb_state; 52 52 int (*cb)(unsigned int cpu); 53 53 int result; ··· 303 301 return __cpu_notify(val, cpu, -1, NULL); 304 302 } 305 303 304 + static void cpu_notify_nofail(unsigned long val, unsigned int cpu) 305 + { 306 + BUG_ON(cpu_notify(val, cpu)); 307 + } 308 + 306 309 /* Notifier wrappers for transitioning to state machine */ 307 310 static int notify_prepare(unsigned int cpu) 308 311 { ··· 484 477 } else { 485 478 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb); 486 479 } 480 + } else if (st->rollback) { 481 + BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); 482 + 483 + undo_cpu_down(cpu, st, cpuhp_ap_states); 484 + /* 485 + * This is a momentary workaround to keep the notifier users 486 + * happy. Will go away once we got rid of the notifiers. 487 + */ 488 + cpu_notify_nofail(CPU_DOWN_FAILED, cpu); 489 + st->rollback = false; 487 490 } else { 488 491 /* Cannot happen .... */ 489 492 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); ··· 653 636 read_unlock(&tasklist_lock); 654 637 } 655 638 656 - static void cpu_notify_nofail(unsigned long val, unsigned int cpu) 657 - { 658 - BUG_ON(cpu_notify(val, cpu)); 659 - } 660 - 661 639 static int notify_down_prepare(unsigned int cpu) 662 640 { 663 641 int err, nr_calls = 0; ··· 733 721 */ 734 722 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu)); 735 723 if (err) { 736 - /* CPU didn't die: tell everyone. Can't complain. 
*/ 737 - cpu_notify_nofail(CPU_DOWN_FAILED, cpu); 724 + /* CPU refused to die */ 738 725 irq_unlock_sparse(); 726 + /* Unpark the hotplug thread so we can rollback there */ 727 + kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread); 739 728 return err; 740 729 } 741 730 BUG_ON(cpu_online(cpu)); ··· 845 832 * to do the further cleanups. 846 833 */ 847 834 ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target); 835 + if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { 836 + st->target = prev_state; 837 + st->rollback = true; 838 + cpuhp_kick_ap_work(cpu); 839 + } 848 840 849 841 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; 850 842 out: ··· 1267 1249 .name = "notify:online", 1268 1250 .startup = notify_online, 1269 1251 .teardown = notify_down_prepare, 1252 + .skip_onerr = true, 1270 1253 }, 1271 1254 #endif 1272 1255 /*
+2 -2
kernel/cpuset.c
··· 58 58 #include <asm/uaccess.h> 59 59 #include <linux/atomic.h> 60 60 #include <linux/mutex.h> 61 - #include <linux/workqueue.h> 62 61 #include <linux/cgroup.h> 63 62 #include <linux/wait.h> 64 63 ··· 1015 1016 } 1016 1017 } 1017 1018 1018 - void cpuset_post_attach_flush(void) 1019 + static void cpuset_post_attach(void) 1019 1020 { 1020 1021 flush_workqueue(cpuset_migrate_mm_wq); 1021 1022 } ··· 2086 2087 .can_attach = cpuset_can_attach, 2087 2088 .cancel_attach = cpuset_cancel_attach, 2088 2089 .attach = cpuset_attach, 2090 + .post_attach = cpuset_post_attach, 2089 2091 .bind = cpuset_bind, 2090 2092 .legacy_cftypes = files, 2091 2093 .early_init = true,
+23 -4
kernel/futex.c
··· 1295 1295 if (unlikely(should_fail_futex(true))) 1296 1296 ret = -EFAULT; 1297 1297 1298 - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) 1298 + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) { 1299 1299 ret = -EFAULT; 1300 - else if (curval != uval) 1301 - ret = -EINVAL; 1300 + } else if (curval != uval) { 1301 + /* 1302 + * If a unconditional UNLOCK_PI operation (user space did not 1303 + * try the TID->0 transition) raced with a waiter setting the 1304 + * FUTEX_WAITERS flag between get_user() and locking the hash 1305 + * bucket lock, retry the operation. 1306 + */ 1307 + if ((FUTEX_TID_MASK & curval) == uval) 1308 + ret = -EAGAIN; 1309 + else 1310 + ret = -EINVAL; 1311 + } 1302 1312 if (ret) { 1303 1313 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); 1304 1314 return ret; ··· 1535 1525 if (likely(&hb1->chain != &hb2->chain)) { 1536 1526 plist_del(&q->list, &hb1->chain); 1537 1527 hb_waiters_dec(hb1); 1538 - plist_add(&q->list, &hb2->chain); 1539 1528 hb_waiters_inc(hb2); 1529 + plist_add(&q->list, &hb2->chain); 1540 1530 q->lock_ptr = &hb2->lock; 1541 1531 } 1542 1532 get_futex_key_refs(key2); ··· 2632 2622 */ 2633 2623 if (ret == -EFAULT) 2634 2624 goto pi_faulted; 2625 + /* 2626 + * A unconditional UNLOCK_PI op raced against a waiter 2627 + * setting the FUTEX_WAITERS bit. Try again. 2628 + */ 2629 + if (ret == -EAGAIN) { 2630 + spin_unlock(&hb->lock); 2631 + put_futex_key(&key); 2632 + goto retry; 2633 + } 2635 2634 /* 2636 2635 * wake_futex_pi has detected invalid state. Tell user 2637 2636 * space.
+1
kernel/irq/ipi.c
··· 94 94 data = irq_get_irq_data(virq + i); 95 95 cpumask_copy(data->common->affinity, dest); 96 96 data->common->ipi_offset = offset; 97 + irq_set_status_flags(virq + i, IRQ_NO_BALANCING); 97 98 } 98 99 return virq; 99 100
+2 -1
kernel/kcov.c
··· 1 1 #define pr_fmt(fmt) "kcov: " fmt 2 2 3 + #define DISABLE_BRANCH_PROFILING 3 4 #include <linux/compiler.h> 4 5 #include <linux/types.h> 5 6 #include <linux/file.h> ··· 44 43 * Entry point from instrumented code. 45 44 * This is called once per basic-block/edge. 46 45 */ 47 - void __sanitizer_cov_trace_pc(void) 46 + void notrace __sanitizer_cov_trace_pc(void) 48 47 { 49 48 struct task_struct *t; 50 49 enum kcov_mode mode;
+5 -2
kernel/kexec_core.c
··· 1415 1415 VMCOREINFO_OFFSET(page, lru); 1416 1416 VMCOREINFO_OFFSET(page, _mapcount); 1417 1417 VMCOREINFO_OFFSET(page, private); 1418 + VMCOREINFO_OFFSET(page, compound_dtor); 1419 + VMCOREINFO_OFFSET(page, compound_order); 1420 + VMCOREINFO_OFFSET(page, compound_head); 1418 1421 VMCOREINFO_OFFSET(pglist_data, node_zones); 1419 1422 VMCOREINFO_OFFSET(pglist_data, nr_zones); 1420 1423 #ifdef CONFIG_FLAT_NODE_MEM_MAP ··· 1450 1447 #ifdef CONFIG_X86 1451 1448 VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE); 1452 1449 #endif 1453 - #ifdef CONFIG_HUGETLBFS 1454 - VMCOREINFO_SYMBOL(free_huge_page); 1450 + #ifdef CONFIG_HUGETLB_PAGE 1451 + VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR); 1455 1452 #endif 1456 1453 1457 1454 arch_crash_save_vmcoreinfo();
+34 -3
kernel/locking/lockdep.c
··· 2176 2176 chain->irq_context = hlock->irq_context; 2177 2177 i = get_first_held_lock(curr, hlock); 2178 2178 chain->depth = curr->lockdep_depth + 1 - i; 2179 + 2180 + BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks)); 2181 + BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); 2182 + BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); 2183 + 2179 2184 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2180 2185 chain->base = nr_chain_hlocks; 2181 - nr_chain_hlocks += chain->depth; 2182 2186 for (j = 0; j < chain->depth - 1; j++, i++) { 2183 2187 int lock_id = curr->held_locks[i].class_idx - 1; 2184 2188 chain_hlocks[chain->base + j] = lock_id; 2185 2189 } 2186 2190 chain_hlocks[chain->base + j] = class - lock_classes; 2187 2191 } 2192 + 2193 + if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS) 2194 + nr_chain_hlocks += chain->depth; 2195 + 2196 + #ifdef CONFIG_DEBUG_LOCKDEP 2197 + /* 2198 + * Important for check_no_collision(). 2199 + */ 2200 + if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) { 2201 + if (debug_locks_off_graph_unlock()) 2202 + return 0; 2203 + 2204 + print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); 2205 + dump_stack(); 2206 + return 0; 2207 + } 2208 + #endif 2209 + 2188 2210 hlist_add_head_rcu(&chain->entry, hash_head); 2189 2211 debug_atomic_inc(chain_lookup_misses); 2190 2212 inc_chains(); ··· 2954 2932 return 1; 2955 2933 } 2956 2934 2935 + static inline unsigned int task_irq_context(struct task_struct *task) 2936 + { 2937 + return 2 * !!task->hardirq_context + !!task->softirq_context; 2938 + } 2939 + 2957 2940 static int separate_irq_context(struct task_struct *curr, 2958 2941 struct held_lock *hlock) 2959 2942 { ··· 2967 2940 /* 2968 2941 * Keep track of points where we cross into an interrupt context: 2969 2942 */ 2970 - hlock->irq_context = 2*(curr->hardirq_context ? 
1 : 0) + 2971 - curr->softirq_context; 2972 2943 if (depth) { 2973 2944 struct held_lock *prev_hlock; 2974 2945 ··· 2996 2971 struct held_lock *hlock) 2997 2972 { 2998 2973 return 1; 2974 + } 2975 + 2976 + static inline unsigned int task_irq_context(struct task_struct *task) 2977 + { 2978 + return 0; 2999 2979 } 3000 2980 3001 2981 static inline int separate_irq_context(struct task_struct *curr, ··· 3271 3241 hlock->acquire_ip = ip; 3272 3242 hlock->instance = lock; 3273 3243 hlock->nest_lock = nest_lock; 3244 + hlock->irq_context = task_irq_context(curr); 3274 3245 hlock->trylock = trylock; 3275 3246 hlock->read = read; 3276 3247 hlock->check = check;
+2
kernel/locking/lockdep_proc.c
··· 141 141 int i; 142 142 143 143 if (v == SEQ_START_TOKEN) { 144 + if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS) 145 + seq_printf(m, "(buggered) "); 144 146 seq_printf(m, "all lock chains:\n"); 145 147 return 0; 146 148 }
+5 -3
kernel/locking/qspinlock_stat.h
··· 136 136 } 137 137 138 138 if (counter == qstat_pv_hash_hops) { 139 - u64 frac; 139 + u64 frac = 0; 140 140 141 - frac = 100ULL * do_div(stat, kicks); 142 - frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); 141 + if (kicks) { 142 + frac = 100ULL * do_div(stat, kicks); 143 + frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); 144 + } 143 145 144 146 /* 145 147 * Return a X.XX decimal number
+29
kernel/workqueue.c
··· 666 666 */ 667 667 smp_wmb(); 668 668 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 669 + /* 670 + * The following mb guarantees that previous clear of a PENDING bit 671 + * will not be reordered with any speculative LOADS or STORES from 672 + * work->current_func, which is executed afterwards. This possible 673 + * reordering can lead to a missed execution on attempt to qeueue 674 + * the same @work. E.g. consider this case: 675 + * 676 + * CPU#0 CPU#1 677 + * ---------------------------- -------------------------------- 678 + * 679 + * 1 STORE event_indicated 680 + * 2 queue_work_on() { 681 + * 3 test_and_set_bit(PENDING) 682 + * 4 } set_..._and_clear_pending() { 683 + * 5 set_work_data() # clear bit 684 + * 6 smp_mb() 685 + * 7 work->current_func() { 686 + * 8 LOAD event_indicated 687 + * } 688 + * 689 + * Without an explicit full barrier speculative LOAD on line 8 can 690 + * be executed before CPU#0 does STORE on line 1. If that happens, 691 + * CPU#0 observes the PENDING bit is still set and new execution of 692 + * a @work is not queued in a hope, that CPU#1 will eventually 693 + * finish the queued @work. Meanwhile CPU#1 does not see 694 + * event_indicated is set, because speculative LOAD was executed 695 + * before actual STORE. 696 + */ 697 + smp_mb(); 669 698 } 670 699 671 700 static void clear_work_data(struct work_struct *work)
-4
lib/stackdepot.c
··· 210 210 goto fast_exit; 211 211 212 212 hash = hash_stack(trace->entries, trace->nr_entries); 213 - /* Bad luck, we won't store this stack. */ 214 - if (hash == 0) 215 - goto exit; 216 - 217 213 bucket = &stack_table[hash & STACK_HASH_MASK]; 218 214 219 215 /*
+5 -7
mm/huge_memory.c
··· 232 232 return READ_ONCE(huge_zero_page); 233 233 } 234 234 235 - static void put_huge_zero_page(void) 235 + void put_huge_zero_page(void) 236 236 { 237 237 /* 238 238 * Counter should never go to zero here. Only shrinker can put ··· 1684 1684 if (vma_is_dax(vma)) { 1685 1685 spin_unlock(ptl); 1686 1686 if (is_huge_zero_pmd(orig_pmd)) 1687 - put_huge_zero_page(); 1687 + tlb_remove_page(tlb, pmd_page(orig_pmd)); 1688 1688 } else if (is_huge_zero_pmd(orig_pmd)) { 1689 1689 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1690 1690 atomic_long_dec(&tlb->mm->nr_ptes); 1691 1691 spin_unlock(ptl); 1692 - put_huge_zero_page(); 1692 + tlb_remove_page(tlb, pmd_page(orig_pmd)); 1693 1693 } else { 1694 1694 struct page *page = pmd_page(orig_pmd); 1695 1695 page_remove_rmap(page, true); ··· 1960 1960 * page fault if needed. 1961 1961 */ 1962 1962 return 0; 1963 - if (vma->vm_ops) 1963 + if (vma->vm_ops || (vm_flags & VM_NO_THP)) 1964 1964 /* khugepaged not yet working on file or special mappings */ 1965 1965 return 0; 1966 - VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma); 1967 1966 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 1968 1967 hend = vma->vm_end & HPAGE_PMD_MASK; 1969 1968 if (hstart < hend) ··· 2351 2352 return false; 2352 2353 if (is_vma_temporary_stack(vma)) 2353 2354 return false; 2354 - VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); 2355 - return true; 2355 + return !(vma->vm_flags & VM_NO_THP); 2356 2356 } 2357 2357 2358 2358 static void collapse_huge_page(struct mm_struct *mm,
+19 -18
mm/memcontrol.c
··· 207 207 /* "mc" and its members are protected by cgroup_mutex */ 208 208 static struct move_charge_struct { 209 209 spinlock_t lock; /* for from, to */ 210 + struct mm_struct *mm; 210 211 struct mem_cgroup *from; 211 212 struct mem_cgroup *to; 212 213 unsigned long flags; ··· 4668 4667 4669 4668 static void mem_cgroup_clear_mc(void) 4670 4669 { 4670 + struct mm_struct *mm = mc.mm; 4671 + 4671 4672 /* 4672 4673 * we must clear moving_task before waking up waiters at the end of 4673 4674 * task migration. ··· 4679 4676 spin_lock(&mc.lock); 4680 4677 mc.from = NULL; 4681 4678 mc.to = NULL; 4679 + mc.mm = NULL; 4682 4680 spin_unlock(&mc.lock); 4681 + 4682 + mmput(mm); 4683 4683 } 4684 4684 4685 4685 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) ··· 4739 4733 VM_BUG_ON(mc.moved_swap); 4740 4734 4741 4735 spin_lock(&mc.lock); 4736 + mc.mm = mm; 4742 4737 mc.from = from; 4743 4738 mc.to = memcg; 4744 4739 mc.flags = move_flags; ··· 4749 4742 ret = mem_cgroup_precharge_mc(mm); 4750 4743 if (ret) 4751 4744 mem_cgroup_clear_mc(); 4745 + } else { 4746 + mmput(mm); 4752 4747 } 4753 - mmput(mm); 4754 4748 return ret; 4755 4749 } 4756 4750 ··· 4860 4852 return ret; 4861 4853 } 4862 4854 4863 - static void mem_cgroup_move_charge(struct mm_struct *mm) 4855 + static void mem_cgroup_move_charge(void) 4864 4856 { 4865 4857 struct mm_walk mem_cgroup_move_charge_walk = { 4866 4858 .pmd_entry = mem_cgroup_move_charge_pte_range, 4867 - .mm = mm, 4859 + .mm = mc.mm, 4868 4860 }; 4869 4861 4870 4862 lru_add_drain_all(); ··· 4876 4868 atomic_inc(&mc.from->moving_account); 4877 4869 synchronize_rcu(); 4878 4870 retry: 4879 - if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 4871 + if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 4880 4872 /* 4881 4873 * Someone who are holding the mmap_sem might be waiting in 4882 4874 * waitq. So we cancel all extra charges, wake up all waiters, ··· 4893 4885 * additional charge, the page walk just aborts. 
4894 4886 */ 4895 4887 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); 4896 - up_read(&mm->mmap_sem); 4888 + up_read(&mc.mm->mmap_sem); 4897 4889 atomic_dec(&mc.from->moving_account); 4898 4890 } 4899 4891 4900 - static void mem_cgroup_move_task(struct cgroup_taskset *tset) 4892 + static void mem_cgroup_move_task(void) 4901 4893 { 4902 - struct cgroup_subsys_state *css; 4903 - struct task_struct *p = cgroup_taskset_first(tset, &css); 4904 - struct mm_struct *mm = get_task_mm(p); 4905 - 4906 - if (mm) { 4907 - if (mc.to) 4908 - mem_cgroup_move_charge(mm); 4909 - mmput(mm); 4910 - } 4911 - if (mc.to) 4894 + if (mc.to) { 4895 + mem_cgroup_move_charge(); 4912 4896 mem_cgroup_clear_mc(); 4897 + } 4913 4898 } 4914 4899 #else /* !CONFIG_MMU */ 4915 4900 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) ··· 4912 4911 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4913 4912 { 4914 4913 } 4915 - static void mem_cgroup_move_task(struct cgroup_taskset *tset) 4914 + static void mem_cgroup_move_task(void) 4916 4915 { 4917 4916 } 4918 4917 #endif ··· 5196 5195 .css_reset = mem_cgroup_css_reset, 5197 5196 .can_attach = mem_cgroup_can_attach, 5198 5197 .cancel_attach = mem_cgroup_cancel_attach, 5199 - .attach = mem_cgroup_move_task, 5198 + .post_attach = mem_cgroup_move_task, 5200 5199 .bind = mem_cgroup_bind, 5201 5200 .dfl_cftypes = memory_files, 5202 5201 .legacy_cftypes = mem_cgroup_legacy_files,
+9 -1
mm/memory-failure.c
··· 888 888 } 889 889 } 890 890 891 - return get_page_unless_zero(head); 891 + if (get_page_unless_zero(head)) { 892 + if (head == compound_head(page)) 893 + return 1; 894 + 895 + pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page)); 896 + put_page(head); 897 + } 898 + 899 + return 0; 892 900 } 893 901 EXPORT_SYMBOL_GPL(get_hwpoison_page); 894 902
+40
mm/memory.c
··· 789 789 return pfn_to_page(pfn); 790 790 } 791 791 792 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 793 + struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, 794 + pmd_t pmd) 795 + { 796 + unsigned long pfn = pmd_pfn(pmd); 797 + 798 + /* 799 + * There is no pmd_special() but there may be special pmds, e.g. 800 + * in a direct-access (dax) mapping, so let's just replicate the 801 + * !HAVE_PTE_SPECIAL case from vm_normal_page() here. 802 + */ 803 + if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { 804 + if (vma->vm_flags & VM_MIXEDMAP) { 805 + if (!pfn_valid(pfn)) 806 + return NULL; 807 + goto out; 808 + } else { 809 + unsigned long off; 810 + off = (addr - vma->vm_start) >> PAGE_SHIFT; 811 + if (pfn == vma->vm_pgoff + off) 812 + return NULL; 813 + if (!is_cow_mapping(vma->vm_flags)) 814 + return NULL; 815 + } 816 + } 817 + 818 + if (is_zero_pfn(pfn)) 819 + return NULL; 820 + if (unlikely(pfn > highest_memmap_pfn)) 821 + return NULL; 822 + 823 + /* 824 + * NOTE! We still have PageReserved() pages in the page tables. 825 + * eg. VDSO mappings can cause them to exist. 826 + */ 827 + out: 828 + return pfn_to_page(pfn); 829 + } 830 + #endif 831 + 792 832 /* 793 833 * copy one vm_area from one task to the other. Assumes the page tables 794 834 * already present in the new task to be cleared in the whole range
+7 -1
mm/migrate.c
··· 975 975 dec_zone_page_state(page, NR_ISOLATED_ANON + 976 976 page_is_file_cache(page)); 977 977 /* Soft-offlined page shouldn't go through lru cache list */ 978 - if (reason == MR_MEMORY_FAILURE) { 978 + if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) { 979 + /* 980 + * With this release, we free successfully migrated 981 + * page and set PG_HWPoison on just freed page 982 + * intentionally. Although it's rather weird, it's how 983 + * HWPoison flag works at the moment. 984 + */ 979 985 put_page(page); 980 986 if (!test_set_page_hwpoison(page)) 981 987 num_poisoned_pages_inc();
+5 -1
mm/page_io.c
··· 353 353 354 354 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); 355 355 if (!ret) { 356 - swap_slot_free_notify(page); 356 + if (trylock_page(page)) { 357 + swap_slot_free_notify(page); 358 + unlock_page(page); 359 + } 360 + 357 361 count_vm_event(PSWPIN); 358 362 return 0; 359 363 }
+5
mm/swap.c
··· 728 728 zone = NULL; 729 729 } 730 730 731 + if (is_huge_zero_page(page)) { 732 + put_huge_zero_page(); 733 + continue; 734 + } 735 + 731 736 page = compound_head(page); 732 737 if (!put_page_testzero(page)) 733 738 continue;
+15 -15
mm/vmscan.c
··· 2553 2553 sc->gfp_mask |= __GFP_HIGHMEM; 2554 2554 2555 2555 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2556 - requested_highidx, sc->nodemask) { 2556 + gfp_zone(sc->gfp_mask), sc->nodemask) { 2557 2557 enum zone_type classzone_idx; 2558 2558 2559 2559 if (!populated_zone(zone)) ··· 3318 3318 /* Try to sleep for a short interval */ 3319 3319 if (prepare_kswapd_sleep(pgdat, order, remaining, 3320 3320 balanced_classzone_idx)) { 3321 + /* 3322 + * Compaction records what page blocks it recently failed to 3323 + * isolate pages from and skips them in the future scanning. 3324 + * When kswapd is going to sleep, it is reasonable to assume 3325 + * that pages and compaction may succeed so reset the cache. 3326 + */ 3327 + reset_isolation_suitable(pgdat); 3328 + 3329 + /* 3330 + * We have freed the memory, now we should compact it to make 3331 + * allocation of the requested order possible. 3332 + */ 3333 + wakeup_kcompactd(pgdat, order, classzone_idx); 3334 + 3321 3335 remaining = schedule_timeout(HZ/10); 3322 3336 finish_wait(&pgdat->kswapd_wait, &wait); 3323 3337 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); ··· 3354 3340 * them before going back to sleep. 3355 3341 */ 3356 3342 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 3357 - 3358 - /* 3359 - * Compaction records what page blocks it recently failed to 3360 - * isolate pages from and skips them in the future scanning. 3361 - * When kswapd is going to sleep, it is reasonable to assume 3362 - * that pages and compaction may succeed so reset the cache. 3363 - */ 3364 - reset_isolation_suitable(pgdat); 3365 - 3366 - /* 3367 - * We have freed the memory, now we should compact it to make 3368 - * allocation of the requested order possible. 3369 - */ 3370 - wakeup_kcompactd(pgdat, order, classzone_idx); 3371 3343 3372 3344 if (!kthread_should_stop()) 3373 3345 schedule();
+79 -45
net/bridge/br_mdb.c
··· 61 61 e->flags |= MDB_FLAGS_OFFLOAD; 62 62 } 63 63 64 + static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip) 65 + { 66 + memset(ip, 0, sizeof(struct br_ip)); 67 + ip->vid = entry->vid; 68 + ip->proto = entry->addr.proto; 69 + if (ip->proto == htons(ETH_P_IP)) 70 + ip->u.ip4 = entry->addr.u.ip4; 71 + #if IS_ENABLED(CONFIG_IPV6) 72 + else 73 + ip->u.ip6 = entry->addr.u.ip6; 74 + #endif 75 + } 76 + 64 77 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, 65 78 struct net_device *dev) 66 79 { ··· 256 243 + nla_total_size(sizeof(struct br_mdb_entry)); 257 244 } 258 245 259 - static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry, 260 - int type, struct net_bridge_port_group *pg) 246 + struct br_mdb_complete_info { 247 + struct net_bridge_port *port; 248 + struct br_ip ip; 249 + }; 250 + 251 + static void br_mdb_complete(struct net_device *dev, int err, void *priv) 261 252 { 253 + struct br_mdb_complete_info *data = priv; 254 + struct net_bridge_port_group __rcu **pp; 255 + struct net_bridge_port_group *p; 256 + struct net_bridge_mdb_htable *mdb; 257 + struct net_bridge_mdb_entry *mp; 258 + struct net_bridge_port *port = data->port; 259 + struct net_bridge *br = port->br; 260 + 261 + if (err) 262 + goto err; 263 + 264 + spin_lock_bh(&br->multicast_lock); 265 + mdb = mlock_dereference(br->mdb, br); 266 + mp = br_mdb_ip_get(mdb, &data->ip); 267 + if (!mp) 268 + goto out; 269 + for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; 270 + pp = &p->next) { 271 + if (p->port != port) 272 + continue; 273 + p->flags |= MDB_PG_FLAGS_OFFLOAD; 274 + } 275 + out: 276 + spin_unlock_bh(&br->multicast_lock); 277 + err: 278 + kfree(priv); 279 + } 280 + 281 + static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p, 282 + struct br_mdb_entry *entry, int type) 283 + { 284 + struct br_mdb_complete_info *complete_info; 262 285 struct switchdev_obj_port_mdb mdb = { 263 286 .obj = { 
264 287 .id = SWITCHDEV_OBJ_ID_PORT_MDB, ··· 317 268 318 269 mdb.obj.orig_dev = port_dev; 319 270 if (port_dev && type == RTM_NEWMDB) { 320 - err = switchdev_port_obj_add(port_dev, &mdb.obj); 321 - if (!err && pg) 322 - pg->flags |= MDB_PG_FLAGS_OFFLOAD; 271 + complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC); 272 + if (complete_info) { 273 + complete_info->port = p; 274 + __mdb_entry_to_br_ip(entry, &complete_info->ip); 275 + mdb.obj.complete_priv = complete_info; 276 + mdb.obj.complete = br_mdb_complete; 277 + switchdev_port_obj_add(port_dev, &mdb.obj); 278 + } 323 279 } else if (port_dev && type == RTM_DELMDB) { 324 280 switchdev_port_obj_del(port_dev, &mdb.obj); 325 281 } ··· 345 291 rtnl_set_sk_err(net, RTNLGRP_MDB, err); 346 292 } 347 293 348 - void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, 349 - int type) 294 + void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, 295 + struct br_ip *group, int type, u8 flags) 350 296 { 351 297 struct br_mdb_entry entry; 352 298 353 299 memset(&entry, 0, sizeof(entry)); 354 - entry.ifindex = pg->port->dev->ifindex; 355 - entry.addr.proto = pg->addr.proto; 356 - entry.addr.u.ip4 = pg->addr.u.ip4; 300 + entry.ifindex = port->dev->ifindex; 301 + entry.addr.proto = group->proto; 302 + entry.addr.u.ip4 = group->u.ip4; 357 303 #if IS_ENABLED(CONFIG_IPV6) 358 - entry.addr.u.ip6 = pg->addr.u.ip6; 304 + entry.addr.u.ip6 = group->u.ip6; 359 305 #endif 360 - entry.vid = pg->addr.vid; 361 - __mdb_entry_fill_flags(&entry, pg->flags); 362 - __br_mdb_notify(dev, &entry, type, pg); 306 + entry.vid = group->vid; 307 + __mdb_entry_fill_flags(&entry, flags); 308 + __br_mdb_notify(dev, port, &entry, type); 363 309 } 364 310 365 311 static int nlmsg_populate_rtr_fill(struct sk_buff *skb, ··· 504 450 } 505 451 506 452 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, 507 - struct br_ip *group, unsigned char state, 508 - struct net_bridge_port_group **pg) 453 
+ struct br_ip *group, unsigned char state) 509 454 { 510 455 struct net_bridge_mdb_entry *mp; 511 456 struct net_bridge_port_group *p; ··· 535 482 if (unlikely(!p)) 536 483 return -ENOMEM; 537 484 rcu_assign_pointer(*pp, p); 538 - *pg = p; 539 485 if (state == MDB_TEMPORARY) 540 486 mod_timer(&p->timer, now + br->multicast_membership_interval); 541 487 ··· 542 490 } 543 491 544 492 static int __br_mdb_add(struct net *net, struct net_bridge *br, 545 - struct br_mdb_entry *entry, 546 - struct net_bridge_port_group **pg) 493 + struct br_mdb_entry *entry) 547 494 { 548 495 struct br_ip ip; 549 496 struct net_device *dev; ··· 560 509 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 561 510 return -EINVAL; 562 511 563 - memset(&ip, 0, sizeof(ip)); 564 - ip.vid = entry->vid; 565 - ip.proto = entry->addr.proto; 566 - if (ip.proto == htons(ETH_P_IP)) 567 - ip.u.ip4 = entry->addr.u.ip4; 568 - #if IS_ENABLED(CONFIG_IPV6) 569 - else 570 - ip.u.ip6 = entry->addr.u.ip6; 571 - #endif 512 + __mdb_entry_to_br_ip(entry, &ip); 572 513 573 514 spin_lock_bh(&br->multicast_lock); 574 - ret = br_mdb_add_group(br, p, &ip, entry->state, pg); 515 + ret = br_mdb_add_group(br, p, &ip, entry->state); 575 516 spin_unlock_bh(&br->multicast_lock); 576 517 return ret; 577 518 } ··· 571 528 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) 572 529 { 573 530 struct net *net = sock_net(skb->sk); 574 - struct net_bridge_port_group *pg; 575 531 struct net_bridge_vlan_group *vg; 576 532 struct net_device *dev, *pdev; 577 533 struct br_mdb_entry *entry; ··· 600 558 if (br_vlan_enabled(br) && vg && entry->vid == 0) { 601 559 list_for_each_entry(v, &vg->vlan_list, vlist) { 602 560 entry->vid = v->vid; 603 - err = __br_mdb_add(net, br, entry, &pg); 561 + err = __br_mdb_add(net, br, entry); 604 562 if (err) 605 563 break; 606 - __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); 564 + __br_mdb_notify(dev, p, entry, RTM_NEWMDB); 607 565 } 608 566 } else { 609 - err = __br_mdb_add(net, br, 
entry, &pg); 567 + err = __br_mdb_add(net, br, entry); 610 568 if (!err) 611 - __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); 569 + __br_mdb_notify(dev, p, entry, RTM_NEWMDB); 612 570 } 613 571 614 572 return err; ··· 626 584 if (!netif_running(br->dev) || br->multicast_disabled) 627 585 return -EINVAL; 628 586 629 - memset(&ip, 0, sizeof(ip)); 630 - ip.vid = entry->vid; 631 - ip.proto = entry->addr.proto; 632 - if (ip.proto == htons(ETH_P_IP)) 633 - ip.u.ip4 = entry->addr.u.ip4; 634 - #if IS_ENABLED(CONFIG_IPV6) 635 - else 636 - ip.u.ip6 = entry->addr.u.ip6; 637 - #endif 587 + __mdb_entry_to_br_ip(entry, &ip); 638 588 639 589 spin_lock_bh(&br->multicast_lock); 640 590 mdb = mlock_dereference(br->mdb, br); ··· 696 662 entry->vid = v->vid; 697 663 err = __br_mdb_del(br, entry); 698 664 if (!err) 699 - __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); 665 + __br_mdb_notify(dev, p, entry, RTM_DELMDB); 700 666 } 701 667 } else { 702 668 err = __br_mdb_del(br, entry); 703 669 if (!err) 704 - __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); 670 + __br_mdb_notify(dev, p, entry, RTM_DELMDB); 705 671 } 706 672 707 673 return err;
+5 -3
net/bridge/br_multicast.c
··· 283 283 rcu_assign_pointer(*pp, p->next); 284 284 hlist_del_init(&p->mglist); 285 285 del_timer(&p->timer); 286 - br_mdb_notify(br->dev, p, RTM_DELMDB); 286 + br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, 287 + p->flags); 287 288 call_rcu_bh(&p->rcu, br_multicast_free_pg); 288 289 289 290 if (!mp->ports && !mp->mglist && ··· 706 705 if (unlikely(!p)) 707 706 goto err; 708 707 rcu_assign_pointer(*pp, p); 709 - br_mdb_notify(br->dev, p, RTM_NEWMDB); 708 + br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0); 710 709 711 710 found: 712 711 mod_timer(&p->timer, now + br->multicast_membership_interval); ··· 1462 1461 hlist_del_init(&p->mglist); 1463 1462 del_timer(&p->timer); 1464 1463 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1465 - br_mdb_notify(br->dev, p, RTM_DELMDB); 1464 + br_mdb_notify(br->dev, port, group, RTM_DELMDB, 1465 + p->flags); 1466 1466 1467 1467 if (!mp->ports && !mp->mglist && 1468 1468 netif_running(br->dev))
+2 -2
net/bridge/br_private.h
··· 560 560 unsigned char flags); 561 561 void br_mdb_init(void); 562 562 void br_mdb_uninit(void); 563 - void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, 564 - int type); 563 + void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, 564 + struct br_ip *group, int type, u8 flags); 565 565 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port, 566 566 int type); 567 567
+5 -1
net/bridge/netfilter/ebtables.c
··· 370 370 left - sizeof(struct ebt_entry_match) < m->match_size) 371 371 return -EINVAL; 372 372 373 - match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0); 373 + match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); 374 + if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) { 375 + request_module("ebt_%s", m->u.name); 376 + match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); 377 + } 374 378 if (IS_ERR(match)) 375 379 return PTR_ERR(match); 376 380 m->u.match = match;
+2 -6
net/ceph/auth.c
··· 293 293 } 294 294 EXPORT_SYMBOL(ceph_auth_create_authorizer); 295 295 296 - void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 297 - struct ceph_authorizer *a) 296 + void ceph_auth_destroy_authorizer(struct ceph_authorizer *a) 298 297 { 299 - mutex_lock(&ac->mutex); 300 - if (ac->ops && ac->ops->destroy_authorizer) 301 - ac->ops->destroy_authorizer(ac, a); 302 - mutex_unlock(&ac->mutex); 298 + a->destroy(a); 303 299 } 304 300 EXPORT_SYMBOL(ceph_auth_destroy_authorizer); 305 301
+39 -32
net/ceph/auth_none.c
··· 16 16 struct ceph_auth_none_info *xi = ac->private; 17 17 18 18 xi->starting = true; 19 - xi->built_authorizer = false; 20 19 } 21 20 22 21 static void destroy(struct ceph_auth_client *ac) ··· 38 39 return xi->starting; 39 40 } 40 41 42 + static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac, 43 + struct ceph_none_authorizer *au) 44 + { 45 + void *p = au->buf; 46 + void *const end = p + sizeof(au->buf); 47 + int ret; 48 + 49 + ceph_encode_8_safe(&p, end, 1, e_range); 50 + ret = ceph_entity_name_encode(ac->name, &p, end); 51 + if (ret < 0) 52 + return ret; 53 + 54 + ceph_encode_64_safe(&p, end, ac->global_id, e_range); 55 + au->buf_len = p - (void *)au->buf; 56 + dout("%s built authorizer len %d\n", __func__, au->buf_len); 57 + return 0; 58 + 59 + e_range: 60 + return -ERANGE; 61 + } 62 + 41 63 static int build_request(struct ceph_auth_client *ac, void *buf, void *end) 42 64 { 43 65 return 0; ··· 77 57 return result; 78 58 } 79 59 60 + static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a) 61 + { 62 + kfree(a); 63 + } 64 + 80 65 /* 81 - * build an 'authorizer' with our entity_name and global_id. we can 82 - * reuse a single static copy since it is identical for all services 83 - * we connect to. 66 + * build an 'authorizer' with our entity_name and global_id. it is 67 + * identical for all services we connect to. 
84 68 */ 85 69 static int ceph_auth_none_create_authorizer( 86 70 struct ceph_auth_client *ac, int peer_type, 87 71 struct ceph_auth_handshake *auth) 88 72 { 89 - struct ceph_auth_none_info *ai = ac->private; 90 - struct ceph_none_authorizer *au = &ai->au; 91 - void *p, *end; 73 + struct ceph_none_authorizer *au; 92 74 int ret; 93 75 94 - if (!ai->built_authorizer) { 95 - p = au->buf; 96 - end = p + sizeof(au->buf); 97 - ceph_encode_8(&p, 1); 98 - ret = ceph_entity_name_encode(ac->name, &p, end - 8); 99 - if (ret < 0) 100 - goto bad; 101 - ceph_decode_need(&p, end, sizeof(u64), bad2); 102 - ceph_encode_64(&p, ac->global_id); 103 - au->buf_len = p - (void *)au->buf; 104 - ai->built_authorizer = true; 105 - dout("built authorizer len %d\n", au->buf_len); 76 + au = kmalloc(sizeof(*au), GFP_NOFS); 77 + if (!au) 78 + return -ENOMEM; 79 + 80 + au->base.destroy = ceph_auth_none_destroy_authorizer; 81 + 82 + ret = ceph_auth_none_build_authorizer(ac, au); 83 + if (ret) { 84 + kfree(au); 85 + return ret; 106 86 } 107 87 108 88 auth->authorizer = (struct ceph_authorizer *) au; ··· 112 92 auth->authorizer_reply_buf_len = sizeof (au->reply_buf); 113 93 114 94 return 0; 115 - 116 - bad2: 117 - ret = -ERANGE; 118 - bad: 119 - return ret; 120 - } 121 - 122 - static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac, 123 - struct ceph_authorizer *a) 124 - { 125 - /* nothing to do */ 126 95 } 127 96 128 97 static const struct ceph_auth_client_ops ceph_auth_none_ops = { ··· 123 114 .build_request = build_request, 124 115 .handle_reply = handle_reply, 125 116 .create_authorizer = ceph_auth_none_create_authorizer, 126 - .destroy_authorizer = ceph_auth_none_destroy_authorizer, 127 117 }; 128 118 129 119 int ceph_auth_none_init(struct ceph_auth_client *ac) ··· 135 127 return -ENOMEM; 136 128 137 129 xi->starting = true; 138 - xi->built_authorizer = false; 139 130 140 131 ac->protocol = CEPH_AUTH_NONE; 141 132 ac->private = xi;
+1 -2
net/ceph/auth_none.h
··· 12 12 */ 13 13 14 14 struct ceph_none_authorizer { 15 + struct ceph_authorizer base; 15 16 char buf[128]; 16 17 int buf_len; 17 18 char reply_buf[0]; ··· 20 19 21 20 struct ceph_auth_none_info { 22 21 bool starting; 23 - bool built_authorizer; 24 - struct ceph_none_authorizer au; /* we only need one; it's static */ 25 22 }; 26 23 27 24 int ceph_auth_none_init(struct ceph_auth_client *ac);
+10 -11
net/ceph/auth_x.c
··· 565 565 return -EAGAIN; 566 566 } 567 567 568 + static void ceph_x_destroy_authorizer(struct ceph_authorizer *a) 569 + { 570 + struct ceph_x_authorizer *au = (void *)a; 571 + 572 + ceph_x_authorizer_cleanup(au); 573 + kfree(au); 574 + } 575 + 568 576 static int ceph_x_create_authorizer( 569 577 struct ceph_auth_client *ac, int peer_type, 570 578 struct ceph_auth_handshake *auth) ··· 588 580 au = kzalloc(sizeof(*au), GFP_NOFS); 589 581 if (!au) 590 582 return -ENOMEM; 583 + 584 + au->base.destroy = ceph_x_destroy_authorizer; 591 585 592 586 ret = ceph_x_build_authorizer(ac, th, au); 593 587 if (ret) { ··· 652 642 au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); 653 643 return ret; 654 644 } 655 - 656 - static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, 657 - struct ceph_authorizer *a) 658 - { 659 - struct ceph_x_authorizer *au = (void *)a; 660 - 661 - ceph_x_authorizer_cleanup(au); 662 - kfree(au); 663 - } 664 - 665 645 666 646 static void ceph_x_reset(struct ceph_auth_client *ac) 667 647 { ··· 770 770 .create_authorizer = ceph_x_create_authorizer, 771 771 .update_authorizer = ceph_x_update_authorizer, 772 772 .verify_authorizer_reply = ceph_x_verify_authorizer_reply, 773 - .destroy_authorizer = ceph_x_destroy_authorizer, 774 773 .invalidate_authorizer = ceph_x_invalidate_authorizer, 775 774 .reset = ceph_x_reset, 776 775 .destroy = ceph_x_destroy,
+1
net/ceph/auth_x.h
··· 26 26 27 27 28 28 struct ceph_x_authorizer { 29 + struct ceph_authorizer base; 29 30 struct ceph_crypto_key session_key; 30 31 struct ceph_buffer *buf; 31 32 unsigned int service;
+2 -4
net/ceph/osd_client.c
··· 1087 1087 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), 1088 1088 atomic_read(&osd->o_ref) - 1); 1089 1089 if (atomic_dec_and_test(&osd->o_ref)) { 1090 - struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; 1091 - 1092 1090 if (osd->o_auth.authorizer) 1093 - ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); 1091 + ceph_auth_destroy_authorizer(osd->o_auth.authorizer); 1094 1092 kfree(osd); 1095 1093 } 1096 1094 } ··· 2982 2984 struct ceph_auth_handshake *auth = &o->o_auth; 2983 2985 2984 2986 if (force_new && auth->authorizer) { 2985 - ceph_auth_destroy_authorizer(ac, auth->authorizer); 2987 + ceph_auth_destroy_authorizer(auth->authorizer); 2986 2988 auth->authorizer = NULL; 2987 2989 } 2988 2990 if (!auth->authorizer) {
+5 -2
net/core/skbuff.c
··· 4502 4502 __skb_push(skb, offset); 4503 4503 err = __vlan_insert_tag(skb, skb->vlan_proto, 4504 4504 skb_vlan_tag_get(skb)); 4505 - if (err) 4505 + if (err) { 4506 + __skb_pull(skb, offset); 4506 4507 return err; 4508 + } 4509 + 4507 4510 skb->protocol = skb->vlan_proto; 4508 4511 skb->mac_len += VLAN_HLEN; 4509 - __skb_pull(skb, offset); 4510 4512 4511 4513 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 4514 + __skb_pull(skb, offset); 4512 4515 } 4513 4516 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 4514 4517 return 0;
+8 -1
net/decnet/dn_route.c
··· 1034 1034 if (!fld.daddr) { 1035 1035 fld.daddr = fld.saddr; 1036 1036 1037 - err = -EADDRNOTAVAIL; 1038 1037 if (dev_out) 1039 1038 dev_put(dev_out); 1039 + err = -EINVAL; 1040 1040 dev_out = init_net.loopback_dev; 1041 + if (!dev_out->dn_ptr) 1042 + goto out; 1043 + err = -EADDRNOTAVAIL; 1041 1044 dev_hold(dev_out); 1042 1045 if (!fld.daddr) { 1043 1046 fld.daddr = ··· 1113 1110 if (dev_out == NULL) 1114 1111 goto out; 1115 1112 dn_db = rcu_dereference_raw(dev_out->dn_ptr); 1113 + if (!dn_db) 1114 + goto e_inval; 1116 1115 /* Possible improvement - check all devices for local addr */ 1117 1116 if (dn_dev_islocal(dev_out, fld.daddr)) { 1118 1117 dev_put(dev_out); ··· 1156 1151 dev_put(dev_out); 1157 1152 dev_out = init_net.loopback_dev; 1158 1153 dev_hold(dev_out); 1154 + if (!dev_out->dn_ptr) 1155 + goto e_inval; 1159 1156 fld.flowidn_oif = dev_out->ifindex; 1160 1157 if (res.fi) 1161 1158 dn_fib_info_put(res.fi);
+5 -1
net/ipv4/fib_frontend.c
··· 904 904 if (ifa->ifa_flags & IFA_F_SECONDARY) { 905 905 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); 906 906 if (!prim) { 907 - pr_warn("%s: bug: prim == NULL\n", __func__); 907 + /* if the device has been deleted, we don't perform 908 + * address promotion 909 + */ 910 + if (!in_dev->dead) 911 + pr_warn("%s: bug: prim == NULL\n", __func__); 908 912 return; 909 913 } 910 914 if (iprim && iprim != prim) {
+6
net/ipv4/netfilter/arptable_filter.c
··· 81 81 return ret; 82 82 } 83 83 84 + ret = arptable_filter_table_init(&init_net); 85 + if (ret) { 86 + unregister_pernet_subsys(&arptable_filter_net_ops); 87 + kfree(arpfilter_ops); 88 + } 89 + 84 90 return ret; 85 91 } 86 92
+16 -3
net/ipv4/route.c
··· 1438 1438 #endif 1439 1439 } 1440 1440 1441 - static struct rtable *rt_dst_alloc(struct net_device *dev, 1442 - unsigned int flags, u16 type, 1443 - bool nopolicy, bool noxfrm, bool will_cache) 1441 + struct rtable *rt_dst_alloc(struct net_device *dev, 1442 + unsigned int flags, u16 type, 1443 + bool nopolicy, bool noxfrm, bool will_cache) 1444 1444 { 1445 1445 struct rtable *rt; 1446 1446 ··· 1468 1468 1469 1469 return rt; 1470 1470 } 1471 + EXPORT_SYMBOL(rt_dst_alloc); 1471 1472 1472 1473 /* called in rcu_read_lock() section */ 1473 1474 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, ··· 2046 2045 */ 2047 2046 if (fi && res->prefixlen < 4) 2048 2047 fi = NULL; 2048 + } else if ((type == RTN_LOCAL) && (orig_oif != 0) && 2049 + (orig_oif != dev_out->ifindex)) { 2050 + /* For local routes that require a particular output interface 2051 + * we do not want to cache the result. Caching the result 2052 + * causes incorrect behaviour when there are multiple source 2053 + * addresses on the interface, the end result being that if the 2054 + * intended recipient is waiting on that interface for the 2055 + * packet he won't receive it because it will be delivered on 2056 + * the loopback interface and the IP_PKTINFO ipi_ifindex will 2057 + * be set to the loopback interface as well. 2058 + */ 2059 + fi = NULL; 2049 2060 } 2050 2061 2051 2062 fnhe = NULL;
+3 -1
net/ipv4/tcp_input.c
··· 1309 1309 if (skb == tcp_highest_sack(sk)) 1310 1310 tcp_advance_highest_sack(sk, skb); 1311 1311 1312 + tcp_skb_collapse_tstamp(prev, skb); 1312 1313 tcp_unlink_write_queue(skb, sk); 1313 1314 sk_wmem_free_skb(sk, skb); 1314 1315 ··· 3099 3098 3100 3099 shinfo = skb_shinfo(skb); 3101 3100 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && 3102 - between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) 3101 + !before(shinfo->tskey, prior_snd_una) && 3102 + before(shinfo->tskey, tcp_sk(sk)->snd_una)) 3103 3103 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); 3104 3104 } 3105 3105
+16
net/ipv4/tcp_output.c
··· 2441 2441 return window; 2442 2442 } 2443 2443 2444 + void tcp_skb_collapse_tstamp(struct sk_buff *skb, 2445 + const struct sk_buff *next_skb) 2446 + { 2447 + const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb); 2448 + u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; 2449 + 2450 + if (unlikely(tsflags)) { 2451 + struct skb_shared_info *shinfo = skb_shinfo(skb); 2452 + 2453 + shinfo->tx_flags |= tsflags; 2454 + shinfo->tskey = next_shinfo->tskey; 2455 + } 2456 + } 2457 + 2444 2458 /* Collapses two adjacent SKB's during retransmission. */ 2445 2459 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 2446 2460 { ··· 2497 2483 tp->retransmit_skb_hint = skb; 2498 2484 2499 2485 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2486 + 2487 + tcp_skb_collapse_tstamp(skb, next_skb); 2500 2488 2501 2489 sk_wmem_free_skb(sk, next_skb); 2502 2490 }
+7 -2
net/ipv4/udp.c
··· 339 339 340 340 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); 341 341 spin_lock(&hslot2->lock); 342 - hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, 343 - &hslot2->head); 342 + if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && 343 + sk->sk_family == AF_INET6) 344 + hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node, 345 + &hslot2->head); 346 + else 347 + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, 348 + &hslot2->head); 344 349 hslot2->count++; 345 350 spin_unlock(&hslot2->lock); 346 351 }
+35 -35
net/ipv6/addrconf.c
··· 3176 3176 } 3177 3177 #endif 3178 3178 3179 - #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 3180 - /* If the host route is cached on the addr struct make sure it is associated 3181 - * with the proper table. e.g., enslavement can change and if so the cached 3182 - * host route needs to move to the new table. 3183 - */ 3184 - static void l3mdev_check_host_rt(struct inet6_dev *idev, 3185 - struct inet6_ifaddr *ifp) 3186 - { 3187 - if (ifp->rt) { 3188 - u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL; 3189 - 3190 - if (tb_id != ifp->rt->rt6i_table->tb6_id) { 3191 - ip6_del_rt(ifp->rt); 3192 - ifp->rt = NULL; 3193 - } 3194 - } 3195 - } 3196 - #else 3197 - static void l3mdev_check_host_rt(struct inet6_dev *idev, 3198 - struct inet6_ifaddr *ifp) 3199 - { 3200 - } 3201 - #endif 3202 - 3203 3179 static int fixup_permanent_addr(struct inet6_dev *idev, 3204 3180 struct inet6_ifaddr *ifp) 3205 3181 { 3206 - l3mdev_check_host_rt(idev, ifp); 3207 - 3208 3182 if (!ifp->rt) { 3209 3183 struct rt6_info *rt; 3210 3184 ··· 3229 3255 void *ptr) 3230 3256 { 3231 3257 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3258 + struct netdev_notifier_changeupper_info *info; 3232 3259 struct inet6_dev *idev = __in6_dev_get(dev); 3233 3260 int run_pending = 0; 3234 3261 int err; ··· 3278 3303 break; 3279 3304 3280 3305 if (event == NETDEV_UP) { 3306 + /* restore routes for permanent addresses */ 3307 + addrconf_permanent_addr(dev); 3308 + 3281 3309 if (!addrconf_qdisc_ok(dev)) { 3282 3310 /* device is not ready yet. 
*/ 3283 3311 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", ··· 3313 3335 3314 3336 run_pending = 1; 3315 3337 } 3316 - 3317 - /* restore routes for permanent addresses */ 3318 - addrconf_permanent_addr(dev); 3319 3338 3320 3339 switch (dev->type) { 3321 3340 #if IS_ENABLED(CONFIG_IPV6_SIT) ··· 3388 3413 if (idev) 3389 3414 addrconf_type_change(dev, event); 3390 3415 break; 3416 + 3417 + case NETDEV_CHANGEUPPER: 3418 + info = ptr; 3419 + 3420 + /* flush all routes if dev is linked to or unlinked from 3421 + * an L3 master device (e.g., VRF) 3422 + */ 3423 + if (info->upper_dev && netif_is_l3_master(info->upper_dev)) 3424 + addrconf_ifdown(dev, 0); 3391 3425 } 3392 3426 3393 3427 return NOTIFY_OK; ··· 3420 3436 ipv6_mc_remap(idev); 3421 3437 else if (event == NETDEV_PRE_TYPE_CHANGE) 3422 3438 ipv6_mc_unmap(idev); 3439 + } 3440 + 3441 + static bool addr_is_local(const struct in6_addr *addr) 3442 + { 3443 + return ipv6_addr_type(addr) & 3444 + (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 3423 3445 } 3424 3446 3425 3447 static int addrconf_ifdown(struct net_device *dev, int how) ··· 3485 3495 * address is retained on a down event 3486 3496 */ 3487 3497 if (!keep_addr || 3488 - !(ifa->flags & IFA_F_PERMANENT)) { 3498 + !(ifa->flags & IFA_F_PERMANENT) || 3499 + addr_is_local(&ifa->addr)) { 3489 3500 hlist_del_init_rcu(&ifa->addr_lst); 3490 3501 goto restart; 3491 3502 } ··· 3530 3539 3531 3540 INIT_LIST_HEAD(&del_list); 3532 3541 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { 3542 + struct rt6_info *rt = NULL; 3543 + 3533 3544 addrconf_del_dad_work(ifa); 3534 3545 3535 3546 write_unlock_bh(&idev->lock); 3536 3547 spin_lock_bh(&ifa->lock); 3537 3548 3538 - if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) { 3549 + if (keep_addr && (ifa->flags & IFA_F_PERMANENT) && 3550 + !addr_is_local(&ifa->addr)) { 3539 3551 /* set state to skip the notifier below */ 3540 3552 state = INET6_IFADDR_STATE_DEAD; 3541 3553 ifa->state = 0; 3542 3554 if (!(ifa->flags & 
IFA_F_NODAD)) 3543 3555 ifa->flags |= IFA_F_TENTATIVE; 3556 + 3557 + rt = ifa->rt; 3558 + ifa->rt = NULL; 3544 3559 } else { 3545 3560 state = ifa->state; 3546 3561 ifa->state = INET6_IFADDR_STATE_DEAD; ··· 3556 3559 } 3557 3560 3558 3561 spin_unlock_bh(&ifa->lock); 3562 + 3563 + if (rt) 3564 + ip6_del_rt(rt); 3559 3565 3560 3566 if (state != INET6_IFADDR_STATE_DEAD) { 3561 3567 __ipv6_ifa_notify(RTM_DELADDR, ifa); ··· 5325 5325 if (rt) 5326 5326 ip6_del_rt(rt); 5327 5327 } 5328 - dst_hold(&ifp->rt->dst); 5329 - 5330 - ip6_del_rt(ifp->rt); 5331 - 5328 + if (ifp->rt) { 5329 + dst_hold(&ifp->rt->dst); 5330 + ip6_del_rt(ifp->rt); 5331 + } 5332 5332 rt_genid_bump_ipv6(net); 5333 5333 break; 5334 5334 }
+106 -63
net/ipv6/datagram.c
··· 40 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 41 41 } 42 42 43 + static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk) 44 + { 45 + struct inet_sock *inet = inet_sk(sk); 46 + struct ipv6_pinfo *np = inet6_sk(sk); 47 + 48 + memset(fl6, 0, sizeof(*fl6)); 49 + fl6->flowi6_proto = sk->sk_protocol; 50 + fl6->daddr = sk->sk_v6_daddr; 51 + fl6->saddr = np->saddr; 52 + fl6->flowi6_oif = sk->sk_bound_dev_if; 53 + fl6->flowi6_mark = sk->sk_mark; 54 + fl6->fl6_dport = inet->inet_dport; 55 + fl6->fl6_sport = inet->inet_sport; 56 + fl6->flowlabel = np->flow_label; 57 + 58 + if (!fl6->flowi6_oif) 59 + fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 60 + 61 + if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) 62 + fl6->flowi6_oif = np->mcast_oif; 63 + 64 + security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 65 + } 66 + 67 + int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr) 68 + { 69 + struct ip6_flowlabel *flowlabel = NULL; 70 + struct in6_addr *final_p, final; 71 + struct ipv6_txoptions *opt; 72 + struct dst_entry *dst; 73 + struct inet_sock *inet = inet_sk(sk); 74 + struct ipv6_pinfo *np = inet6_sk(sk); 75 + struct flowi6 fl6; 76 + int err = 0; 77 + 78 + if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) { 79 + flowlabel = fl6_sock_lookup(sk, np->flow_label); 80 + if (!flowlabel) 81 + return -EINVAL; 82 + } 83 + ip6_datagram_flow_key_init(&fl6, sk); 84 + 85 + rcu_read_lock(); 86 + opt = flowlabel ? 
flowlabel->opt : rcu_dereference(np->opt); 87 + final_p = fl6_update_dst(&fl6, opt, &final); 88 + rcu_read_unlock(); 89 + 90 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 91 + if (IS_ERR(dst)) { 92 + err = PTR_ERR(dst); 93 + goto out; 94 + } 95 + 96 + if (fix_sk_saddr) { 97 + if (ipv6_addr_any(&np->saddr)) 98 + np->saddr = fl6.saddr; 99 + 100 + if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { 101 + sk->sk_v6_rcv_saddr = fl6.saddr; 102 + inet->inet_rcv_saddr = LOOPBACK4_IPV6; 103 + if (sk->sk_prot->rehash) 104 + sk->sk_prot->rehash(sk); 105 + } 106 + } 107 + 108 + ip6_dst_store(sk, dst, 109 + ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? 110 + &sk->sk_v6_daddr : NULL, 111 + #ifdef CONFIG_IPV6_SUBTREES 112 + ipv6_addr_equal(&fl6.saddr, &np->saddr) ? 113 + &np->saddr : 114 + #endif 115 + NULL); 116 + 117 + out: 118 + fl6_sock_release(flowlabel); 119 + return err; 120 + } 121 + 122 + void ip6_datagram_release_cb(struct sock *sk) 123 + { 124 + struct dst_entry *dst; 125 + 126 + if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) 127 + return; 128 + 129 + rcu_read_lock(); 130 + dst = __sk_dst_get(sk); 131 + if (!dst || !dst->obsolete || 132 + dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) { 133 + rcu_read_unlock(); 134 + return; 135 + } 136 + rcu_read_unlock(); 137 + 138 + ip6_datagram_dst_update(sk, false); 139 + } 140 + EXPORT_SYMBOL_GPL(ip6_datagram_release_cb); 141 + 43 142 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 44 143 { 45 144 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 46 145 struct inet_sock *inet = inet_sk(sk); 47 146 struct ipv6_pinfo *np = inet6_sk(sk); 48 - struct in6_addr *daddr, *final_p, final; 49 - struct dst_entry *dst; 50 - struct flowi6 fl6; 51 - struct ip6_flowlabel *flowlabel = NULL; 52 - struct ipv6_txoptions *opt; 147 + struct in6_addr *daddr; 53 148 int addr_type; 54 149 int err; 150 + __be32 fl6_flowlabel = 0; 55 151 56 152 if (usin->sin6_family == AF_INET) { 57 153 if (__ipv6_only_sock(sk)) 
··· 162 66 if (usin->sin6_family != AF_INET6) 163 67 return -EAFNOSUPPORT; 164 68 165 - memset(&fl6, 0, sizeof(fl6)); 166 - if (np->sndflow) { 167 - fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK; 168 - if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { 169 - flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 170 - if (!flowlabel) 171 - return -EINVAL; 172 - } 173 - } 69 + if (np->sndflow) 70 + fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; 174 71 175 72 addr_type = ipv6_addr_type(&usin->sin6_addr); 176 73 ··· 234 145 } 235 146 236 147 sk->sk_v6_daddr = *daddr; 237 - np->flow_label = fl6.flowlabel; 148 + np->flow_label = fl6_flowlabel; 238 149 239 150 inet->inet_dport = usin->sin6_port; 240 151 ··· 243 154 * destination cache for it. 244 155 */ 245 156 246 - fl6.flowi6_proto = sk->sk_protocol; 247 - fl6.daddr = sk->sk_v6_daddr; 248 - fl6.saddr = np->saddr; 249 - fl6.flowi6_oif = sk->sk_bound_dev_if; 250 - fl6.flowi6_mark = sk->sk_mark; 251 - fl6.fl6_dport = inet->inet_dport; 252 - fl6.fl6_sport = inet->inet_sport; 253 - 254 - if (!fl6.flowi6_oif) 255 - fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 256 - 257 - if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST)) 258 - fl6.flowi6_oif = np->mcast_oif; 259 - 260 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 261 - 262 - rcu_read_lock(); 263 - opt = flowlabel ? 
flowlabel->opt : rcu_dereference(np->opt); 264 - final_p = fl6_update_dst(&fl6, opt, &final); 265 - rcu_read_unlock(); 266 - 267 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 268 - err = 0; 269 - if (IS_ERR(dst)) { 270 - err = PTR_ERR(dst); 157 + err = ip6_datagram_dst_update(sk, true); 158 + if (err) 271 159 goto out; 272 - } 273 - 274 - /* source address lookup done in ip6_dst_lookup */ 275 - 276 - if (ipv6_addr_any(&np->saddr)) 277 - np->saddr = fl6.saddr; 278 - 279 - if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { 280 - sk->sk_v6_rcv_saddr = fl6.saddr; 281 - inet->inet_rcv_saddr = LOOPBACK4_IPV6; 282 - if (sk->sk_prot->rehash) 283 - sk->sk_prot->rehash(sk); 284 - } 285 - 286 - ip6_dst_store(sk, dst, 287 - ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? 288 - &sk->sk_v6_daddr : NULL, 289 - #ifdef CONFIG_IPV6_SUBTREES 290 - ipv6_addr_equal(&fl6.saddr, &np->saddr) ? 291 - &np->saddr : 292 - #endif 293 - NULL); 294 160 295 161 sk->sk_state = TCP_ESTABLISHED; 296 162 sk_set_txhash(sk); 297 163 out: 298 - fl6_sock_release(flowlabel); 299 164 return err; 300 165 } 301 166
+16 -3
net/ipv6/route.c
··· 338 338 return rt; 339 339 } 340 340 341 - static struct rt6_info *ip6_dst_alloc(struct net *net, 342 - struct net_device *dev, 343 - int flags) 341 + struct rt6_info *ip6_dst_alloc(struct net *net, 342 + struct net_device *dev, 343 + int flags) 344 344 { 345 345 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); 346 346 ··· 364 364 365 365 return rt; 366 366 } 367 + EXPORT_SYMBOL(ip6_dst_alloc); 367 368 368 369 static void ip6_dst_destroy(struct dst_entry *dst) 369 370 { ··· 1418 1417 1419 1418 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) 1420 1419 { 1420 + struct dst_entry *dst; 1421 + 1421 1422 ip6_update_pmtu(skb, sock_net(sk), mtu, 1422 1423 sk->sk_bound_dev_if, sk->sk_mark); 1424 + 1425 + dst = __sk_dst_get(sk); 1426 + if (!dst || !dst->obsolete || 1427 + dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) 1428 + return; 1429 + 1430 + bh_lock_sock(sk); 1431 + if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) 1432 + ip6_datagram_dst_update(sk, false); 1433 + bh_unlock_sock(sk); 1423 1434 } 1424 1435 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); 1425 1436
+1
net/ipv6/udp.c
··· 1539 1539 .sendmsg = udpv6_sendmsg, 1540 1540 .recvmsg = udpv6_recvmsg, 1541 1541 .backlog_rcv = __udpv6_queue_rcv_skb, 1542 + .release_cb = ip6_datagram_release_cb, 1542 1543 .hash = udp_lib_hash, 1543 1544 .unhash = udp_lib_unhash, 1544 1545 .rehash = udp_v6_rehash,
+4
net/netfilter/nf_conntrack_proto_tcp.c
··· 410 410 length--; 411 411 continue; 412 412 default: 413 + if (length < 2) 414 + return; 413 415 opsize=*ptr++; 414 416 if (opsize < 2) /* "silly options" */ 415 417 return; ··· 472 470 length--; 473 471 continue; 474 472 default: 473 + if (length < 2) 474 + return; 475 475 opsize = *ptr++; 476 476 if (opsize < 2) /* "silly options" */ 477 477 return;
+1 -1
net/netlink/af_netlink.c
··· 688 688 689 689 skb_queue_purge(&sk->sk_write_queue); 690 690 691 - if (nlk->portid) { 691 + if (nlk->portid && nlk->bound) { 692 692 struct netlink_notify n = { 693 693 .net = sock_net(sk), 694 694 .protocol = sk->sk_protocol,
+2 -2
net/openvswitch/actions.c
··· 461 461 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); 462 462 463 463 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) { 464 - set_ipv6_addr(skb, key->ipv6_proto, saddr, masked, 464 + set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked, 465 465 true); 466 466 memcpy(&flow_key->ipv6.addr.src, masked, 467 467 sizeof(flow_key->ipv6.addr.src)); ··· 483 483 NULL, &flags) 484 484 != NEXTHDR_ROUTING); 485 485 486 - set_ipv6_addr(skb, key->ipv6_proto, daddr, masked, 486 + set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked, 487 487 recalc_csum); 488 488 memcpy(&flow_key->ipv6.addr.dst, masked, 489 489 sizeof(flow_key->ipv6.addr.dst));
+1
net/openvswitch/conntrack.c
··· 367 367 } else if (key->eth.type == htons(ETH_P_IPV6)) { 368 368 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; 369 369 370 + skb_orphan(skb); 370 371 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 371 372 err = nf_ct_frag6_gather(net, skb, user); 372 373 if (err)
+1
net/packet/af_packet.c
··· 3521 3521 i->ifindex = mreq->mr_ifindex; 3522 3522 i->alen = mreq->mr_alen; 3523 3523 memcpy(i->addr, mreq->mr_address, i->alen); 3524 + memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3524 3525 i->count = 1; 3525 3526 i->next = po->mclist; 3526 3527 po->mclist = i;
+2 -2
net/rds/cong.c
··· 299 299 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 300 300 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 301 301 302 - __set_bit_le(off, (void *)map->m_page_addrs[i]); 302 + set_bit_le(off, (void *)map->m_page_addrs[i]); 303 303 } 304 304 305 305 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) ··· 313 313 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 314 314 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 315 315 316 - __clear_bit_le(off, (void *)map->m_page_addrs[i]); 316 + clear_bit_le(off, (void *)map->m_page_addrs[i]); 317 317 } 318 318 319 319 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
+1 -1
net/rds/ib_cm.c
··· 194 194 dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version); 195 195 dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version); 196 196 dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); 197 - dp->dp_ack_seq = rds_ib_piggyb_ack(ic); 197 + dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic)); 198 198 199 199 /* Advertise flow control */ 200 200 if (ic->i_flowctl) {
+4 -1
net/sched/sch_generic.c
··· 159 159 if (validate) 160 160 skb = validate_xmit_skb_list(skb, dev); 161 161 162 - if (skb) { 162 + if (likely(skb)) { 163 163 HARD_TX_LOCK(dev, txq, smp_processor_id()); 164 164 if (!netif_xmit_frozen_or_stopped(txq)) 165 165 skb = dev_hard_start_xmit(skb, dev, txq, &ret); 166 166 167 167 HARD_TX_UNLOCK(dev, txq); 168 + } else { 169 + spin_lock(root_lock); 170 + return qdisc_qlen(q); 168 171 } 169 172 spin_lock(root_lock); 170 173
+10 -5
net/sctp/outqueue.c
··· 866 866 * sender MUST assure that at least one T3-rtx 867 867 * timer is running. 868 868 */ 869 - if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) 870 - sctp_transport_reset_timers(transport); 869 + if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { 870 + sctp_transport_reset_t3_rtx(transport); 871 + transport->last_time_sent = jiffies; 872 + } 871 873 } 872 874 break; 873 875 ··· 926 924 error = sctp_outq_flush_rtx(q, packet, 927 925 rtx_timeout, &start_timer); 928 926 929 - if (start_timer) 930 - sctp_transport_reset_timers(transport); 927 + if (start_timer) { 928 + sctp_transport_reset_t3_rtx(transport); 929 + transport->last_time_sent = jiffies; 930 + } 931 931 932 932 /* This can happen on COOKIE-ECHO resend. Only 933 933 * one chunk can get bundled with a COOKIE-ECHO. ··· 1066 1062 list_add_tail(&chunk->transmitted_list, 1067 1063 &transport->transmitted); 1068 1064 1069 - sctp_transport_reset_timers(transport); 1065 + sctp_transport_reset_t3_rtx(transport); 1066 + transport->last_time_sent = jiffies; 1070 1067 1071 1068 /* Only let one DATA chunk get bundled with a 1072 1069 * COOKIE-ECHO chunk.
+1 -2
net/sctp/sm_make_chunk.c
··· 3080 3080 return SCTP_ERROR_RSRC_LOW; 3081 3081 3082 3082 /* Start the heartbeat timer. */ 3083 - if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) 3084 - sctp_transport_hold(peer); 3083 + sctp_transport_reset_hb_timer(peer); 3085 3084 asoc->new_transport = peer; 3086 3085 break; 3087 3086 case SCTP_PARAM_DEL_IP:
+16 -20
net/sctp/sm_sideeffect.c
··· 69 69 sctp_cmd_seq_t *commands, 70 70 gfp_t gfp); 71 71 72 - static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, 73 - struct sctp_transport *t); 74 72 /******************************************************************** 75 73 * Helper functions 76 74 ********************************************************************/ ··· 365 367 struct sctp_association *asoc = transport->asoc; 366 368 struct sock *sk = asoc->base.sk; 367 369 struct net *net = sock_net(sk); 370 + u32 elapsed, timeout; 368 371 369 372 bh_lock_sock(sk); 370 373 if (sock_owned_by_user(sk)) { ··· 373 374 374 375 /* Try again later. */ 375 376 if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) 377 + sctp_transport_hold(transport); 378 + goto out_unlock; 379 + } 380 + 381 + /* Check if we should still send the heartbeat or reschedule */ 382 + elapsed = jiffies - transport->last_time_sent; 383 + timeout = sctp_transport_timeout(transport); 384 + if (elapsed < timeout) { 385 + elapsed = timeout - elapsed; 386 + if (!mod_timer(&transport->hb_timer, jiffies + elapsed)) 376 387 sctp_transport_hold(transport); 377 388 goto out_unlock; 378 389 } ··· 516 507 0); 517 508 518 509 /* Update the hb timer to resend a heartbeat every rto */ 519 - sctp_cmd_hb_timer_update(commands, transport); 510 + sctp_transport_reset_hb_timer(transport); 520 511 } 521 512 522 513 if (transport->state != SCTP_INACTIVE && ··· 643 634 * hold a reference on the transport to make sure none of 644 635 * the needed data structures go away. 645 636 */ 646 - list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { 647 - 648 - if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 649 - sctp_transport_hold(t); 650 - } 637 + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) 638 + sctp_transport_reset_hb_timer(t); 651 639 } 652 640 653 641 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, ··· 674 668 } 675 669 } 676 670 677 - 678 - /* Helper function to update the heartbeat timer. 
*/ 679 - static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, 680 - struct sctp_transport *t) 681 - { 682 - /* Update the heartbeat timer. */ 683 - if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 684 - sctp_transport_hold(t); 685 - } 686 671 687 672 /* Helper function to handle the reception of an HEARTBEAT ACK. */ 688 673 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, ··· 739 742 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); 740 743 741 744 /* Update the heartbeat timer. */ 742 - if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 743 - sctp_transport_hold(t); 745 + sctp_transport_reset_hb_timer(t); 744 746 745 747 if (was_unconfirmed && asoc->peer.transport_count == 1) 746 748 sctp_transport_immediate_rtx(t); ··· 1610 1614 1611 1615 case SCTP_CMD_HB_TIMER_UPDATE: 1612 1616 t = cmd->obj.transport; 1613 - sctp_cmd_hb_timer_update(commands, t); 1617 + sctp_transport_reset_hb_timer(t); 1614 1618 break; 1615 1619 1616 1620 case SCTP_CMD_HB_TIMERS_STOP:
+13 -6
net/sctp/transport.c
··· 183 183 /* Start T3_rtx timer if it is not already running and update the heartbeat 184 184 * timer. This routine is called every time a DATA chunk is sent. 185 185 */ 186 - void sctp_transport_reset_timers(struct sctp_transport *transport) 186 + void sctp_transport_reset_t3_rtx(struct sctp_transport *transport) 187 187 { 188 188 /* RFC 2960 6.3.2 Retransmission Timer Rules 189 189 * ··· 197 197 if (!mod_timer(&transport->T3_rtx_timer, 198 198 jiffies + transport->rto)) 199 199 sctp_transport_hold(transport); 200 + } 201 + 202 + void sctp_transport_reset_hb_timer(struct sctp_transport *transport) 203 + { 204 + unsigned long expires; 200 205 201 206 /* When a data chunk is sent, reset the heartbeat interval. */ 202 - if (!mod_timer(&transport->hb_timer, 203 - sctp_transport_timeout(transport))) 204 - sctp_transport_hold(transport); 207 + expires = jiffies + sctp_transport_timeout(transport); 208 + if (time_before(transport->hb_timer.expires, expires) && 209 + !mod_timer(&transport->hb_timer, 210 + expires + prandom_u32_max(transport->rto))) 211 + sctp_transport_hold(transport); 205 212 } 206 213 207 214 /* This transport has been assigned to an association. ··· 602 595 unsigned long sctp_transport_timeout(struct sctp_transport *trans) 603 596 { 604 597 /* RTO + timer slack +/- 50% of RTO */ 605 - unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto); 598 + unsigned long timeout = trans->rto >> 1; 606 599 607 600 if (trans->state != SCTP_UNCONFIRMED && 608 601 trans->state != SCTP_PF) 609 602 timeout += trans->hbinterval; 610 603 611 - return timeout + jiffies; 604 + return timeout; 612 605 } 613 606 614 607 /* Reset transport variables to their initial values */
+6
net/switchdev/switchdev.c
··· 305 305 if (err && err != -EOPNOTSUPP) 306 306 netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", 307 307 err, attr->id); 308 + if (attr->complete) 309 + attr->complete(dev, err, attr->complete_priv); 308 310 } 309 311 310 312 static int switchdev_port_attr_set_defer(struct net_device *dev, ··· 436 434 if (err && err != -EOPNOTSUPP) 437 435 netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", 438 436 err, obj->id); 437 + if (obj->complete) 438 + obj->complete(dev, err, obj->complete_priv); 439 439 } 440 440 441 441 static int switchdev_port_obj_add_defer(struct net_device *dev, ··· 506 502 if (err && err != -EOPNOTSUPP) 507 503 netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", 508 504 err, obj->id); 505 + if (obj->complete) 506 + obj->complete(dev, err, obj->complete_priv); 509 507 } 510 508 511 509 static int switchdev_port_obj_del_defer(struct net_device *dev,
+1
net/tipc/core.c
··· 69 69 if (err) 70 70 goto out_nametbl; 71 71 72 + INIT_LIST_HEAD(&tn->dist_queue); 72 73 err = tipc_topsrv_start(net); 73 74 if (err) 74 75 goto out_subscr;
+3
net/tipc/core.h
··· 103 103 spinlock_t nametbl_lock; 104 104 struct name_table *nametbl; 105 105 106 + /* Name dist queue */ 107 + struct list_head dist_queue; 108 + 106 109 /* Topology subscription server */ 107 110 struct tipc_server *topsrv; 108 111 atomic_t subscription_count;
+26 -9
net/tipc/name_distr.c
··· 40 40 41 41 int sysctl_tipc_named_timeout __read_mostly = 2000; 42 42 43 - /** 44 - * struct tipc_dist_queue - queue holding deferred name table updates 45 - */ 46 - static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue); 47 - 48 43 struct distr_queue_item { 49 44 struct distr_item i; 50 45 u32 dtype; ··· 224 229 kfree_rcu(p, rcu); 225 230 } 226 231 232 + /** 233 + * tipc_dist_queue_purge - remove deferred updates from a node that went down 234 + */ 235 + static void tipc_dist_queue_purge(struct net *net, u32 addr) 236 + { 237 + struct tipc_net *tn = net_generic(net, tipc_net_id); 238 + struct distr_queue_item *e, *tmp; 239 + 240 + spin_lock_bh(&tn->nametbl_lock); 241 + list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) { 242 + if (e->node != addr) 243 + continue; 244 + list_del(&e->next); 245 + kfree(e); 246 + } 247 + spin_unlock_bh(&tn->nametbl_lock); 248 + } 249 + 227 250 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) 228 251 { 229 252 struct publication *publ, *tmp; 230 253 231 254 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) 232 255 tipc_publ_purge(net, publ, addr); 256 + tipc_dist_queue_purge(net, addr); 233 257 } 234 258 235 259 /** ··· 293 279 * tipc_named_add_backlog - add a failed name table update to the backlog 294 280 * 295 281 */ 296 - static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) 282 + static void tipc_named_add_backlog(struct net *net, struct distr_item *i, 283 + u32 type, u32 node) 297 284 { 298 285 struct distr_queue_item *e; 286 + struct tipc_net *tn = net_generic(net, tipc_net_id); 299 287 unsigned long now = get_jiffies_64(); 300 288 301 289 e = kzalloc(sizeof(*e), GFP_ATOMIC); ··· 307 291 e->node = node; 308 292 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); 309 293 memcpy(e, i, sizeof(*i)); 310 - list_add_tail(&e->next, &tipc_dist_queue); 294 + list_add_tail(&e->next, &tn->dist_queue); 311 295 } 312 296 313 297 /** ··· 
317 301 void tipc_named_process_backlog(struct net *net) 318 302 { 319 303 struct distr_queue_item *e, *tmp; 304 + struct tipc_net *tn = net_generic(net, tipc_net_id); 320 305 char addr[16]; 321 306 unsigned long now = get_jiffies_64(); 322 307 323 - list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { 308 + list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) { 324 309 if (time_after(e->expires, now)) { 325 310 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) 326 311 continue; ··· 361 344 node = msg_orignode(msg); 362 345 while (count--) { 363 346 if (!tipc_update_nametbl(net, item, node, mtype)) 364 - tipc_named_add_backlog(item, mtype, node); 347 + tipc_named_add_backlog(net, item, mtype, node); 365 348 item++; 366 349 } 367 350 kfree_skb(skb);
+2 -5
net/vmw_vsock/vmci_transport.c
··· 1735 1735 /* Retrieve the head sk_buff from the socket's receive queue. */ 1736 1736 err = 0; 1737 1737 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); 1738 - if (err) 1739 - return err; 1740 - 1741 1738 if (!skb) 1742 - return -EAGAIN; 1739 + return err; 1743 1740 1744 1741 dg = (struct vmci_datagram *)skb->data; 1745 1742 if (!dg) ··· 2151 2154 2152 2155 MODULE_AUTHOR("VMware, Inc."); 2153 2156 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); 2154 - MODULE_VERSION("1.0.3.0-k"); 2157 + MODULE_VERSION("1.0.4.0-k"); 2155 2158 MODULE_LICENSE("GPL v2"); 2156 2159 MODULE_ALIAS("vmware_vsock"); 2157 2160 MODULE_ALIAS_NETPROTO(PF_VSOCK);
+1 -1
net/wireless/nl80211.c
··· 13216 13216 struct wireless_dev *wdev; 13217 13217 struct cfg80211_beacon_registration *reg, *tmp; 13218 13218 13219 - if (state != NETLINK_URELEASE) 13219 + if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC) 13220 13220 return NOTIFY_DONE; 13221 13221 13222 13222 rcu_read_lock();
+2 -3
sound/hda/ext/hdac_ext_stream.c
··· 104 104 */ 105 105 void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus) 106 106 { 107 - struct hdac_stream *s; 107 + struct hdac_stream *s, *_s; 108 108 struct hdac_ext_stream *stream; 109 109 struct hdac_bus *bus = ebus_to_hbus(ebus); 110 110 111 - while (!list_empty(&bus->stream_list)) { 112 - s = list_first_entry(&bus->stream_list, struct hdac_stream, list); 111 + list_for_each_entry_safe(s, _s, &bus->stream_list, list) { 113 112 stream = stream_to_hdac_ext_stream(s); 114 113 snd_hdac_ext_stream_decouple(ebus, stream, false); 115 114 list_del(&s->list);
+4 -6
sound/hda/hdac_device.c
··· 299 299 int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid, 300 300 int parm) 301 301 { 302 - int val; 302 + unsigned int cmd, val; 303 303 304 - if (codec->regmap) 305 - regcache_cache_bypass(codec->regmap, true); 306 - val = snd_hdac_read_parm(codec, nid, parm); 307 - if (codec->regmap) 308 - regcache_cache_bypass(codec->regmap, false); 304 + cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm; 305 + if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0) 306 + return -1; 309 307 return val; 310 308 } 311 309 EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
+50 -10
sound/hda/hdac_i915.c
··· 20 20 #include <sound/core.h> 21 21 #include <sound/hdaudio.h> 22 22 #include <sound/hda_i915.h> 23 + #include <sound/hda_register.h> 23 24 24 25 static struct i915_audio_component *hdac_acomp; 25 26 ··· 98 97 } 99 98 EXPORT_SYMBOL_GPL(snd_hdac_display_power); 100 99 100 + #define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ 101 + ((pci)->device == 0x0c0c) || \ 102 + ((pci)->device == 0x0d0c) || \ 103 + ((pci)->device == 0x160c)) 104 + 101 105 /** 102 - * snd_hdac_get_display_clk - Get CDCLK in kHz 106 + * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW 103 107 * @bus: HDA core bus 104 108 * 105 - * This function is supposed to be used only by a HD-audio controller 106 - * driver that needs the interaction with i915 graphics. 109 + * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK 110 + * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value) 111 + * are used to convert CDClk (Core Display Clock) to 24MHz BCLK: 112 + * BCLK = CDCLK * M / N 113 + * The values will be lost when the display power well is disabled and need to 114 + * be restored to avoid abnormal playback speed. 107 115 * 108 - * This function queries CDCLK value in kHz from the graphics driver and 109 - * returns the value. A negative code is returned in error. 116 + * Call this function at initializing and changing power well, as well as 117 + * at ELD notifier for the hotplug. 
110 118 */ 111 - int snd_hdac_get_display_clk(struct hdac_bus *bus) 119 + void snd_hdac_i915_set_bclk(struct hdac_bus *bus) 112 120 { 113 121 struct i915_audio_component *acomp = bus->audio_component; 122 + struct pci_dev *pci = to_pci_dev(bus->dev); 123 + int cdclk_freq; 124 + unsigned int bclk_m, bclk_n; 114 125 115 - if (!acomp || !acomp->ops) 116 - return -ENODEV; 126 + if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq) 127 + return; /* only for i915 binding */ 128 + if (!CONTROLLER_IN_GPU(pci)) 129 + return; /* only HSW/BDW */ 117 130 118 - return acomp->ops->get_cdclk_freq(acomp->dev); 131 + cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev); 132 + switch (cdclk_freq) { 133 + case 337500: 134 + bclk_m = 16; 135 + bclk_n = 225; 136 + break; 137 + 138 + case 450000: 139 + default: /* default CDCLK 450MHz */ 140 + bclk_m = 4; 141 + bclk_n = 75; 142 + break; 143 + 144 + case 540000: 145 + bclk_m = 4; 146 + bclk_n = 90; 147 + break; 148 + 149 + case 675000: 150 + bclk_m = 8; 151 + bclk_n = 225; 152 + break; 153 + } 154 + 155 + snd_hdac_chip_writew(bus, HSW_EM4, bclk_m); 156 + snd_hdac_chip_writew(bus, HSW_EM5, bclk_n); 119 157 } 120 - EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk); 158 + EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk); 121 159 122 160 /* There is a fixed mapping between audio pin node and display port 123 161 * on current Intel platforms:
+28 -12
sound/hda/hdac_regmap.c
··· 453 453 EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw); 454 454 455 455 static int reg_raw_read(struct hdac_device *codec, unsigned int reg, 456 - unsigned int *val) 456 + unsigned int *val, bool uncached) 457 457 { 458 - if (!codec->regmap) 458 + if (uncached || !codec->regmap) 459 459 return hda_reg_read(codec, reg, val); 460 460 else 461 461 return regmap_read(codec->regmap, reg, val); 462 + } 463 + 464 + static int __snd_hdac_regmap_read_raw(struct hdac_device *codec, 465 + unsigned int reg, unsigned int *val, 466 + bool uncached) 467 + { 468 + int err; 469 + 470 + err = reg_raw_read(codec, reg, val, uncached); 471 + if (err == -EAGAIN) { 472 + err = snd_hdac_power_up_pm(codec); 473 + if (!err) 474 + err = reg_raw_read(codec, reg, val, uncached); 475 + snd_hdac_power_down_pm(codec); 476 + } 477 + return err; 462 478 } 463 479 464 480 /** ··· 488 472 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, 489 473 unsigned int *val) 490 474 { 491 - int err; 492 - 493 - err = reg_raw_read(codec, reg, val); 494 - if (err == -EAGAIN) { 495 - err = snd_hdac_power_up_pm(codec); 496 - if (!err) 497 - err = reg_raw_read(codec, reg, val); 498 - snd_hdac_power_down_pm(codec); 499 - } 500 - return err; 475 + return __snd_hdac_regmap_read_raw(codec, reg, val, false); 501 476 } 502 477 EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw); 478 + 479 + /* Works like snd_hdac_regmap_read_raw(), but this doesn't read from the 480 + * cache but always via hda verbs. 481 + */ 482 + int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, 483 + unsigned int reg, unsigned int *val) 484 + { 485 + return __snd_hdac_regmap_read_raw(codec, reg, val, true); 486 + } 503 487 504 488 /** 505 489 * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
+4 -2
sound/pci/hda/hda_generic.c
··· 826 826 bool allow_powerdown) 827 827 { 828 828 hda_nid_t nid, changed = 0; 829 - int i, state; 829 + int i, state, power; 830 830 831 831 for (i = 0; i < path->depth; i++) { 832 832 nid = path->path[i]; ··· 838 838 state = AC_PWRST_D0; 839 839 else 840 840 state = AC_PWRST_D3; 841 - if (!snd_hda_check_power_state(codec, nid, state)) { 841 + power = snd_hda_codec_read(codec, nid, 0, 842 + AC_VERB_GET_POWER_STATE, 0); 843 + if (power != (state | (state << 4))) { 842 844 snd_hda_codec_write(codec, nid, 0, 843 845 AC_VERB_SET_POWER_STATE, state); 844 846 changed = nid;
+7 -52
sound/pci/hda/hda_intel.c
··· 857 857 #define azx_del_card_list(chip) /* NOP */ 858 858 #endif /* CONFIG_PM */ 859 859 860 - /* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK 861 - * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value) 862 - * are used to convert CDClk (Core Display Clock) to 24MHz BCLK: 863 - * BCLK = CDCLK * M / N 864 - * The values will be lost when the display power well is disabled and need to 865 - * be restored to avoid abnormal playback speed. 866 - */ 867 - static void haswell_set_bclk(struct hda_intel *hda) 868 - { 869 - struct azx *chip = &hda->chip; 870 - int cdclk_freq; 871 - unsigned int bclk_m, bclk_n; 872 - 873 - if (!hda->need_i915_power) 874 - return; 875 - 876 - cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip)); 877 - switch (cdclk_freq) { 878 - case 337500: 879 - bclk_m = 16; 880 - bclk_n = 225; 881 - break; 882 - 883 - case 450000: 884 - default: /* default CDCLK 450MHz */ 885 - bclk_m = 4; 886 - bclk_n = 75; 887 - break; 888 - 889 - case 540000: 890 - bclk_m = 4; 891 - bclk_n = 90; 892 - break; 893 - 894 - case 675000: 895 - bclk_m = 8; 896 - bclk_n = 225; 897 - break; 898 - } 899 - 900 - azx_writew(chip, HSW_EM4, bclk_m); 901 - azx_writew(chip, HSW_EM5, bclk_n); 902 - } 903 - 904 860 #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) 905 861 /* 906 862 * power management ··· 914 958 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 915 959 && hda->need_i915_power) { 916 960 snd_hdac_display_power(azx_bus(chip), true); 917 - haswell_set_bclk(hda); 961 + snd_hdac_i915_set_bclk(azx_bus(chip)); 918 962 } 919 963 if (chip->msi) 920 964 if (pci_enable_msi(pci) < 0) ··· 1014 1058 bus = azx_bus(chip); 1015 1059 if (hda->need_i915_power) { 1016 1060 snd_hdac_display_power(bus, true); 1017 - haswell_set_bclk(hda); 1061 + snd_hdac_i915_set_bclk(bus); 1018 1062 } else { 1019 1063 /* toggle codec wakeup bit for STATESTS read */ 1020 1064 snd_hdac_set_codec_wakeup(bus, true); ··· 1752 1796 /* 
initialize chip */ 1753 1797 azx_init_pci(chip); 1754 1798 1755 - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 1756 - struct hda_intel *hda; 1757 - 1758 - hda = container_of(chip, struct hda_intel, chip); 1759 - haswell_set_bclk(hda); 1760 - } 1799 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 1800 + snd_hdac_i915_set_bclk(bus); 1761 1801 1762 1802 hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0); 1763 1803 ··· 2183 2231 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 2184 2232 /* Broxton-P(Apollolake) */ 2185 2233 { PCI_DEVICE(0x8086, 0x5a98), 2234 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2235 + /* Broxton-T */ 2236 + { PCI_DEVICE(0x8086, 0x1a98), 2186 2237 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2187 2238 /* Haswell */ 2188 2239 { PCI_DEVICE(0x8086, 0x0a0c),
+14
sound/pci/hda/patch_cirrus.c
··· 361 361 { 362 362 struct cs_spec *spec = codec->spec; 363 363 int err; 364 + int i; 364 365 365 366 err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0); 366 367 if (err < 0) ··· 370 369 err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); 371 370 if (err < 0) 372 371 return err; 372 + 373 + /* keep the ADCs powered up when it's dynamically switchable */ 374 + if (spec->gen.dyn_adc_switch) { 375 + unsigned int done = 0; 376 + for (i = 0; i < spec->gen.input_mux.num_items; i++) { 377 + int idx = spec->gen.dyn_adc_idx[i]; 378 + if (done & (1 << idx)) 379 + continue; 380 + snd_hda_gen_fix_pin_power(codec, 381 + spec->gen.adc_nids[idx]); 382 + done |= 1 << idx; 383 + } 384 + } 373 385 374 386 return 0; 375 387 }
+3
sound/pci/hda/patch_hdmi.c
··· 1858 1858 struct hdmi_spec *spec = codec->spec; 1859 1859 struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx); 1860 1860 1861 + if (!per_pin) 1862 + return; 1861 1863 mutex_lock(&per_pin->lock); 1862 1864 per_pin->chmap_set = true; 1863 1865 memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap)); ··· 2232 2230 if (atomic_read(&(codec)->core.in_pm)) 2233 2231 return; 2234 2232 2233 + snd_hdac_i915_set_bclk(&codec->bus->core); 2235 2234 check_presence_and_report(codec, pin_nid); 2236 2235 } 2237 2236
+2
sound/pci/hda/patch_realtek.c
··· 5449 5449 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5450 5450 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5451 5451 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), 5452 + SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5452 5453 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK), 5453 5454 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5454 5455 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), ··· 5584 5583 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), 5585 5584 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), 5586 5585 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), 5586 + SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), 5587 5587 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), 5588 5588 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5589 5589 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+1
sound/pci/pcxhr/pcxhr_core.c
··· 1341 1341 } 1342 1342 1343 1343 pcxhr_msg_thread(mgr); 1344 + mutex_unlock(&mgr->lock); 1344 1345 return IRQ_HANDLED; 1345 1346 }
+1
sound/soc/codecs/Kconfig
··· 629 629 630 630 config SND_SOC_RT5616 631 631 tristate "Realtek RT5616 CODEC" 632 + depends on I2C 632 633 633 634 config SND_SOC_RT5631 634 635 tristate "Realtek ALC5631/RT5631 CODEC"
+12
sound/soc/codecs/arizona.c
··· 249 249 } 250 250 EXPORT_SYMBOL_GPL(arizona_init_spk); 251 251 252 + int arizona_free_spk(struct snd_soc_codec *codec) 253 + { 254 + struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec); 255 + struct arizona *arizona = priv->arizona; 256 + 257 + arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona); 258 + arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona); 259 + 260 + return 0; 261 + } 262 + EXPORT_SYMBOL_GPL(arizona_free_spk); 263 + 252 264 static const struct snd_soc_dapm_route arizona_mono_routes[] = { 253 265 { "OUT1R", NULL, "OUT1L" }, 254 266 { "OUT2R", NULL, "OUT2L" },
+2
sound/soc/codecs/arizona.h
··· 307 307 extern int arizona_init_gpio(struct snd_soc_codec *codec); 308 308 extern int arizona_init_mono(struct snd_soc_codec *codec); 309 309 310 + extern int arizona_free_spk(struct snd_soc_codec *codec); 311 + 310 312 extern int arizona_init_dai(struct arizona_priv *priv, int dai); 311 313 312 314 int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
+13 -4
sound/soc/codecs/cs35l32.c
··· 274 274 if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0) 275 275 pdata->sdout_share = val; 276 276 277 - of_property_read_u32(np, "cirrus,boost-manager", &val); 277 + if (of_property_read_u32(np, "cirrus,boost-manager", &val)) 278 + val = -1u; 279 + 278 280 switch (val) { 279 281 case CS35L32_BOOST_MGR_AUTO: 280 282 case CS35L32_BOOST_MGR_AUTO_AUDIO: ··· 284 282 case CS35L32_BOOST_MGR_FIXED: 285 283 pdata->boost_mng = val; 286 284 break; 285 + case -1u: 287 286 default: 288 287 dev_err(&i2c_client->dev, 289 288 "Wrong cirrus,boost-manager DT value %d\n", val); 290 289 pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS; 291 290 } 292 291 293 - of_property_read_u32(np, "cirrus,sdout-datacfg", &val); 292 + if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val)) 293 + val = -1u; 294 294 switch (val) { 295 295 case CS35L32_DATA_CFG_LR_VP: 296 296 case CS35L32_DATA_CFG_LR_STAT: ··· 300 296 case CS35L32_DATA_CFG_LR_VPSTAT: 301 297 pdata->sdout_datacfg = val; 302 298 break; 299 + case -1u: 303 300 default: 304 301 dev_err(&i2c_client->dev, 305 302 "Wrong cirrus,sdout-datacfg DT value %d\n", val); 306 303 pdata->sdout_datacfg = CS35L32_DATA_CFG_LR; 307 304 } 308 305 309 - of_property_read_u32(np, "cirrus,battery-threshold", &val); 306 + if (of_property_read_u32(np, "cirrus,battery-threshold", &val)) 307 + val = -1u; 310 308 switch (val) { 311 309 case CS35L32_BATT_THRESH_3_1V: 312 310 case CS35L32_BATT_THRESH_3_2V: ··· 316 310 case CS35L32_BATT_THRESH_3_4V: 317 311 pdata->batt_thresh = val; 318 312 break; 313 + case -1u: 319 314 default: 320 315 dev_err(&i2c_client->dev, 321 316 "Wrong cirrus,battery-threshold DT value %d\n", val); 322 317 pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V; 323 318 } 324 319 325 - of_property_read_u32(np, "cirrus,battery-recovery", &val); 320 + if (of_property_read_u32(np, "cirrus,battery-recovery", &val)) 321 + val = -1u; 326 322 switch (val) { 327 323 case CS35L32_BATT_RECOV_3_1V: 328 324 case CS35L32_BATT_RECOV_3_2V: ··· 334 
326 case CS35L32_BATT_RECOV_3_6V: 335 327 pdata->batt_recov = val; 336 328 break; 329 + case -1u: 337 330 default: 338 331 dev_err(&i2c_client->dev, 339 332 "Wrong cirrus,battery-recovery DT value %d\n", val);
+3
sound/soc/codecs/cs47l24.c
··· 1108 1108 priv->core.arizona->dapm = NULL; 1109 1109 1110 1110 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 1111 + 1112 + arizona_free_spk(codec); 1113 + 1111 1114 return 0; 1112 1115 } 1113 1116
+42 -52
sound/soc/codecs/hdac_hdmi.c
··· 1420 1420 } 1421 1421 1422 1422 #ifdef CONFIG_PM 1423 - static int hdmi_codec_resume(struct snd_soc_codec *codec) 1423 + static int hdmi_codec_prepare(struct device *dev) 1424 1424 { 1425 - struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec); 1425 + struct hdac_ext_device *edev = to_hda_ext_device(dev); 1426 + struct hdac_device *hdac = &edev->hdac; 1427 + 1428 + pm_runtime_get_sync(&edev->hdac.dev); 1429 + 1430 + /* 1431 + * Power down afg. 1432 + * codec_read is preferred over codec_write to set the power state. 1433 + * This way verb is send to set the power state and response 1434 + * is received. So setting power state is ensured without using loop 1435 + * to read the state. 1436 + */ 1437 + snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE, 1438 + AC_PWRST_D3); 1439 + 1440 + return 0; 1441 + } 1442 + 1443 + static void hdmi_codec_complete(struct device *dev) 1444 + { 1445 + struct hdac_ext_device *edev = to_hda_ext_device(dev); 1426 1446 struct hdac_hdmi_priv *hdmi = edev->private_data; 1427 1447 struct hdac_hdmi_pin *pin; 1428 1448 struct hdac_device *hdac = &edev->hdac; 1429 - struct hdac_bus *bus = hdac->bus; 1430 - int err; 1431 - unsigned long timeout; 1449 + 1450 + /* Power up afg */ 1451 + snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE, 1452 + AC_PWRST_D0); 1432 1453 1433 1454 hdac_hdmi_skl_enable_all_pins(&edev->hdac); 1434 1455 hdac_hdmi_skl_enable_dp12(&edev->hdac); 1435 - 1436 - /* Power up afg */ 1437 - if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) { 1438 - 1439 - snd_hdac_codec_write(hdac, hdac->afg, 0, 1440 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 1441 - 1442 - /* Wait till power state is set to D0 */ 1443 - timeout = jiffies + msecs_to_jiffies(1000); 1444 - while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0) 1445 - && time_before(jiffies, timeout)) { 1446 - msleep(50); 1447 - } 1448 - } 1449 1456 1450 1457 /* 1451 1458 * As the ELD notify callback request is not 
entertained while the ··· 1462 1455 list_for_each_entry(pin, &hdmi->pin_list, head) 1463 1456 hdac_hdmi_present_sense(pin, 1); 1464 1457 1465 - /* 1466 - * Codec power is turned ON during controller resume. 1467 - * Turn it OFF here 1468 - */ 1469 - err = snd_hdac_display_power(bus, false); 1470 - if (err < 0) { 1471 - dev_err(bus->dev, 1472 - "Cannot turn OFF display power on i915, err: %d\n", 1473 - err); 1474 - return err; 1475 - } 1476 - 1477 - return 0; 1458 + pm_runtime_put_sync(&edev->hdac.dev); 1478 1459 } 1479 1460 #else 1480 - #define hdmi_codec_resume NULL 1461 + #define hdmi_codec_prepare NULL 1462 + #define hdmi_codec_complete NULL 1481 1463 #endif 1482 1464 1483 1465 static struct snd_soc_codec_driver hdmi_hda_codec = { 1484 1466 .probe = hdmi_codec_probe, 1485 1467 .remove = hdmi_codec_remove, 1486 - .resume = hdmi_codec_resume, 1487 1468 .idle_bias_off = true, 1488 1469 }; 1489 1470 ··· 1556 1561 struct hdac_ext_device *edev = to_hda_ext_device(dev); 1557 1562 struct hdac_device *hdac = &edev->hdac; 1558 1563 struct hdac_bus *bus = hdac->bus; 1559 - unsigned long timeout; 1560 1564 int err; 1561 1565 1562 1566 dev_dbg(dev, "Enter: %s\n", __func__); ··· 1564 1570 if (!bus) 1565 1571 return 0; 1566 1572 1567 - /* Power down afg */ 1568 - if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) { 1569 - snd_hdac_codec_write(hdac, hdac->afg, 0, 1570 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 1571 - 1572 - /* Wait till power state is set to D3 */ 1573 - timeout = jiffies + msecs_to_jiffies(1000); 1574 - while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3) 1575 - && time_before(jiffies, timeout)) { 1576 - 1577 - msleep(50); 1578 - } 1579 - } 1580 - 1573 + /* 1574 + * Power down afg. 1575 + * codec_read is preferred over codec_write to set the power state. 1576 + * This way verb is send to set the power state and response 1577 + * is received. So setting power state is ensured without using loop 1578 + * to read the state. 
1579 + */ 1580 + snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE, 1581 + AC_PWRST_D3); 1581 1582 err = snd_hdac_display_power(bus, false); 1582 1583 if (err < 0) { 1583 1584 dev_err(bus->dev, "Cannot turn on display power on i915\n"); ··· 1605 1616 hdac_hdmi_skl_enable_dp12(&edev->hdac); 1606 1617 1607 1618 /* Power up afg */ 1608 - if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) 1609 - snd_hdac_codec_write(hdac, hdac->afg, 0, 1610 - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 1619 + snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE, 1620 + AC_PWRST_D0); 1611 1621 1612 1622 return 0; 1613 1623 } ··· 1617 1629 1618 1630 static const struct dev_pm_ops hdac_hdmi_pm = { 1619 1631 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) 1632 + .prepare = hdmi_codec_prepare, 1633 + .complete = hdmi_codec_complete, 1620 1634 }; 1621 1635 1622 1636 static const struct hda_device_id hdmi_list[] = {
+71 -55
sound/soc/codecs/nau8825.c
··· 343 343 SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL, 344 344 0), 345 345 346 - /* ADC for button press detection */ 347 - SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL, 348 - NAU8825_SAR_ADC_EN_SFT, 0), 346 + /* ADC for button press detection. A dapm supply widget is used to 347 + * prevent dapm_power_widgets keeping the codec at SND_SOC_BIAS_ON 348 + * during suspend. 349 + */ 350 + SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL, 351 + NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0), 349 352 350 353 SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0), 351 354 SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0), ··· 610 607 611 608 static void nau8825_restart_jack_detection(struct regmap *regmap) 612 609 { 610 + /* Chip needs one FSCLK cycle in order to generate interrupts, 611 + * as we cannot guarantee one will be provided by the system. Turning 612 + * master mode on then off enables us to generate that FSCLK cycle 613 + * with a minimum of contention on the clock bus. 614 + */ 615 + regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2, 616 + NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER); 617 + regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2, 618 + NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE); 619 + 613 620 /* this will restart the entire jack detection process including MIC/GND 614 621 * switching and create interrupts. We have to go from 0 to 1 and back 615 622 * to 0 to restart. 
··· 741 728 struct regmap *regmap = nau8825->regmap; 742 729 int active_irq, clear_irq = 0, event = 0, event_mask = 0; 743 730 744 - regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq); 731 + if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) { 732 + dev_err(nau8825->dev, "failed to read irq status\n"); 733 + return IRQ_NONE; 734 + } 745 735 746 736 if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) == 747 737 NAU8825_JACK_EJECTION_DETECTED) { ··· 1157 1141 return ret; 1158 1142 } 1159 1143 } 1160 - 1161 - ret = regcache_sync(nau8825->regmap); 1162 - if (ret) { 1163 - dev_err(codec->dev, 1164 - "Failed to sync cache: %d\n", ret); 1165 - return ret; 1166 - } 1167 1144 } 1168 - 1169 1145 break; 1170 1146 1171 1147 case SND_SOC_BIAS_OFF: 1172 1148 if (nau8825->mclk_freq) 1173 1149 clk_disable_unprepare(nau8825->mclk); 1174 - 1175 - regcache_mark_dirty(nau8825->regmap); 1176 1150 break; 1177 1151 } 1178 1152 return 0; 1179 1153 } 1154 + 1155 + #ifdef CONFIG_PM 1156 + static int nau8825_suspend(struct snd_soc_codec *codec) 1157 + { 1158 + struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); 1159 + 1160 + disable_irq(nau8825->irq); 1161 + regcache_cache_only(nau8825->regmap, true); 1162 + regcache_mark_dirty(nau8825->regmap); 1163 + 1164 + return 0; 1165 + } 1166 + 1167 + static int nau8825_resume(struct snd_soc_codec *codec) 1168 + { 1169 + struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); 1170 + 1171 + /* The chip may lose power and reset in S3. regcache_sync restores 1172 + * register values including configurations for sysclk, irq, and 1173 + * jack/button detection. 1174 + */ 1175 + regcache_cache_only(nau8825->regmap, false); 1176 + regcache_sync(nau8825->regmap); 1177 + 1178 + /* Check the jack plug status directly. 
If the headset is unplugged 1179 + * during S3 when the chip has no power, there will be no jack 1180 + * detection irq even after the nau8825_restart_jack_detection below, 1181 + * because the chip just thinks no headset has ever been plugged in. 1182 + */ 1183 + if (!nau8825_is_jack_inserted(nau8825->regmap)) { 1184 + nau8825_eject_jack(nau8825); 1185 + snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET); 1186 + } 1187 + 1188 + enable_irq(nau8825->irq); 1189 + 1190 + /* Run jack detection to check the type (OMTP or CTIA) of the headset 1191 + * if there is one. This handles the case where a different type of 1192 + * headset is plugged in during S3. This triggers an IRQ iff a headset 1193 + * is already plugged in. 1194 + */ 1195 + nau8825_restart_jack_detection(nau8825->regmap); 1196 + 1197 + return 0; 1198 + } 1199 + #else 1200 + #define nau8825_suspend NULL 1201 + #define nau8825_resume NULL 1202 + #endif 1180 1203 1181 1204 static struct snd_soc_codec_driver nau8825_codec_driver = { 1182 1205 .probe = nau8825_codec_probe, ··· 1223 1168 .set_pll = nau8825_set_pll, 1224 1169 .set_bias_level = nau8825_set_bias_level, 1225 1170 .suspend_bias_off = true, 1171 + .suspend = nau8825_suspend, 1172 + .resume = nau8825_resume, 1226 1173 1227 1174 .controls = nau8825_controls, 1228 1175 .num_controls = ARRAY_SIZE(nau8825_controls), ··· 1334 1277 regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL, 1335 1278 NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR); 1336 1279 1337 - /* Chip needs one FSCLK cycle in order to generate interrupts, 1338 - * as we cannot guarantee one will be provided by the system. Turning 1339 - * master mode on then off enables us to generate that FSCLK cycle 1340 - * with a minimum of contention on the clock bus. 
1341 - */ 1342 - regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2, 1343 - NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER); 1344 - regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2, 1345 - NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE); 1346 - 1347 1280 ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL, 1348 1281 nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, 1349 1282 "nau8825", nau8825); ··· 1401 1354 return 0; 1402 1355 } 1403 1356 1404 - #ifdef CONFIG_PM_SLEEP 1405 - static int nau8825_suspend(struct device *dev) 1406 - { 1407 - struct i2c_client *client = to_i2c_client(dev); 1408 - struct nau8825 *nau8825 = dev_get_drvdata(dev); 1409 - 1410 - disable_irq(client->irq); 1411 - regcache_cache_only(nau8825->regmap, true); 1412 - regcache_mark_dirty(nau8825->regmap); 1413 - 1414 - return 0; 1415 - } 1416 - 1417 - static int nau8825_resume(struct device *dev) 1418 - { 1419 - struct i2c_client *client = to_i2c_client(dev); 1420 - struct nau8825 *nau8825 = dev_get_drvdata(dev); 1421 - 1422 - regcache_cache_only(nau8825->regmap, false); 1423 - regcache_sync(nau8825->regmap); 1424 - enable_irq(client->irq); 1425 - 1426 - return 0; 1427 - } 1428 - #endif 1429 - 1430 - static const struct dev_pm_ops nau8825_pm = { 1431 - SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume) 1432 - }; 1433 - 1434 1357 static const struct i2c_device_id nau8825_i2c_ids[] = { 1435 1358 { "nau8825", 0 }, 1436 1359 { } ··· 1427 1410 .name = "nau8825", 1428 1411 .of_match_table = of_match_ptr(nau8825_of_ids), 1429 1412 .acpi_match_table = ACPI_PTR(nau8825_acpi_match), 1430 - .pm = &nau8825_pm, 1431 1413 }, 1432 1414 .probe = nau8825_i2c_probe, 1433 1415 .remove = nau8825_i2c_remove,
+1 -1
sound/soc/codecs/rt5640.c
··· 359 359 360 360 /* Interface data select */ 361 361 static const char * const rt5640_data_select[] = { 362 - "Normal", "left copy to right", "right copy to left", "Swap"}; 362 + "Normal", "Swap", "left copy to right", "right copy to left"}; 363 363 364 364 static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA, 365 365 RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
+18 -18
sound/soc/codecs/rt5640.h
··· 443 443 #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14) 444 444 #define RT5640_IF1_DAC_SEL_SFT 14 445 445 #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14) 446 - #define RT5640_IF1_DAC_SEL_L2R (0x1 << 14) 447 - #define RT5640_IF1_DAC_SEL_R2L (0x2 << 14) 448 - #define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14) 446 + #define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14) 447 + #define RT5640_IF1_DAC_SEL_L2R (0x2 << 14) 448 + #define RT5640_IF1_DAC_SEL_R2L (0x3 << 14) 449 449 #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12) 450 450 #define RT5640_IF1_ADC_SEL_SFT 12 451 451 #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12) 452 - #define RT5640_IF1_ADC_SEL_L2R (0x1 << 12) 453 - #define RT5640_IF1_ADC_SEL_R2L (0x2 << 12) 454 - #define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12) 452 + #define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12) 453 + #define RT5640_IF1_ADC_SEL_L2R (0x2 << 12) 454 + #define RT5640_IF1_ADC_SEL_R2L (0x3 << 12) 455 455 #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10) 456 456 #define RT5640_IF2_DAC_SEL_SFT 10 457 457 #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10) 458 - #define RT5640_IF2_DAC_SEL_L2R (0x1 << 10) 459 - #define RT5640_IF2_DAC_SEL_R2L (0x2 << 10) 460 - #define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10) 458 + #define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10) 459 + #define RT5640_IF2_DAC_SEL_L2R (0x2 << 10) 460 + #define RT5640_IF2_DAC_SEL_R2L (0x3 << 10) 461 461 #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8) 462 462 #define RT5640_IF2_ADC_SEL_SFT 8 463 463 #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8) 464 - #define RT5640_IF2_ADC_SEL_L2R (0x1 << 8) 465 - #define RT5640_IF2_ADC_SEL_R2L (0x2 << 8) 466 - #define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8) 464 + #define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8) 465 + #define RT5640_IF2_ADC_SEL_L2R (0x2 << 8) 466 + #define RT5640_IF2_ADC_SEL_R2L (0x3 << 8) 467 467 #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6) 468 468 #define RT5640_IF3_DAC_SEL_SFT 6 469 469 #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6) 470 - #define RT5640_IF3_DAC_SEL_L2R (0x1 << 6) 471 - #define RT5640_IF3_DAC_SEL_R2L (0x2 << 6) 
472 - #define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6) 470 + #define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6) 471 + #define RT5640_IF3_DAC_SEL_L2R (0x2 << 6) 472 + #define RT5640_IF3_DAC_SEL_R2L (0x3 << 6) 473 473 #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4) 474 474 #define RT5640_IF3_ADC_SEL_SFT 4 475 475 #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4) 476 - #define RT5640_IF3_ADC_SEL_L2R (0x1 << 4) 477 - #define RT5640_IF3_ADC_SEL_R2L (0x2 << 4) 478 - #define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4) 476 + #define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4) 477 + #define RT5640_IF3_ADC_SEL_L2R (0x2 << 4) 478 + #define RT5640_IF3_ADC_SEL_R2L (0x3 << 4) 479 479 480 480 /* REC Left Mixer Control 1 (0x3b) */ 481 481 #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
+5
sound/soc/codecs/wm5102.c
··· 1955 1955 static int wm5102_codec_remove(struct snd_soc_codec *codec) 1956 1956 { 1957 1957 struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec); 1958 + struct arizona *arizona = priv->core.arizona; 1958 1959 1959 1960 wm_adsp2_codec_remove(&priv->core.adsp[0], codec); 1960 1961 1961 1962 priv->core.arizona->dapm = NULL; 1963 + 1964 + arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 1965 + 1966 + arizona_free_spk(codec); 1962 1967 1963 1968 return 0; 1964 1969 }
+2
sound/soc/codecs/wm5110.c
··· 2298 2298 2299 2299 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 2300 2300 2301 + arizona_free_spk(codec); 2302 + 2301 2303 return 0; 2302 2304 } 2303 2305
+1 -1
sound/soc/codecs/wm8962.c
··· 2471 2471 break; 2472 2472 default: 2473 2473 dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n"); 2474 - dspclk = wm8962->sysclk; 2474 + dspclk = wm8962->sysclk_rate; 2475 2475 } 2476 2476 2477 2477 dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
+2
sound/soc/codecs/wm8997.c
··· 1072 1072 1073 1073 priv->core.arizona->dapm = NULL; 1074 1074 1075 + arizona_free_spk(codec); 1076 + 1075 1077 return 0; 1076 1078 } 1077 1079
+2
sound/soc/codecs/wm8998.c
··· 1324 1324 1325 1325 priv->core.arizona->dapm = NULL; 1326 1326 1327 + arizona_free_spk(codec); 1328 + 1327 1329 return 0; 1328 1330 } 1329 1331
-1
sound/soc/intel/Kconfig
··· 163 163 tristate 164 164 select SND_HDA_EXT_CORE 165 165 select SND_SOC_TOPOLOGY 166 - select SND_HDA_I915 167 166 select SND_SOC_INTEL_SST 168 167 169 168 config SND_SOC_INTEL_SKL_RT286_MACH
+1 -1
sound/soc/intel/haswell/sst-haswell-ipc.c
··· 1345 1345 return 0; 1346 1346 1347 1347 /* wait for pause to complete before we reset the stream */ 1348 - while (stream->running && tries--) 1348 + while (stream->running && --tries) 1349 1349 msleep(1); 1350 1350 if (!tries) { 1351 1351 dev_err(hsw->dev, "error: reset stream %d still running\n",
+5
sound/soc/intel/skylake/skl-sst-dsp.c
··· 336 336 skl_ipc_int_disable(dsp); 337 337 338 338 free_irq(dsp->irq, dsp); 339 + dsp->cl_dev.ops.cl_cleanup_controller(dsp); 340 + skl_cldma_int_disable(dsp); 341 + skl_ipc_op_int_disable(dsp); 342 + skl_ipc_int_disable(dsp); 343 + 339 344 skl_dsp_disable_core(dsp); 340 345 } 341 346 EXPORT_SYMBOL_GPL(skl_dsp_free);
+27 -13
sound/soc/intel/skylake/skl-topology.c
··· 239 239 { 240 240 int multiplier = 1; 241 241 struct skl_module_fmt *in_fmt, *out_fmt; 242 + int in_rate, out_rate; 242 243 243 244 244 245 /* Since fixups is applied to pin 0 only, ibs, obs needs ··· 250 249 251 250 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 252 251 multiplier = 5; 253 - mcfg->ibs = (in_fmt->s_freq / 1000) * 254 - (mcfg->in_fmt->channels) * 255 - (mcfg->in_fmt->bit_depth >> 3) * 256 - multiplier; 257 252 258 - mcfg->obs = (mcfg->out_fmt->s_freq / 1000) * 259 - (mcfg->out_fmt->channels) * 260 - (mcfg->out_fmt->bit_depth >> 3) * 261 - multiplier; 253 + if (in_fmt->s_freq % 1000) 254 + in_rate = (in_fmt->s_freq / 1000) + 1; 255 + else 256 + in_rate = (in_fmt->s_freq / 1000); 257 + 258 + mcfg->ibs = in_rate * (mcfg->in_fmt->channels) * 259 + (mcfg->in_fmt->bit_depth >> 3) * 260 + multiplier; 261 + 262 + if (mcfg->out_fmt->s_freq % 1000) 263 + out_rate = (mcfg->out_fmt->s_freq / 1000) + 1; 264 + else 265 + out_rate = (mcfg->out_fmt->s_freq / 1000); 266 + 267 + mcfg->obs = out_rate * (mcfg->out_fmt->channels) * 268 + (mcfg->out_fmt->bit_depth >> 3) * 269 + multiplier; 262 270 } 263 271 264 272 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, ··· 495 485 if (!skl_is_pipe_mcps_avail(skl, mconfig)) 496 486 return -ENOMEM; 497 487 488 + skl_tplg_alloc_pipe_mcps(skl, mconfig); 489 + 498 490 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) { 499 491 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, 500 492 mconfig->id.module_id, mconfig->guid); 501 493 if (ret < 0) 502 494 return ret; 495 + 496 + mconfig->m_state = SKL_MODULE_LOADED; 503 497 } 504 498 505 499 /* update blob if blob is null for be with default value */ ··· 523 509 ret = skl_tplg_set_module_params(w, ctx); 524 510 if (ret < 0) 525 511 return ret; 526 - skl_tplg_alloc_pipe_mcps(skl, mconfig); 527 512 } 528 513 529 514 return 0; ··· 537 524 list_for_each_entry(w_module, &pipe->w_list, node) { 538 525 mconfig = w_module->w->priv; 539 526 540 - if (mconfig->is_loadable && 
ctx->dsp->fw_ops.unload_mod) 527 + if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod && 528 + mconfig->m_state > SKL_MODULE_UNINIT) 541 529 return ctx->dsp->fw_ops.unload_mod(ctx->dsp, 542 530 mconfig->id.module_id); 543 531 } ··· 571 557 572 558 if (!skl_is_pipe_mem_avail(skl, mconfig)) 573 559 return -ENOMEM; 560 + 561 + skl_tplg_alloc_pipe_mem(skl, mconfig); 562 + skl_tplg_alloc_pipe_mcps(skl, mconfig); 574 563 575 564 /* 576 565 * Create a list of modules for pipe. ··· 617 600 618 601 src_module = dst_module; 619 602 } 620 - 621 - skl_tplg_alloc_pipe_mem(skl, mconfig); 622 - skl_tplg_alloc_pipe_mcps(skl, mconfig); 623 603 624 604 return 0; 625 605 }
+4 -4
sound/soc/intel/skylake/skl-topology.h
··· 274 274 275 275 enum skl_module_state { 276 276 SKL_MODULE_UNINIT = 0, 277 - SKL_MODULE_INIT_DONE = 1, 278 - SKL_MODULE_LOADED = 2, 279 - SKL_MODULE_UNLOADED = 3, 280 - SKL_MODULE_BIND_DONE = 4 277 + SKL_MODULE_LOADED = 1, 278 + SKL_MODULE_INIT_DONE = 2, 279 + SKL_MODULE_BIND_DONE = 3, 280 + SKL_MODULE_UNLOADED = 4, 281 281 }; 282 282 283 283 struct skl_module_cfg {
+23 -9
sound/soc/intel/skylake/skl.c
··· 222 222 struct hdac_ext_bus *ebus = pci_get_drvdata(pci); 223 223 struct skl *skl = ebus_to_skl(ebus); 224 224 struct hdac_bus *bus = ebus_to_hbus(ebus); 225 + int ret = 0; 225 226 226 227 /* 227 228 * Do not suspend if streams which are marked ignore suspend are ··· 233 232 enable_irq_wake(bus->irq); 234 233 pci_save_state(pci); 235 234 pci_disable_device(pci); 236 - return 0; 237 235 } else { 238 - return _skl_suspend(ebus); 236 + ret = _skl_suspend(ebus); 237 + if (ret < 0) 238 + return ret; 239 239 } 240 + 241 + if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 242 + ret = snd_hdac_display_power(bus, false); 243 + if (ret < 0) 244 + dev_err(bus->dev, 245 + "Cannot turn OFF display power on i915\n"); 246 + } 247 + 248 + return ret; 240 249 } 241 250 242 251 static int skl_resume(struct device *dev) ··· 327 316 328 317 if (bus->irq >= 0) 329 318 free_irq(bus->irq, (void *)bus); 330 - if (bus->remap_addr) 331 - iounmap(bus->remap_addr); 332 - 333 319 snd_hdac_bus_free_stream_pages(bus); 334 320 snd_hdac_stream_free_all(ebus); 335 321 snd_hdac_link_free_all(ebus); 322 + 323 + if (bus->remap_addr) 324 + iounmap(bus->remap_addr); 325 + 336 326 pci_release_regions(skl->pci); 337 327 pci_disable_device(skl->pci); 338 328 339 329 snd_hdac_ext_bus_exit(ebus); 340 330 331 + if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 332 + snd_hdac_i915_exit(&ebus->bus); 341 333 return 0; 342 334 } 343 335 ··· 733 719 if (skl->tplg) 734 720 release_firmware(skl->tplg); 735 721 736 - if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 737 - snd_hdac_i915_exit(&ebus->bus); 738 - 739 722 if (pci_dev_run_wake(pci)) 740 723 pm_runtime_get_noresume(&pci->dev); 741 - pci_dev_put(pci); 724 + 725 + /* codec removal, invoke bus_device_remove */ 726 + snd_hdac_ext_bus_device_remove(ebus); 727 + 742 728 skl_platform_unregister(&pci->dev); 743 729 skl_free_dsp(skl); 744 730 skl_machine_device_unregister(skl);
+7
sound/soc/soc-dapm.c
··· 2188 2188 int count = 0; 2189 2189 char *state = "not set"; 2190 2190 2191 + /* card won't be set for the dummy component, as a spot fix 2192 + * we're checking for that case specifically here but in future 2193 + * we will ensure that the dummy component looks like others. 2194 + */ 2195 + if (!cmpnt->card) 2196 + return 0; 2197 + 2191 2198 list_for_each_entry(w, &cmpnt->card->widgets, list) { 2192 2199 if (w->dapm != dapm) 2193 2200 continue;
+29 -9
tools/objtool/Documentation/stack-validation.txt
··· 299 299 Errors in .c files 300 300 ------------------ 301 301 302 - If you're getting an objtool error in a compiled .c file, chances are 303 - the file uses an asm() statement which has a "call" instruction. An 304 - asm() statement with a call instruction must declare the use of the 305 - stack pointer in its output operand. For example, on x86_64: 302 + 1. c_file.o: warning: objtool: funcA() falls through to next function funcB() 306 303 307 - register void *__sp asm("rsp"); 308 - asm volatile("call func" : "+r" (__sp)); 304 + This means that funcA() doesn't end with a return instruction or an 305 + unconditional jump, and that objtool has determined that the function 306 + can fall through into the next function. There could be different 307 + reasons for this: 309 308 310 - Otherwise the stack frame may not get created before the call. 309 + 1) funcA()'s last instruction is a call to a "noreturn" function like 310 + panic(). In this case the noreturn function needs to be added to 311 + objtool's hard-coded global_noreturns array. Feel free to bug the 312 + objtool maintainer, or you can submit a patch. 311 313 312 - Another possible cause for errors in C code is if the Makefile removes 313 - -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options. 314 + 2) funcA() uses the unreachable() annotation in a section of code 315 + that is actually reachable. 316 + 317 + 3) If funcA() calls an inline function, the object code for funcA() 318 + might be corrupt due to a gcc bug. For more details, see: 319 + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646 320 + 321 + 2. If you're getting any other objtool error in a compiled .c file, it 322 + may be because the file uses an asm() statement which has a "call" 323 + instruction. An asm() statement with a call instruction must declare 324 + the use of the stack pointer in its output operand. 
For example, on 325 + x86_64: 326 + 327 + register void *__sp asm("rsp"); 328 + asm volatile("call func" : "+r" (__sp)); 329 + 330 + Otherwise the stack frame may not get created before the call. 331 + 332 + 3. Another possible cause for errors in C code is if the Makefile removes 333 + -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options. 314 334 315 335 Also see the above section for .S file errors for more information what 316 336 the individual error messages mean.
+72 -25
tools/objtool/builtin-check.c
··· 54 54 struct symbol *call_dest; 55 55 struct instruction *jump_dest; 56 56 struct list_head alts; 57 + struct symbol *func; 57 58 }; 58 59 59 60 struct alternative { ··· 67 66 struct list_head insn_list; 68 67 DECLARE_HASHTABLE(insn_hash, 16); 69 68 struct section *rodata, *whitelist; 69 + bool ignore_unreachables, c_file; 70 70 }; 71 71 72 72 const char *objname; ··· 230 228 } 231 229 } 232 230 233 - if (insn->type == INSN_JUMP_DYNAMIC) 231 + if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts)) 234 232 /* sibling call */ 235 233 return 0; 236 234 } ··· 250 248 static int decode_instructions(struct objtool_file *file) 251 249 { 252 250 struct section *sec; 251 + struct symbol *func; 253 252 unsigned long offset; 254 253 struct instruction *insn; 255 254 int ret; ··· 283 280 284 281 hash_add(file->insn_hash, &insn->hash, insn->offset); 285 282 list_add_tail(&insn->list, &file->insn_list); 283 + } 284 + 285 + list_for_each_entry(func, &sec->symbol_list, list) { 286 + if (func->type != STT_FUNC) 287 + continue; 288 + 289 + if (!find_insn(file, sec, func->offset)) { 290 + WARN("%s(): can't find starting instruction", 291 + func->name); 292 + return -1; 293 + } 294 + 295 + func_for_each_insn(file, func, insn) 296 + if (!insn->func) 297 + insn->func = func; 286 298 } 287 299 } 288 300 ··· 682 664 text_rela->addend); 683 665 684 666 /* 685 - * TODO: Document where this is needed, or get rid of it. 686 - * 687 667 * rare case: jmpq *[addr](%rip) 668 + * 669 + * This check is for a rare gcc quirk, currently only seen in 670 + * three driver functions in the kernel, only with certain 671 + * obscure non-distro configs. 672 + * 673 + * As part of an optimization, gcc makes a copy of an existing 674 + * switch jump table, modifies it, and then hard-codes the jump 675 + * (albeit with an indirect jump) to use a single entry in the 676 + * table. The rest of the jump table and some of its jump 677 + * targets remain as dead code. 
678 + * 679 + * In such a case we can just crudely ignore all unreachable 680 + * instruction warnings for the entire object file. Ideally we 681 + * would just ignore them for the function, but that would 682 + * require redesigning the code quite a bit. And honestly 683 + * that's just not worth doing: unreachable instruction 684 + * warnings are of questionable value anyway, and this is such 685 + * a rare issue. 686 + * 687 + * kbuild reports: 688 + * - https://lkml.kernel.org/r/201603231906.LWcVUpxm%25fengguang.wu@intel.com 689 + * - https://lkml.kernel.org/r/201603271114.K9i45biy%25fengguang.wu@intel.com 690 + * - https://lkml.kernel.org/r/201603291058.zuJ6ben1%25fengguang.wu@intel.com 691 + * 692 + * gcc bug: 693 + * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70604 688 694 */ 689 - if (!rodata_rela) 695 + if (!rodata_rela) { 690 696 rodata_rela = find_rela_by_dest(file->rodata, 691 697 text_rela->addend + 4); 698 + if (rodata_rela) 699 + file->ignore_unreachables = true; 700 + } 692 701 693 702 if (!rodata_rela) 694 703 continue; ··· 776 731 static int decode_sections(struct objtool_file *file) 777 732 { 778 733 int ret; 779 - 780 - file->whitelist = find_section_by_name(file->elf, "__func_stack_frame_non_standard"); 781 - file->rodata = find_section_by_name(file->elf, ".rodata"); 782 734 783 735 ret = decode_instructions(file); 784 736 if (ret) ··· 841 799 struct alternative *alt; 842 800 struct instruction *insn; 843 801 struct section *sec; 802 + struct symbol *func = NULL; 844 803 unsigned char state; 845 804 int ret; 846 805 ··· 856 813 } 857 814 858 815 while (1) { 816 + if (file->c_file && insn->func) { 817 + if (func && func != insn->func) { 818 + WARN("%s() falls through to next function %s()", 819 + func->name, insn->func->name); 820 + return 1; 821 + } 822 + 823 + func = insn->func; 824 + } 825 + 859 826 if (insn->visited) { 860 827 if (frame_state(insn->state) != frame_state(state)) { 861 828 WARN_FUNC("frame pointer state mismatch", ··· 875 
822 876 823 return 0; 877 824 } 878 - 879 - /* 880 - * Catch a rare case where a noreturn function falls through to 881 - * the next function. 882 - */ 883 - if (is_fentry_call(insn) && (state & STATE_FENTRY)) 884 - return 0; 885 825 886 826 insn->visited = true; 887 827 insn->state = state; ··· 1081 1035 continue; 1082 1036 1083 1037 insn = find_insn(file, sec, func->offset); 1084 - if (!insn) { 1085 - WARN("%s(): can't find starting instruction", 1086 - func->name); 1087 - warnings++; 1038 + if (!insn) 1088 1039 continue; 1089 - } 1090 1040 1091 1041 ret = validate_branch(file, insn, 0); 1092 1042 warnings += ret; ··· 1098 1056 if (insn->visited) 1099 1057 continue; 1100 1058 1101 - if (!ignore_unreachable_insn(func, insn) && 1102 - !warnings) { 1103 - WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset); 1104 - warnings++; 1105 - } 1106 - 1107 1059 insn->visited = true; 1060 + 1061 + if (file->ignore_unreachables || warnings || 1062 + ignore_unreachable_insn(func, insn)) 1063 + continue; 1064 + 1065 + WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset); 1066 + warnings++; 1108 1067 } 1109 1068 } 1110 1069 } ··· 1176 1133 1177 1134 INIT_LIST_HEAD(&file.insn_list); 1178 1135 hash_init(file.insn_hash); 1136 + file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard"); 1137 + file.rodata = find_section_by_name(file.elf, ".rodata"); 1138 + file.ignore_unreachables = false; 1139 + file.c_file = find_section_by_name(file.elf, ".comment"); 1179 1140 1180 1141 ret = decode_sections(&file); 1181 1142 if (ret < 0)
+1
tools/testing/selftests/net/.gitignore
··· 3 3 psock_tpacket 4 4 reuseport_bpf 5 5 reuseport_bpf_cpu 6 + reuseport_dualstack
+1 -1
tools/testing/selftests/net/Makefile
··· 4 4 5 5 CFLAGS += -I../../../../usr/include/ 6 6 7 - NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu 7 + NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack 8 8 9 9 all: $(NET_PROGS) 10 10 %: %.c
+208
tools/testing/selftests/net/reuseport_dualstack.c
/*
 * It is possible to use SO_REUSEPORT to open multiple sockets bound to
 * equivalent local addresses using AF_INET and AF_INET6 at the same time. If
 * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
 * receive a given incoming packet. However, when it is not set, incoming v4
 * packets should prefer the AF_INET socket(s). This behavior was defined with
 * the original SO_REUSEPORT implementation, but broke with
 * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
 * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
 * AF_INET preference for v4 packets.
 */

#define _GNU_SOURCE

#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <linux/in.h>
#include <linux/unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

static const int PORT = 8888;

/*
 * Create 'count' listening/receiving sockets of the given address family and
 * protocol, all bound to the wildcard address on PORT with SO_REUSEPORT set.
 * The descriptors are stored in rcv_fds[0..count-1]; any failure aborts.
 */
static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
{
	struct sockaddr_storage addr;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	int opt, i;

	switch (family) {
	case AF_INET:
		addr4 = (struct sockaddr_in *)&addr;
		addr4->sin_family = AF_INET;
		addr4->sin_addr.s_addr = htonl(INADDR_ANY);
		addr4->sin_port = htons(PORT);
		break;
	case AF_INET6:
		addr6 = (struct sockaddr_in6 *)&addr;
		addr6->sin6_family = AF_INET6;
		addr6->sin6_addr = in6addr_any;
		addr6->sin6_port = htons(PORT);
		break;
	default:
		error(1, 0, "Unsupported family %d", family);
	}

	for (i = 0; i < count; ++i) {
		rcv_fds[i] = socket(family, proto, 0);
		if (rcv_fds[i] < 0)
			error(1, errno, "failed to create receive socket");

		opt = 1;
		if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
			       sizeof(opt)))
			error(1, errno, "failed to set SO_REUSEPORT");

		if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
			error(1, errno, "failed to bind receive socket");

		if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
			error(1, errno, "failed to listen on receive port");
	}
}

/*
 * Send a single one-byte message to PORT on the IPv4 loopback address from an
 * AF_INET socket, so the kernel must choose among the reuseport group members.
 */
static void send_from_v4(int proto)
{
	struct sockaddr_in saddr, daddr;
	int fd;

	saddr.sin_family = AF_INET;
	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
	saddr.sin_port = 0;

	daddr.sin_family = AF_INET;
	daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	daddr.sin_port = htons(PORT);

	fd = socket(AF_INET, proto, 0);
	if (fd < 0)
		error(1, errno, "failed to create send socket");

	if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
		error(1, errno, "failed to bind send socket");

	if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
		error(1, errno, "failed to connect send socket");

	if (send(fd, "a", 1, 0) < 0)
		error(1, errno, "failed to send message");

	close(fd);
}

/*
 * Wait for exactly one of the registered sockets to become readable, consume
 * the incoming data (accepting first for TCP), and return the fd that the
 * kernel delivered the packet/connection to.
 */
static int receive_once(int epfd, int proto)
{
	struct epoll_event ev;
	int i, fd;
	char buf[8];

	i = epoll_wait(epfd, &ev, 1, -1);
	if (i < 0)
		error(1, errno, "epoll_wait failed");

	if (proto == SOCK_STREAM) {
		fd = accept(ev.data.fd, NULL, NULL);
		if (fd < 0)
			error(1, errno, "failed to accept");
		i = recv(fd, buf, sizeof(buf), 0);
		close(fd);
	} else {
		i = recv(ev.data.fd, buf, sizeof(buf), 0);
	}

	if (i < 0)
		error(1, errno, "failed to recv");

	return ev.data.fd;
}

/*
 * Register all rcv_fds with epoll, send one v4 packet, and assert that the
 * socket which received it belongs to AF_INET (the v4 sockets must be
 * preferred over unbound-v6 ones for v4 traffic).
 */
static void test(int *rcv_fds, int count, int proto)
{
	struct epoll_event ev;
	int epfd, i, test_fd;
	uint16_t test_family;
	/*
	 * getsockopt() is a value-result call: 'len' must hold the buffer
	 * size on entry or the result is truncated/undefined. Leaving it
	 * uninitialized here was a real bug.
	 */
	socklen_t len = sizeof(test_family);

	epfd = epoll_create(1);
	if (epfd < 0)
		error(1, errno, "failed to create epoll");

	ev.events = EPOLLIN;
	for (i = 0; i < count; ++i) {
		ev.data.fd = rcv_fds[i];
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
			error(1, errno, "failed to register sock epoll");
	}

	send_from_v4(proto);

	test_fd = receive_once(epfd, proto);
	if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
		error(1, errno, "failed to read socket domain");
	if (test_family != AF_INET)
		error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
		      test_family);

	close(epfd);
}

int main(void)
{
	int rcv_fds[32], i;

	fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_DGRAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_DGRAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);

	/* NOTE: UDP socket lookups traverse a different code path when there
	 * are > 10 sockets in a group.
	 */
	fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
	test(rcv_fds, 32, SOCK_DGRAM);
	for (i = 0; i < 32; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
	test(rcv_fds, 32, SOCK_DGRAM);
	for (i = 0; i < 32; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
	build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
	build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_STREAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
	build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
	build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_STREAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "SUCCESS\n");
	return 0;
}