Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linus' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+6536 -3296
+2 -2
Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
··· 1 - What: state 1 + What: /sys/devices/system/ibm_rtl/state 2 2 Date: Sep 2010 3 3 KernelVersion: 2.6.37 4 4 Contact: Vernon Mauery <vernux@us.ibm.com> ··· 10 10 Users: The ibm-prtm userspace daemon uses this interface. 11 11 12 12 13 - What: version 13 + What: /sys/devices/system/ibm_rtl/version 14 14 Date: Sep 2010 15 15 KernelVersion: 2.6.37 16 16 Contact: Vernon Mauery <vernux@us.ibm.com>
+2 -2
Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
··· 6 6 7 7 Required properties: 8 8 9 - - compatible : should be "aspeed,ast2400-bt-bmc" 9 + - compatible : should be "aspeed,ast2400-ibt-bmc" 10 10 - reg: physical address and size of the registers 11 11 12 12 Optional properties: ··· 17 17 Example: 18 18 19 19 ibt@1e789140 { 20 - compatible = "aspeed,ast2400-bt-bmc"; 20 + compatible = "aspeed,ast2400-ibt-bmc"; 21 21 reg = <0x1e789140 0x18>; 22 22 interrupts = <8>; 23 23 };
+5
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
··· 43 43 reset signal present internally in some host controller IC designs. 44 44 See Documentation/devicetree/bindings/reset/reset.txt for details. 45 45 46 + * reset-names: request name for using "resets" property. Must be "reset". 47 + (It will be used together with "resets" property.) 48 + 46 49 * clocks: from common clock binding: handle to biu and ciu clocks for the 47 50 bus interface unit clock and the card interface unit clock. 48 51 ··· 106 103 interrupts = <0 75 0>; 107 104 #address-cells = <1>; 108 105 #size-cells = <0>; 106 + resets = <&rst 20>; 107 + reset-names = "reset"; 109 108 }; 110 109 111 110 [board specific internal DMA resources]
+8 -3
Documentation/devicetree/bindings/pci/rockchip-pcie.txt
··· 26 26 - "sys" 27 27 - "legacy" 28 28 - "client" 29 - - resets: Must contain five entries for each entry in reset-names. 29 + - resets: Must contain seven entries for each entry in reset-names. 30 30 See ../reset/reset.txt for details. 31 31 - reset-names: Must include the following names 32 32 - "core" 33 33 - "mgmt" 34 34 - "mgmt-sticky" 35 35 - "pipe" 36 + - "pm" 37 + - "aclk" 38 + - "pclk" 36 39 - pinctrl-names : The pin control state names 37 40 - pinctrl-0: The "default" pinctrl state 38 41 - #interrupt-cells: specifies the number of cells needed to encode an ··· 89 86 reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>; 90 87 reg-names = "axi-base", "apb-base"; 91 88 resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, 92 - <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>; 93 - reset-names = "core", "mgmt", "mgmt-sticky", "pipe"; 89 + <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> , 90 + <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>; 91 + reset-names = "core", "mgmt", "mgmt-sticky", "pipe", 92 + "pm", "pclk", "aclk"; 94 93 phys = <&pcie_phy>; 95 94 phy-names = "pcie-phy"; 96 95 pinctrl-names = "default";
+5 -5
Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
··· 14 14 - #size-cells : The value of this property must be 1 15 15 - ranges : defines mapping between pin controller node (parent) to 16 16 gpio-bank node (children). 17 - - interrupt-parent: phandle of the interrupt parent to which the external 18 - GPIO interrupts are forwarded to. 19 - - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node 20 - which includes IRQ mux selection register, and the offset of the IRQ mux 21 - selection register. 22 17 - pins-are-numbered: Specify the subnodes are using numbered pinmux to 23 18 specify pins. 24 19 ··· 32 37 33 38 Optional properties: 34 39 - reset: : Reference to the reset controller 40 + - interrupt-parent: phandle of the interrupt parent to which the external 41 + GPIO interrupts are forwarded to. 42 + - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node 43 + which includes IRQ mux selection register, and the offset of the IRQ mux 44 + selection register. 35 45 36 46 Example: 37 47 #include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
+1 -1
Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
··· 12 12 13 13 Optional properties: 14 14 - ti,dmic: phandle for the OMAP dmic node if the machine have it connected 15 - - ti,jack_detection: Need to be present if the board capable to detect jack 15 + - ti,jack-detection: Need to be present if the board capable to detect jack 16 16 insertion, removal. 17 17 18 18 Available audio endpoints for the audio-routing table:
-1
Documentation/filesystems/Locking
··· 447 447 int (*flush) (struct file *); 448 448 int (*release) (struct inode *, struct file *); 449 449 int (*fsync) (struct file *, loff_t start, loff_t end, int datasync); 450 - int (*aio_fsync) (struct kiocb *, int datasync); 451 450 int (*fasync) (int, struct file *, int); 452 451 int (*lock) (struct file *, int, struct file_lock *); 453 452 ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
-1
Documentation/filesystems/vfs.txt
··· 828 828 int (*flush) (struct file *, fl_owner_t id); 829 829 int (*release) (struct inode *, struct file *); 830 830 int (*fsync) (struct file *, loff_t, loff_t, int datasync); 831 - int (*aio_fsync) (struct kiocb *, int datasync); 832 831 int (*fasync) (int, struct file *, int); 833 832 int (*lock) (struct file *, int, struct file_lock *); 834 833 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
+2 -2
Documentation/i2c/i2c-topology
··· 326 326 327 327 This is a good topology. 328 328 329 - .--------. 329 + .--------. 330 330 .----------. .--| dev D1 | 331 331 | parent- |--' '--------' 332 332 .--| locked | .--------. ··· 350 350 351 351 This is a good topology. 352 352 353 - .--------. 353 + .--------. 354 354 .----------. .--| dev D1 | 355 355 | mux- |--' '--------' 356 356 .--| locked | .--------.
+2 -1
Documentation/networking/dsa/dsa.txt
··· 67 67 Switch tagging protocols 68 68 ------------------------ 69 69 70 - DSA currently supports 4 different tagging protocols, and a tag-less mode as 70 + DSA currently supports 5 different tagging protocols, and a tag-less mode as 71 71 well. The different protocols are implemented in: 72 72 73 73 net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy) 74 74 net/dsa/tag_dsa.c: Marvell's original DSA tag 75 75 net/dsa/tag_edsa.c: Marvell's enhanced DSA tag 76 76 net/dsa/tag_brcm.c: Broadcom's 4 bytes tag 77 + net/dsa/tag_qca.c: Qualcomm's 2 bytes tag 77 78 78 79 The exact format of the tag protocol is vendor specific, but in general, they 79 80 all contain something which:
+11
Documentation/virtual/kvm/api.txt
··· 777 777 conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios 778 778 such as migration. 779 779 780 + When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the 781 + set of bits that KVM can return in struct kvm_clock_data's flag member. 782 + 783 + The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned 784 + value is the exact kvmclock value seen by all VCPUs at the instant 785 + when KVM_GET_CLOCK was called. If clear, the returned value is simply 786 + CLOCK_MONOTONIC plus a constant offset; the offset can be modified 787 + with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock, 788 + but the exact value read by each VCPU could differ, because the host 789 + TSC is not stable. 790 + 780 791 struct kvm_clock_data { 781 792 __u64 clock; /* kvmclock current value */ 782 793 __u32 flags;
+3 -1
MAINTAINERS
··· 7084 7084 LED SUBSYSTEM 7085 7085 M: Richard Purdie <rpurdie@rpsys.net> 7086 7086 M: Jacek Anaszewski <j.anaszewski@samsung.com> 7087 + M: Pavel Machek <pavel@ucw.cz> 7087 7088 L: linux-leds@vger.kernel.org 7088 7089 T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git 7089 7090 S: Maintained ··· 8058 8057 F: include/linux/mlx4/ 8059 8058 8060 8059 MELLANOX MLX5 core VPI driver 8060 + M: Saeed Mahameed <saeedm@mellanox.com> 8061 8061 M: Matan Barak <matanb@mellanox.com> 8062 8062 M: Leon Romanovsky <leonro@mellanox.com> 8063 8063 L: netdev@vger.kernel.org ··· 9337 9335 M: Keith Busch <keith.busch@intel.com> 9338 9336 L: linux-pci@vger.kernel.org 9339 9337 S: Supported 9340 - F: arch/x86/pci/vmd.c 9338 + F: drivers/pci/host/vmd.c 9341 9339 9342 9340 PCIE DRIVER FOR ST SPEAR13XX 9343 9341 M: Pratyush Anand <pratyush.anand@gmail.com>
+10 -7
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 9 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 4 + EXTRAVERSION = -rc6 5 5 NAME = Psychotic Stoned Sheep 6 6 7 7 # *DOCUMENTATION* ··· 370 370 CFLAGS_KERNEL = 371 371 AFLAGS_KERNEL = 372 372 LDFLAGS_vmlinux = 373 - CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im 373 + CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized 374 374 CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) 375 375 376 376 ··· 399 399 -fno-strict-aliasing -fno-common \ 400 400 -Werror-implicit-function-declaration \ 401 401 -Wno-format-security \ 402 - -std=gnu89 402 + -std=gnu89 $(call cc-option,-fno-PIE) 403 + 403 404 404 405 KBUILD_AFLAGS_KERNEL := 405 406 KBUILD_CFLAGS_KERNEL := 406 - KBUILD_AFLAGS := -D__ASSEMBLY__ 407 + KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) 407 408 KBUILD_AFLAGS_MODULE := -DMODULE 408 409 KBUILD_CFLAGS_MODULE := -DMODULE 409 410 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds ··· 621 620 include arch/$(SRCARCH)/Makefile 622 621 623 622 KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) 624 - KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) 625 623 KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) 626 624 627 625 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION ··· 629 629 endif 630 630 631 631 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 632 - KBUILD_CFLAGS += -Os 632 + KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) 633 633 else 634 634 ifdef CONFIG_PROFILE_ALL_BRANCHES 635 - KBUILD_CFLAGS += -O2 635 + KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) 636 636 else 637 637 KBUILD_CFLAGS += -O2 638 638 endif 639 639 endif 640 + 641 + KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \ 642 + $(call cc-disable-warning,maybe-uninitialized,)) 640 643 641 644 # Tell gcc to never replace conditional load with a non-conditional one 642 645 KBUILD_CFLAGS += $(call 
cc-option,--param=allow-store-data-races=0)
+6 -1
arch/arc/Makefile
··· 50 50 51 51 cflags-$(atleast_gcc44) += -fsection-anchors 52 52 53 + cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock 54 + cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape 55 + 53 56 ifdef CONFIG_ISA_ARCV2 54 57 55 58 ifndef CONFIG_ARC_HAS_LL64 ··· 71 68 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE 72 69 # Generic build system uses -O2, we want -O3 73 70 # Note: No need to add to cflags-y as that happens anyways 74 - ARCH_CFLAGS += -O3 71 + # 72 + # Disable the false maybe-uninitialized warings gcc spits out at -O3 73 + ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,) 75 74 endif 76 75 77 76 # small data is default for elf32 tool-chain. If not usable, disable it
+1 -1
arch/arc/boot/dts/axc001.dtsi
··· 71 71 reg-io-width = <4>; 72 72 }; 73 73 74 - arcpmu0: pmu { 74 + arcpct0: pct { 75 75 compatible = "snps,arc700-pct"; 76 76 }; 77 77 };
+1 -1
arch/arc/boot/dts/nsim_700.dts
··· 69 69 }; 70 70 }; 71 71 72 - arcpmu0: pmu { 72 + arcpct0: pct { 73 73 compatible = "snps,arc700-pct"; 74 74 }; 75 75 };
+4
arch/arc/boot/dts/nsimosci.dts
··· 83 83 reg = <0xf0003000 0x44>; 84 84 interrupts = <7>; 85 85 }; 86 + 87 + arcpct0: pct { 88 + compatible = "snps,arc700-pct"; 89 + }; 86 90 }; 87 91 };
+1
arch/arc/configs/nsim_700_defconfig
··· 14 14 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" 15 15 CONFIG_KALLSYMS_ALL=y 16 16 CONFIG_EMBEDDED=y 17 + CONFIG_PERF_EVENTS=y 17 18 # CONFIG_SLUB_DEBUG is not set 18 19 # CONFIG_COMPAT_BRK is not set 19 20 CONFIG_KPROBES=y
+1
arch/arc/configs/nsim_hs_defconfig
··· 14 14 CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" 15 15 CONFIG_KALLSYMS_ALL=y 16 16 CONFIG_EMBEDDED=y 17 + CONFIG_PERF_EVENTS=y 17 18 # CONFIG_SLUB_DEBUG is not set 18 19 # CONFIG_COMPAT_BRK is not set 19 20 CONFIG_KPROBES=y
+1
arch/arc/configs/nsim_hs_smp_defconfig
··· 12 12 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" 13 13 CONFIG_KALLSYMS_ALL=y 14 14 CONFIG_EMBEDDED=y 15 + CONFIG_PERF_EVENTS=y 15 16 # CONFIG_SLUB_DEBUG is not set 16 17 # CONFIG_COMPAT_BRK is not set 17 18 CONFIG_KPROBES=y
+1
arch/arc/configs/nsimosci_defconfig
··· 14 14 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" 15 15 CONFIG_KALLSYMS_ALL=y 16 16 CONFIG_EMBEDDED=y 17 + CONFIG_PERF_EVENTS=y 17 18 # CONFIG_SLUB_DEBUG is not set 18 19 # CONFIG_COMPAT_BRK is not set 19 20 CONFIG_KPROBES=y
+1
arch/arc/configs/nsimosci_hs_defconfig
··· 14 14 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" 15 15 CONFIG_KALLSYMS_ALL=y 16 16 CONFIG_EMBEDDED=y 17 + CONFIG_PERF_EVENTS=y 17 18 # CONFIG_SLUB_DEBUG is not set 18 19 # CONFIG_COMPAT_BRK is not set 19 20 CONFIG_KPROBES=y
+1 -2
arch/arc/configs/nsimosci_hs_smp_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 CONFIG_BLK_DEV_INITRD=y 12 12 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" 13 + CONFIG_PERF_EVENTS=y 13 14 # CONFIG_COMPAT_BRK is not set 14 15 CONFIG_KPROBES=y 15 16 CONFIG_MODULES=y ··· 35 34 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set 36 35 # CONFIG_INET_XFRM_MODE_TUNNEL is not set 37 36 # CONFIG_INET_XFRM_MODE_BEET is not set 38 - # CONFIG_INET_LRO is not set 39 37 # CONFIG_IPV6 is not set 40 38 # CONFIG_WIRELESS is not set 41 39 CONFIG_DEVTMPFS=y ··· 72 72 # CONFIG_HWMON is not set 73 73 CONFIG_DRM=y 74 74 CONFIG_DRM_ARCPGU=y 75 - CONFIG_FRAMEBUFFER_CONSOLE=y 76 75 CONFIG_LOGO=y 77 76 # CONFIG_HID is not set 78 77 # CONFIG_USB_SUPPORT is not set
+2
arch/arc/include/asm/arcregs.h
··· 43 43 #define STATUS_AE_BIT 5 /* Exception active */ 44 44 #define STATUS_DE_BIT 6 /* PC is in delay slot */ 45 45 #define STATUS_U_BIT 7 /* User/Kernel mode */ 46 + #define STATUS_Z_BIT 11 46 47 #define STATUS_L_BIT 12 /* Loop inhibit */ 47 48 48 49 /* These masks correspond to the status word(STATUS_32) bits */ 49 50 #define STATUS_AE_MASK (1<<STATUS_AE_BIT) 50 51 #define STATUS_DE_MASK (1<<STATUS_DE_BIT) 51 52 #define STATUS_U_MASK (1<<STATUS_U_BIT) 53 + #define STATUS_Z_MASK (1<<STATUS_Z_BIT) 52 54 #define STATUS_L_MASK (1<<STATUS_L_BIT) 53 55 54 56 /*
+2 -2
arch/arc/include/asm/smp.h
··· 37 37 * API expected BY platform smp code (FROM arch smp code) 38 38 * 39 39 * smp_ipi_irq_setup: 40 - * Takes @cpu and @irq to which the arch-common ISR is hooked up 40 + * Takes @cpu and @hwirq to which the arch-common ISR is hooked up 41 41 */ 42 - extern int smp_ipi_irq_setup(int cpu, int irq); 42 + extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq); 43 43 44 44 /* 45 45 * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
+2
arch/arc/kernel/devtree.c
··· 31 31 arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */ 32 32 else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp")) 33 33 arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */ 34 + else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps")) 35 + arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */ 34 36 else 35 37 arc_base_baud = 50000000; /* Fixed default 50MHz */ 36 38 }
+20 -12
arch/arc/kernel/mcip.c
··· 181 181 { 182 182 unsigned long flags; 183 183 cpumask_t online; 184 + unsigned int destination_bits; 185 + unsigned int distribution_mode; 184 186 185 187 /* errout if no online cpu per @cpumask */ 186 188 if (!cpumask_and(&online, cpumask, cpu_online_mask)) ··· 190 188 191 189 raw_spin_lock_irqsave(&mcip_lock, flags); 192 190 193 - idu_set_dest(data->hwirq, cpumask_bits(&online)[0]); 194 - idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR); 191 + destination_bits = cpumask_bits(&online)[0]; 192 + idu_set_dest(data->hwirq, destination_bits); 193 + 194 + if (ffs(destination_bits) == fls(destination_bits)) 195 + distribution_mode = IDU_M_DISTRI_DEST; 196 + else 197 + distribution_mode = IDU_M_DISTRI_RR; 198 + 199 + idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode); 195 200 196 201 raw_spin_unlock_irqrestore(&mcip_lock, flags); 197 202 ··· 216 207 217 208 }; 218 209 219 - static int idu_first_irq; 210 + static irq_hw_number_t idu_first_hwirq; 220 211 221 212 static void idu_cascade_isr(struct irq_desc *desc) 222 213 { 223 - struct irq_domain *domain = irq_desc_get_handler_data(desc); 224 - unsigned int core_irq = irq_desc_get_irq(desc); 225 - unsigned int idu_irq; 214 + struct irq_domain *idu_domain = irq_desc_get_handler_data(desc); 215 + irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc)); 216 + irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq; 226 217 227 - idu_irq = core_irq - idu_first_irq; 228 - generic_handle_irq(irq_find_mapping(domain, idu_irq)); 218 + generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq)); 229 219 } 230 220 231 221 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) ··· 290 282 struct irq_domain *domain; 291 283 /* Read IDU BCR to confirm nr_irqs */ 292 284 int nr_irqs = of_irq_count(intc); 293 - int i, irq; 285 + int i, virq; 294 286 struct mcip_bcr mp; 295 287 296 288 READ_BCR(ARC_REG_MCIP_BCR, mp); ··· 311 303 * however we need it to get the 
parent virq and set IDU handler 312 304 * as first level isr 313 305 */ 314 - irq = irq_of_parse_and_map(intc, i); 306 + virq = irq_of_parse_and_map(intc, i); 315 307 if (!i) 316 - idu_first_irq = irq; 308 + idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq)); 317 309 318 - irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain); 310 + irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain); 319 311 } 320 312 321 313 __mcip_cmd(CMD_IDU_ENABLE, 0);
+11 -9
arch/arc/kernel/process.c
··· 43 43 44 44 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) 45 45 { 46 - int uval; 47 - int ret; 46 + struct pt_regs *regs = current_pt_regs(); 47 + int uval = -EFAULT; 48 48 49 49 /* 50 50 * This is only for old cores lacking LLOCK/SCOND, which by defintion ··· 54 54 */ 55 55 WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP)); 56 56 57 + /* Z indicates to userspace if operation succeded */ 58 + regs->status32 &= ~STATUS_Z_MASK; 59 + 57 60 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 58 61 return -EFAULT; 59 62 60 63 preempt_disable(); 61 64 62 - ret = __get_user(uval, uaddr); 63 - if (ret) 65 + if (__get_user(uval, uaddr)) 64 66 goto done; 65 67 66 - if (uval != expected) 67 - ret = -EAGAIN; 68 - else 69 - ret = __put_user(new, uaddr); 68 + if (uval == expected) { 69 + if (!__put_user(new, uaddr)) 70 + regs->status32 |= STATUS_Z_MASK; 71 + } 70 72 71 73 done: 72 74 preempt_enable(); 73 75 74 - return ret; 76 + return uval; 75 77 } 76 78 77 79 void arch_cpu_idle(void)
+15 -8
arch/arc/kernel/smp.c
··· 22 22 #include <linux/atomic.h> 23 23 #include <linux/cpumask.h> 24 24 #include <linux/reboot.h> 25 + #include <linux/irqdomain.h> 25 26 #include <asm/processor.h> 26 27 #include <asm/setup.h> 27 28 #include <asm/mach_desc.h> ··· 68 67 int i; 69 68 70 69 /* 71 - * Initialise the present map, which describes the set of CPUs 72 - * actually populated at the present time. 70 + * if platform didn't set the present map already, do it now 71 + * boot cpu is set to present already by init/main.c 73 72 */ 74 - for (i = 0; i < max_cpus; i++) 75 - set_cpu_present(i, true); 73 + if (num_present_cpus() <= 1) { 74 + for (i = 0; i < max_cpus; i++) 75 + set_cpu_present(i, true); 76 + } 76 77 } 77 78 78 79 void __init smp_cpus_done(unsigned int max_cpus) ··· 354 351 */ 355 352 static DEFINE_PER_CPU(int, ipi_dev); 356 353 357 - int smp_ipi_irq_setup(int cpu, int irq) 354 + int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq) 358 355 { 359 356 int *dev = per_cpu_ptr(&ipi_dev, cpu); 357 + unsigned int virq = irq_find_mapping(NULL, hwirq); 358 + 359 + if (!virq) 360 + panic("Cannot find virq for root domain and hwirq=%lu", hwirq); 360 361 361 362 /* Boot cpu calls request, all call enable */ 362 363 if (!cpu) { 363 364 int rc; 364 365 365 - rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev); 366 + rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev); 366 367 if (rc) 367 - panic("Percpu IRQ request failed for %d\n", irq); 368 + panic("Percpu IRQ request failed for %u\n", virq); 368 369 } 369 370 370 - enable_percpu_irq(irq, 0); 371 + enable_percpu_irq(virq, 0); 371 372 372 373 return 0; 373 374 }
+11 -8
arch/arc/kernel/time.c
··· 152 152 cycle_t full; 153 153 } stamp; 154 154 155 - 156 - __asm__ __volatile( 157 - "1: \n" 158 - " lr %0, [AUX_RTC_LOW] \n" 159 - " lr %1, [AUX_RTC_HIGH] \n" 160 - " lr %2, [AUX_RTC_CTRL] \n" 161 - " bbit0.nt %2, 31, 1b \n" 162 - : "=r" (stamp.low), "=r" (stamp.high), "=r" (status)); 155 + /* 156 + * hardware has an internal state machine which tracks readout of 157 + * low/high and updates the CTRL.status if 158 + * - interrupt/exception taken between the two reads 159 + * - high increments after low has been read 160 + */ 161 + do { 162 + stamp.low = read_aux_reg(AUX_RTC_LOW); 163 + stamp.high = read_aux_reg(AUX_RTC_HIGH); 164 + status = read_aux_reg(AUX_RTC_CTRL); 165 + } while (!(status & _BITUL(31))); 163 166 164 167 return stamp.full; 165 168 }
+26
arch/arc/mm/dma.c
··· 105 105 __free_pages(page, get_order(size)); 106 106 } 107 107 108 + static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma, 109 + void *cpu_addr, dma_addr_t dma_addr, size_t size, 110 + unsigned long attrs) 111 + { 112 + unsigned long user_count = vma_pages(vma); 113 + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; 114 + unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr)); 115 + unsigned long off = vma->vm_pgoff; 116 + int ret = -ENXIO; 117 + 118 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 119 + 120 + if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 121 + return ret; 122 + 123 + if (off < count && user_count <= (count - off)) { 124 + ret = remap_pfn_range(vma, vma->vm_start, 125 + pfn + off, 126 + user_count << PAGE_SHIFT, 127 + vma->vm_page_prot); 128 + } 129 + 130 + return ret; 131 + } 132 + 108 133 /* 109 134 * streaming DMA Mapping API... 110 135 * CPU accesses page via normal paddr, thus needs to explicitly made ··· 218 193 struct dma_map_ops arc_dma_ops = { 219 194 .alloc = arc_dma_alloc, 220 195 .free = arc_dma_free, 196 + .mmap = arc_dma_mmap, 221 197 .map_page = arc_dma_map_page, 222 198 .map_sg = arc_dma_map_sg, 223 199 .sync_single_for_device = arc_dma_sync_single_for_device,
-6
arch/arc/plat-eznps/smp.c
··· 140 140 mtm_enable_core(cpu); 141 141 } 142 142 143 - static void eznps_ipi_clear(int irq) 144 - { 145 - write_aux_reg(CTOP_AUX_IACK, 1 << irq); 146 - } 147 - 148 143 struct plat_smp_ops plat_smp_ops = { 149 144 .info = smp_cpuinfo_buf, 150 145 .init_early_smp = eznps_init_cpumasks, 151 146 .cpu_kick = eznps_smp_wakeup_cpu, 152 147 .ipi_send = eznps_ipi_send, 153 148 .init_per_cpu = eznps_init_per_cpu, 154 - .ipi_clear = eznps_ipi_clear, 155 149 };
+7 -7
arch/arm/boot/dts/imx53-qsb.dts
··· 64 64 }; 65 65 66 66 ldo3_reg: ldo3 { 67 - regulator-min-microvolt = <600000>; 68 - regulator-max-microvolt = <1800000>; 67 + regulator-min-microvolt = <1725000>; 68 + regulator-max-microvolt = <3300000>; 69 69 regulator-always-on; 70 70 }; 71 71 ··· 76 76 }; 77 77 78 78 ldo5_reg: ldo5 { 79 - regulator-min-microvolt = <1725000>; 80 - regulator-max-microvolt = <3300000>; 79 + regulator-min-microvolt = <1200000>; 80 + regulator-max-microvolt = <3600000>; 81 81 regulator-always-on; 82 82 }; 83 83 ··· 100 100 }; 101 101 102 102 ldo9_reg: ldo9 { 103 - regulator-min-microvolt = <1200000>; 103 + regulator-min-microvolt = <1250000>; 104 104 regulator-max-microvolt = <3600000>; 105 105 regulator-always-on; 106 106 }; 107 107 108 108 ldo10_reg: ldo10 { 109 - regulator-min-microvolt = <1250000>; 110 - regulator-max-microvolt = <3650000>; 109 + regulator-min-microvolt = <1200000>; 110 + regulator-max-microvolt = <3600000>; 111 111 regulator-always-on; 112 112 }; 113 113 };
+5
arch/arm/boot/dts/logicpd-som-lv.dtsi
··· 13 13 }; 14 14 }; 15 15 16 + memory@80000000 { 17 + device_type = "memory"; 18 + reg = <0x80000000 0>; 19 + }; 20 + 16 21 wl12xx_vmmc: wl12xx_vmmc { 17 22 compatible = "regulator-fixed"; 18 23 regulator-name = "vwl1271";
+2 -2
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
··· 13 13 }; 14 14 }; 15 15 16 - memory@0 { 16 + memory@80000000 { 17 17 device_type = "memory"; 18 - reg = <0 0>; 18 + reg = <0x80000000 0>; 19 19 }; 20 20 21 21 leds {
+4 -3
arch/arm/boot/dts/omap5-board-common.dtsi
··· 124 124 compatible = "ti,abe-twl6040"; 125 125 ti,model = "omap5-uevm"; 126 126 127 + ti,jack-detection; 127 128 ti,mclk-freq = <19200000>; 128 129 129 130 ti,mcpdm = <&mcpdm>; ··· 416 415 ti,backup-battery-charge-high-current; 417 416 }; 418 417 419 - gpadc { 418 + gpadc: gpadc { 420 419 compatible = "ti,palmas-gpadc"; 421 420 interrupts = <18 0 422 421 16 0 ··· 476 475 smps6_reg: smps6 { 477 476 /* VDD_DDR3 - over VDD_SMPS6 */ 478 477 regulator-name = "smps6"; 479 - regulator-min-microvolt = <1200000>; 480 - regulator-max-microvolt = <1200000>; 478 + regulator-min-microvolt = <1350000>; 479 + regulator-max-microvolt = <1350000>; 481 480 regulator-always-on; 482 481 regulator-boot-on; 483 482 };
+1 -1
arch/arm/boot/dts/stih410-b2260.dts
··· 74 74 /* Low speed expansion connector */ 75 75 spi0: spi@9844000 { 76 76 label = "LS-SPI0"; 77 - cs-gpio = <&pio30 3 0>; 77 + cs-gpios = <&pio30 3 0>; 78 78 status = "okay"; 79 79 }; 80 80
+4
arch/arm/boot/dts/sun8i-a23-a33.dtsi
··· 282 282 uart1_pins_a: uart1@0 { 283 283 allwinner,pins = "PG6", "PG7"; 284 284 allwinner,function = "uart1"; 285 + allwinner,drive = <SUN4I_PINCTRL_10_MA>; 286 + allwinner,pull = <SUN4I_PINCTRL_NO_PULL>; 285 287 }; 286 288 287 289 uart1_pins_cts_rts_a: uart1-cts-rts@0 { 288 290 allwinner,pins = "PG8", "PG9"; 289 291 allwinner,function = "uart1"; 292 + allwinner,drive = <SUN4I_PINCTRL_10_MA>; 293 + allwinner,pull = <SUN4I_PINCTRL_NO_PULL>; 290 294 }; 291 295 292 296 mmc0_pins_a: mmc0@0 {
+1
arch/arm/include/asm/kvm_asm.h
··· 66 66 extern void __kvm_flush_vm_context(void); 67 67 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 68 68 extern void __kvm_tlb_flush_vmid(struct kvm *kvm); 69 + extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); 69 70 70 71 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 71 72
+3
arch/arm/include/asm/kvm_host.h
··· 57 57 /* VTTBR value associated with below pgd and vmid */ 58 58 u64 vttbr; 59 59 60 + /* The last vcpu id that ran on each physical CPU */ 61 + int __percpu *last_vcpu_ran; 62 + 60 63 /* Timer */ 61 64 struct arch_timer_kvm timer; 62 65
+1
arch/arm/include/asm/kvm_hyp.h
··· 71 71 #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) 72 72 #define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) 73 73 #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) 74 + #define TLBIALL __ACCESS_CP15(c8, 0, c7, 0) 74 75 #define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) 75 76 #define PRRR __ACCESS_CP15(c10, 0, c2, 0) 76 77 #define NMRR __ACCESS_CP15(c10, 0, c2, 1)
+20
arch/arm/kernel/traps.c
··· 74 74 dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); 75 75 } 76 76 77 + void dump_backtrace_stm(u32 *stack, u32 instruction) 78 + { 79 + char str[80], *p; 80 + unsigned int x; 81 + int reg; 82 + 83 + for (reg = 10, x = 0, p = str; reg >= 0; reg--) { 84 + if (instruction & BIT(reg)) { 85 + p += sprintf(p, " r%d:%08x", reg, *stack--); 86 + if (++x == 6) { 87 + x = 0; 88 + p = str; 89 + printk("%s\n", str); 90 + } 91 + } 92 + } 93 + if (p != str) 94 + printk("%s\n", str); 95 + } 96 + 77 97 #ifndef CONFIG_ARM_UNWIND 78 98 /* 79 99 * Stack pointers should always be within the kernels view of
+5
arch/arm/kernel/vmlinux-xip.lds.S
··· 3 3 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> 4 4 */ 5 5 6 + /* No __ro_after_init data in the .rodata section - which will always be ro */ 7 + #define RO_AFTER_INIT_DATA 8 + 6 9 #include <asm-generic/vmlinux.lds.h> 7 10 #include <asm/cache.h> 8 11 #include <asm/thread_info.h> ··· 225 222 ARM_EXIT_KEEP(EXIT_DATA) 226 223 . = ALIGN(PAGE_SIZE); 227 224 __init_end = .; 225 + 226 + *(.data..ro_after_init) 228 227 229 228 NOSAVE_DATA 230 229 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+26 -1
arch/arm/kvm/arm.c
··· 114 114 */ 115 115 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 116 116 { 117 - int ret = 0; 117 + int ret, cpu; 118 118 119 119 if (type) 120 120 return -EINVAL; 121 + 122 + kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran)); 123 + if (!kvm->arch.last_vcpu_ran) 124 + return -ENOMEM; 125 + 126 + for_each_possible_cpu(cpu) 127 + *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1; 121 128 122 129 ret = kvm_alloc_stage2_pgd(kvm); 123 130 if (ret) ··· 148 141 out_free_stage2_pgd: 149 142 kvm_free_stage2_pgd(kvm); 150 143 out_fail_alloc: 144 + free_percpu(kvm->arch.last_vcpu_ran); 145 + kvm->arch.last_vcpu_ran = NULL; 151 146 return ret; 152 147 } 153 148 ··· 176 167 void kvm_arch_destroy_vm(struct kvm *kvm) 177 168 { 178 169 int i; 170 + 171 + free_percpu(kvm->arch.last_vcpu_ran); 172 + kvm->arch.last_vcpu_ran = NULL; 179 173 180 174 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 181 175 if (kvm->vcpus[i]) { ··· 324 312 325 313 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 326 314 { 315 + int *last_ran; 316 + 317 + last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran); 318 + 319 + /* 320 + * We might get preempted before the vCPU actually runs, but 321 + * over-invalidation doesn't affect correctness. 322 + */ 323 + if (*last_ran != vcpu->vcpu_id) { 324 + kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu); 325 + *last_ran = vcpu->vcpu_id; 326 + } 327 + 327 328 vcpu->cpu = cpu; 328 329 vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); 329 330
+15
arch/arm/kvm/hyp/tlb.c
··· 55 55 __kvm_tlb_flush_vmid(kvm); 56 56 } 57 57 58 + void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) 59 + { 60 + struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); 61 + 62 + /* Switch to requested VMID */ 63 + write_sysreg(kvm->arch.vttbr, VTTBR); 64 + isb(); 65 + 66 + write_sysreg(0, TLBIALL); 67 + dsb(nsh); 68 + isb(); 69 + 70 + write_sysreg(0, VTTBR); 71 + } 72 + 58 73 void __hyp_text __kvm_flush_vm_context(void) 59 74 { 60 75 write_sysreg(0, TLBIALLNSNHIS);
+3 -34
arch/arm/lib/backtrace.S
··· 10 10 * 27/03/03 Ian Molton Clean up CONFIG_CPU 11 11 * 12 12 */ 13 + #include <linux/kern_levels.h> 13 14 #include <linux/linkage.h> 14 15 #include <asm/assembler.h> 15 16 .text ··· 84 83 teq r3, r1, lsr #11 85 84 ldreq r0, [frame, #-8] @ get sp 86 85 subeq r0, r0, #4 @ point at the last arg 87 - bleq .Ldumpstm @ dump saved registers 86 + bleq dump_backtrace_stm @ dump saved registers 88 87 89 88 1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc} 90 89 ldr r3, .Ldsi @ instruction exists, 91 90 teq r3, r1, lsr #11 92 91 subeq r0, frame, #16 93 - bleq .Ldumpstm @ dump saved registers 92 + bleq dump_backtrace_stm @ dump saved registers 94 93 95 94 teq sv_fp, #0 @ zero saved fp means 96 95 beq no_frame @ no further frames ··· 113 112 .long 1004b, 1006b 114 113 .popsection 115 114 116 - #define instr r4 117 - #define reg r5 118 - #define stack r6 119 - 120 - .Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr} 121 - mov stack, r0 122 - mov instr, r1 123 - mov reg, #10 124 - mov r7, #0 125 - 1: mov r3, #1 126 - ARM( tst instr, r3, lsl reg ) 127 - THUMB( lsl r3, reg ) 128 - THUMB( tst instr, r3 ) 129 - beq 2f 130 - add r7, r7, #1 131 - teq r7, #6 132 - moveq r7, #0 133 - adr r3, .Lcr 134 - addne r3, r3, #1 @ skip newline 135 - ldr r2, [stack], #-4 136 - mov r1, reg 137 - adr r0, .Lfp 138 - bl printk 139 - 2: subs reg, reg, #1 140 - bpl 1b 141 - teq r7, #0 142 - adrne r0, .Lcr 143 - blne printk 144 - ldmfd sp!, {instr, reg, stack, r7, pc} 145 - 146 - .Lfp: .asciz " r%d:%08x%s" 147 - .Lcr: .asciz "\n" 148 115 .Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" 149 116 .align 150 117 .Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
+1
arch/arm/mach-omap2/Kconfig
··· 71 71 select HAVE_ARM_TWD 72 72 select ARM_ERRATA_754322 73 73 select ARM_ERRATA_775420 74 + select OMAP_INTERCONNECT 74 75 75 76 config SOC_DRA7XX 76 77 bool "TI DRA7XX"
+11 -5
arch/arm/mach-omap2/id.c
··· 205 205 206 206 #define OMAP3_SHOW_FEATURE(feat) \ 207 207 if (omap3_has_ ##feat()) \ 208 - printk(#feat" "); 208 + n += scnprintf(buf + n, sizeof(buf) - n, #feat " "); 209 209 210 210 static void __init omap3_cpuinfo(void) 211 211 { 212 212 const char *cpu_name; 213 + char buf[64]; 214 + int n = 0; 215 + 216 + memset(buf, 0, sizeof(buf)); 213 217 214 218 /* 215 219 * OMAP3430 and OMAP3530 are assumed to be same. ··· 245 241 cpu_name = "OMAP3503"; 246 242 } 247 243 248 - sprintf(soc_name, "%s", cpu_name); 244 + scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name); 249 245 250 246 /* Print verbose information */ 251 - pr_info("%s %s (", soc_name, soc_rev); 247 + n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev); 252 248 253 249 OMAP3_SHOW_FEATURE(l2cache); 254 250 OMAP3_SHOW_FEATURE(iva); ··· 256 252 OMAP3_SHOW_FEATURE(neon); 257 253 OMAP3_SHOW_FEATURE(isp); 258 254 OMAP3_SHOW_FEATURE(192mhz_clk); 259 - 260 - printk(")\n"); 255 + if (*(buf + n - 1) == ' ') 256 + n--; 257 + n += scnprintf(buf + n, sizeof(buf) - n, ")\n"); 258 + pr_info("%s", buf); 261 259 } 262 260 263 261 #define OMAP3_CHECK_FEATURE(status,feat) \
+3
arch/arm/mach-omap2/prm3xxx.c
··· 319 319 if (has_uart4) { 320 320 en_uart4_mask = OMAP3630_EN_UART4_MASK; 321 321 grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK; 322 + } else { 323 + en_uart4_mask = 0; 324 + grpsel_uart4_mask = 0; 322 325 } 323 326 324 327 /* Enable wakeups in PER */
+6
arch/arm/mach-omap2/voltage.c
··· 87 87 return -ENODATA; 88 88 } 89 89 90 + if (!voltdm->volt_data) { 91 + pr_err("%s: No voltage data defined for vdd_%s\n", 92 + __func__, voltdm->name); 93 + return -ENODATA; 94 + } 95 + 90 96 /* Adjust voltage to the exact voltage from the OPP table */ 91 97 for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) { 92 98 if (voltdm->volt_data[i].volt_nominal >= target_volt) {
+1 -1
arch/arm/mm/dma-mapping.c
··· 1167 1167 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 1168 1168 return 0; 1169 1169 } 1170 - fs_initcall(dma_debug_do_init); 1170 + core_initcall(dma_debug_do_init); 1171 1171 1172 1172 #ifdef CONFIG_ARM_DMA_USE_IOMMU 1173 1173
+1 -1
arch/arm/mm/proc-v7m.S
··· 96 96 ret lr 97 97 ENDPROC(cpu_cm7_proc_fin) 98 98 99 - .section ".text.init", #alloc, #execinstr 99 + .section ".init.text", #alloc, #execinstr 100 100 101 101 __v7m_cm7_setup: 102 102 mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
+2 -2
arch/arm64/boot/dts/marvell/armada-37xx.dtsi
··· 105 105 status = "disabled"; 106 106 }; 107 107 108 - nb_perih_clk: nb-periph-clk@13000{ 108 + nb_periph_clk: nb-periph-clk@13000 { 109 109 compatible = "marvell,armada-3700-periph-clock-nb"; 110 110 reg = <0x13000 0x100>; 111 111 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, ··· 113 113 #clock-cells = <1>; 114 114 }; 115 115 116 - sb_perih_clk: sb-periph-clk@18000{ 116 + sb_periph_clk: sb-periph-clk@18000 { 117 117 compatible = "marvell,armada-3700-periph-clock-sb"; 118 118 reg = <0x18000 0x100>; 119 119 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
+3 -3
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
··· 130 130 reg = <0x700600 0x50>; 131 131 #address-cells = <0x1>; 132 132 #size-cells = <0x0>; 133 - cell-index = <1>; 134 - clocks = <&cps_syscon0 0 3>; 133 + cell-index = <3>; 134 + clocks = <&cps_syscon0 1 21>; 135 135 status = "disabled"; 136 136 }; 137 137 ··· 140 140 reg = <0x700680 0x50>; 141 141 #address-cells = <1>; 142 142 #size-cells = <0>; 143 - cell-index = <2>; 143 + cell-index = <4>; 144 144 clocks = <&cps_syscon0 1 21>; 145 145 status = "disabled"; 146 146 };
+5 -2
arch/arm64/boot/dts/rockchip/rk3399.dtsi
··· 300 300 ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000 301 301 0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>; 302 302 resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, 303 - <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>; 304 - reset-names = "core", "mgmt", "mgmt-sticky", "pipe"; 303 + <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>, 304 + <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, 305 + <&cru SRST_A_PCIE>; 306 + reset-names = "core", "mgmt", "mgmt-sticky", "pipe", 307 + "pm", "pclk", "aclk"; 305 308 status = "disabled"; 306 309 307 310 pcie0_intc: interrupt-controller {
+1
arch/arm64/include/asm/kvm_asm.h
··· 54 54 extern void __kvm_flush_vm_context(void); 55 55 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 56 56 extern void __kvm_tlb_flush_vmid(struct kvm *kvm); 57 + extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); 57 58 58 59 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 59 60
+3
arch/arm64/include/asm/kvm_host.h
··· 62 62 /* VTTBR value associated with above pgd and vmid */ 63 63 u64 vttbr; 64 64 65 + /* The last vcpu id that ran on each physical CPU */ 66 + int __percpu *last_vcpu_ran; 67 + 65 68 /* The maximum number of vCPUs depends on the used GIC model */ 66 69 int max_vcpus; 67 70
+1 -1
arch/arm64/include/asm/kvm_mmu.h
··· 128 128 return v; 129 129 } 130 130 131 - #define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) 131 + #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) 132 132 133 133 /* 134 134 * We currently only support a 40bit IPA.
+9 -1
arch/arm64/include/asm/perf_event.h
··· 46 46 #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ 47 47 #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ 48 48 49 - #define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ 49 + /* 50 + * PMUv3 event types: required events 51 + */ 52 + #define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 53 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 54 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 55 + #define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 56 + #define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 57 + #define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 50 58 51 59 /* 52 60 * Event filters for PMUv3
+1 -9
arch/arm64/kernel/perf_event.c
··· 31 31 32 32 /* 33 33 * ARMv8 PMUv3 Performance Events handling code. 34 - * Common event types. 34 + * Common event types (some are defined in asm/perf_event.h). 35 35 */ 36 - 37 - /* Required events. */ 38 - #define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 39 - #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 40 - #define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 41 - #define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 42 - #define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 43 - #define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 44 36 45 37 /* At least one of the following is required. */ 46 38 #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
+15
arch/arm64/kvm/hyp/tlb.c
··· 64 64 write_sysreg(0, vttbr_el2); 65 65 } 66 66 67 + void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) 68 + { 69 + struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); 70 + 71 + /* Switch to requested VMID */ 72 + write_sysreg(kvm->arch.vttbr, vttbr_el2); 73 + isb(); 74 + 75 + asm volatile("tlbi vmalle1" : : ); 76 + dsb(nsh); 77 + isb(); 78 + 79 + write_sysreg(0, vttbr_el2); 80 + } 81 + 67 82 void __hyp_text __kvm_flush_vm_context(void) 68 83 { 69 84 dsb(ishst);
+8 -2
arch/arm64/kvm/sys_regs.c
··· 597 597 598 598 idx = ARMV8_PMU_CYCLE_IDX; 599 599 } else { 600 - BUG(); 600 + return false; 601 601 } 602 + } else if (r->CRn == 0 && r->CRm == 9) { 603 + /* PMCCNTR */ 604 + if (pmu_access_event_counter_el0_disabled(vcpu)) 605 + return false; 606 + 607 + idx = ARMV8_PMU_CYCLE_IDX; 602 608 } else if (r->CRn == 14 && (r->CRm & 12) == 8) { 603 609 /* PMEVCNTRn_EL0 */ 604 610 if (pmu_access_event_counter_el0_disabled(vcpu)) ··· 612 606 613 607 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 614 608 } else { 615 - BUG(); 609 + return false; 616 610 } 617 611 618 612 if (!pmu_counter_idx_valid(vcpu, idx))
+1
arch/nios2/kernel/time.c
··· 324 324 ret = nios2_clocksource_init(timer); 325 325 break; 326 326 default: 327 + ret = 0; 327 328 break; 328 329 } 329 330
+12 -3
arch/powerpc/include/asm/exception-64s.h
··· 91 91 */ 92 92 #define LOAD_HANDLER(reg, label) \ 93 93 ld reg,PACAKBASE(r13); /* get high part of &label */ \ 94 - ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l; 94 + ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label); 95 95 96 96 #define __LOAD_HANDLER(reg, label) \ 97 97 ld reg,PACAKBASE(r13); \ ··· 158 158 std ra,offset(r13); \ 159 159 END_FTR_SECTION_NESTED(ftr,ftr,943) 160 160 161 - #define EXCEPTION_PROLOG_0(area) \ 162 - GET_PACA(r13); \ 161 + #define EXCEPTION_PROLOG_0_PACA(area) \ 163 162 std r9,area+EX_R9(r13); /* save r9 */ \ 164 163 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \ 165 164 HMT_MEDIUM; \ 166 165 std r10,area+EX_R10(r13); /* save r10 - r12 */ \ 167 166 OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) 167 + 168 + #define EXCEPTION_PROLOG_0(area) \ 169 + GET_PACA(r13); \ 170 + EXCEPTION_PROLOG_0_PACA(area) 168 171 169 172 #define __EXCEPTION_PROLOG_1(area, extra, vec) \ 170 173 OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ ··· 196 193 197 194 #define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \ 198 195 EXCEPTION_PROLOG_0(area); \ 196 + EXCEPTION_PROLOG_1(area, extra, vec); \ 197 + EXCEPTION_PROLOG_PSERIES_1(label, h); 198 + 199 + /* Have the PACA in r13 already */ 200 + #define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec) \ 201 + EXCEPTION_PROLOG_0_PACA(area); \ 199 202 EXCEPTION_PROLOG_1(area, extra, vec); \ 200 203 EXCEPTION_PROLOG_PSERIES_1(label, h); 201 204
+1
arch/powerpc/include/asm/ppc-opcode.h
··· 460 460 461 461 #define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \ 462 462 ((IH & 0x7) << 21)) 463 + #define PPC_INVALIDATE_ERAT PPC_SLBIA(7) 463 464 464 465 #endif /* _ASM_POWERPC_PPC_OPCODE_H */
+8 -3
arch/powerpc/kernel/exceptions-64s.S
··· 116 116 117 117 EXC_REAL_BEGIN(system_reset, 0x100, 0x200) 118 118 SET_SCRATCH0(r13) 119 - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, 119 + GET_PACA(r13) 120 + clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */ 121 + EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD, 120 122 IDLETEST, 0x100) 121 123 122 124 EXC_REAL_END(system_reset, 0x100, 0x200) ··· 126 124 127 125 #ifdef CONFIG_PPC_P7_NAP 128 126 EXC_COMMON_BEGIN(system_reset_idle_common) 127 + BEGIN_FTR_SECTION 128 + GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */ 129 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 129 130 bl pnv_restore_hyp_resource 130 131 131 132 li r0,PNV_THREAD_RUNNING ··· 174 169 SET_SCRATCH0(r13) /* save r13 */ 175 170 /* 176 171 * Running native on arch 2.06 or later, we may wakeup from winkle 177 - * inside machine check. If yes, then last bit of HSPGR0 would be set 172 + * inside machine check. If yes, then last bit of HSPRG0 would be set 178 173 * to 1. Hence clear it unconditionally. 179 174 */ 180 175 GET_PACA(r13) ··· 393 388 /* 394 389 * Go back to winkle. Please note that this thread was woken up in 395 390 * machine check from winkle and have not restored the per-subcore 396 - * state. Hence before going back to winkle, set last bit of HSPGR0 391 + * state. Hence before going back to winkle, set last bit of HSPRG0 397 392 * to 1. This will make sure that if this thread gets woken up 398 393 * again at reset vector 0x100 then it will get chance to restore 399 394 * the subcore state.
+21 -21
arch/powerpc/kernel/process.c
··· 1215 1215 int instr; 1216 1216 1217 1217 if (!(i % 8)) 1218 - printk("\n"); 1218 + pr_cont("\n"); 1219 1219 1220 1220 #if !defined(CONFIG_BOOKE) 1221 1221 /* If executing with the IMMU off, adjust pc rather ··· 1227 1227 1228 1228 if (!__kernel_text_address(pc) || 1229 1229 probe_kernel_address((unsigned int __user *)pc, instr)) { 1230 - printk(KERN_CONT "XXXXXXXX "); 1230 + pr_cont("XXXXXXXX "); 1231 1231 } else { 1232 1232 if (regs->nip == pc) 1233 - printk(KERN_CONT "<%08x> ", instr); 1233 + pr_cont("<%08x> ", instr); 1234 1234 else 1235 - printk(KERN_CONT "%08x ", instr); 1235 + pr_cont("%08x ", instr); 1236 1236 } 1237 1237 1238 1238 pc += sizeof(int); 1239 1239 } 1240 1240 1241 - printk("\n"); 1241 + pr_cont("\n"); 1242 1242 } 1243 1243 1244 1244 struct regbit { ··· 1282 1282 1283 1283 for (; bits->bit; ++bits) 1284 1284 if (val & bits->bit) { 1285 - printk("%s%s", s, bits->name); 1285 + pr_cont("%s%s", s, bits->name); 1286 1286 s = sep; 1287 1287 } 1288 1288 } ··· 1305 1305 * T: Transactional (bit 34) 1306 1306 */ 1307 1307 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { 1308 - printk(",TM["); 1308 + pr_cont(",TM["); 1309 1309 print_bits(val, msr_tm_bits, ""); 1310 - printk("]"); 1310 + pr_cont("]"); 1311 1311 } 1312 1312 } 1313 1313 #else ··· 1316 1316 1317 1317 static void print_msr_bits(unsigned long val) 1318 1318 { 1319 - printk("<"); 1319 + pr_cont("<"); 1320 1320 print_bits(val, msr_bits, ","); 1321 1321 print_tm_bits(val); 1322 - printk(">"); 1322 + pr_cont(">"); 1323 1323 } 1324 1324 1325 1325 #ifdef CONFIG_PPC64 ··· 1347 1347 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 1348 1348 trap = TRAP(regs); 1349 1349 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 1350 - printk("CFAR: "REG" ", regs->orig_gpr3); 1350 + pr_cont("CFAR: "REG" ", regs->orig_gpr3); 1351 1351 if (trap == 0x200 || trap == 0x300 || trap == 0x600) 1352 1352 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 1353 - printk("DEAR: "REG" ESR: "REG" ", regs->dar, 
regs->dsisr); 1353 + pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); 1354 1354 #else 1355 - printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); 1355 + pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); 1356 1356 #endif 1357 1357 #ifdef CONFIG_PPC64 1358 - printk("SOFTE: %ld ", regs->softe); 1358 + pr_cont("SOFTE: %ld ", regs->softe); 1359 1359 #endif 1360 1360 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1361 1361 if (MSR_TM_ACTIVE(regs->msr)) 1362 - printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); 1362 + pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); 1363 1363 #endif 1364 1364 1365 1365 for (i = 0; i < 32; i++) { 1366 1366 if ((i % REGS_PER_LINE) == 0) 1367 - printk("\nGPR%02d: ", i); 1368 - printk(REG " ", regs->gpr[i]); 1367 + pr_cont("\nGPR%02d: ", i); 1368 + pr_cont(REG " ", regs->gpr[i]); 1369 1369 if (i == LAST_VOLATILE && !FULL_REGS(regs)) 1370 1370 break; 1371 1371 } 1372 - printk("\n"); 1372 + pr_cont("\n"); 1373 1373 #ifdef CONFIG_KALLSYMS 1374 1374 /* 1375 1375 * Lookup NIP late so we have the best change of getting the ··· 1900 1900 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); 1901 1901 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1902 1902 if ((ip == rth) && curr_frame >= 0) { 1903 - printk(" (%pS)", 1903 + pr_cont(" (%pS)", 1904 1904 (void *)current->ret_stack[curr_frame].ret); 1905 1905 curr_frame--; 1906 1906 } 1907 1907 #endif 1908 1908 if (firstframe) 1909 - printk(" (unreliable)"); 1910 - printk("\n"); 1909 + pr_cont(" (unreliable)"); 1910 + pr_cont("\n"); 1911 1911 } 1912 1912 firstframe = 0; 1913 1913
+14 -6
arch/powerpc/kernel/setup_64.c
··· 226 226 if (firmware_has_feature(FW_FEATURE_OPAL)) 227 227 opal_configure_cores(); 228 228 229 - /* Enable AIL if supported, and we are in hypervisor mode */ 230 - if (early_cpu_has_feature(CPU_FTR_HVMODE) && 231 - early_cpu_has_feature(CPU_FTR_ARCH_207S)) { 232 - unsigned long lpcr = mfspr(SPRN_LPCR); 233 - mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 234 - } 229 + /* AIL on native is done in cpu_ready_for_interrupts() */ 235 230 } 236 231 } 237 232 238 233 static void cpu_ready_for_interrupts(void) 239 234 { 235 + /* 236 + * Enable AIL if supported, and we are in hypervisor mode. This 237 + * is called once for every processor. 238 + * 239 + * If we are not in hypervisor mode the job is done once for 240 + * the whole partition in configure_exceptions(). 241 + */ 242 + if (early_cpu_has_feature(CPU_FTR_HVMODE) && 243 + early_cpu_has_feature(CPU_FTR_ARCH_207S)) { 244 + unsigned long lpcr = mfspr(SPRN_LPCR); 245 + mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 246 + } 247 + 240 248 /* Set IR and DR in PACA MSR */ 241 249 get_paca()->kernel_msr = MSR_KERNEL; 242 250 }
+4
arch/powerpc/mm/hash_utils_64.c
··· 1029 1029 { 1030 1030 /* Initialize hash table for that CPU */ 1031 1031 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 1032 + 1033 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 1034 + update_hid_for_hash(); 1035 + 1032 1036 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1033 1037 mtspr(SPRN_SDR1, _SDR1); 1034 1038 else
+4
arch/powerpc/mm/pgtable-radix.c
··· 388 388 * update partition table control register and UPRT 389 389 */ 390 390 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 391 + 392 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 393 + update_hid_for_radix(); 394 + 391 395 lpcr = mfspr(SPRN_LPCR); 392 396 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); 393 397
+4
arch/powerpc/mm/tlb-radix.c
··· 50 50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { 51 51 __tlbiel_pid(pid, set, ric); 52 52 } 53 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 54 + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); 53 55 return; 54 56 } 55 57 ··· 85 83 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) 86 84 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 87 85 asm volatile("ptesync": : :"memory"); 86 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 87 + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); 88 88 } 89 89 90 90 static inline void _tlbie_va(unsigned long va, unsigned long pid,
+2
arch/s390/kernel/vmlinux.lds.S
··· 62 62 63 63 . = ALIGN(PAGE_SIZE); 64 64 __start_ro_after_init = .; 65 + __start_data_ro_after_init = .; 65 66 .data..ro_after_init : { 66 67 *(.data..ro_after_init) 67 68 } 69 + __end_data_ro_after_init = .; 68 70 EXCEPTION_TABLE(16) 69 71 . = ALIGN(PAGE_SIZE); 70 72 __end_ro_after_init = .;
+1 -1
arch/s390/pci/pci_dma.c
··· 423 423 dma_addr_t dma_addr_base, dma_addr; 424 424 int flags = ZPCI_PTE_VALID; 425 425 struct scatterlist *s; 426 - unsigned long pa; 426 + unsigned long pa = 0; 427 427 int ret; 428 428 429 429 size = PAGE_ALIGN(size);
+23
arch/sparc/Kconfig
··· 43 43 select ARCH_HAS_SG_CHAIN 44 44 select CPU_NO_EFFICIENT_FFS 45 45 select HAVE_ARCH_HARDENED_USERCOPY 46 + select PROVE_LOCKING_SMALL if PROVE_LOCKING 46 47 47 48 config SPARC32 48 49 def_bool !64BIT ··· 89 88 90 89 config ARCH_PROC_KCORE_TEXT 91 90 def_bool y 91 + 92 + config ARCH_ATU 93 + bool 94 + default y if SPARC64 95 + 96 + config ARCH_DMA_ADDR_T_64BIT 97 + bool 98 + default y if ARCH_ATU 92 99 93 100 config IOMMU_HELPER 94 101 bool ··· 312 303 313 304 config ARCH_SPARSEMEM_DEFAULT 314 305 def_bool y if SPARC64 306 + 307 + config FORCE_MAX_ZONEORDER 308 + int "Maximum zone order" 309 + default "13" 310 + help 311 + The kernel memory allocator divides physically contiguous memory 312 + blocks into "zones", where each zone is a power of two number of 313 + pages. This option selects the largest power of two that the kernel 314 + keeps in the memory allocator. If you need to allocate very large 315 + blocks of physically contiguous memory, then you may need to 316 + increase this value. 317 + 318 + This config option is actually maximum order plus one. For example, 319 + a value of 13 means that the largest free memory block is 2^12 pages. 315 320 316 321 source "mm/Kconfig" 317 322
+343
arch/sparc/include/asm/hypervisor.h
··· 2335 2335 */ 2336 2336 #define HV_FAST_PCI_MSG_SETVALID 0xd3 2337 2337 2338 + /* PCI IOMMU v2 definitions and services 2339 + * 2340 + * While the PCI IO definitions above is valid IOMMU v2 adds new PCI IO 2341 + * definitions and services. 2342 + * 2343 + * CTE Clump Table Entry. First level table entry in the ATU. 2344 + * 2345 + * pci_device_list 2346 + * A 32-bit aligned list of pci_devices. 2347 + * 2348 + * pci_device_listp 2349 + * real address of a pci_device_list. 32-bit aligned. 2350 + * 2351 + * iotte IOMMU translation table entry. 2352 + * 2353 + * iotte_attributes 2354 + * IO Attributes for IOMMU v2 mappings. In addition to 2355 + * read, write IOMMU v2 supports relax ordering 2356 + * 2357 + * io_page_list A 64-bit aligned list of real addresses. Each real 2358 + * address in an io_page_list must be properly aligned 2359 + * to the pagesize of the given IOTSB. 2360 + * 2361 + * io_page_list_p Real address of an io_page_list, 64-bit aligned. 2362 + * 2363 + * IOTSB IO Translation Storage Buffer. An aligned table of 2364 + * IOTTEs. Each IOTSB has a pagesize, table size, and 2365 + * virtual address associated with it that must match 2366 + * a pagesize and table size supported by the un-derlying 2367 + * hardware implementation. The alignment requirements 2368 + * for an IOTSB depend on the pagesize used for that IOTSB. 2369 + * Each IOTTE in an IOTSB maps one pagesize-sized page. 2370 + * The size of the IOTSB dictates how large of a virtual 2371 + * address space the IOTSB is capable of mapping. 2372 + * 2373 + * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus 2374 + * iotsb_handle represents a binding of an IOTSB to a 2375 + * PCI root complex. 2376 + * 2377 + * iotsb_index Zero-based IOTTE number within an IOTSB. 
2378 + */ 2379 + 2380 + /* The index_count argument consists of two fields: 2381 + * bits 63:48 #iottes and bits 47:0 iotsb_index 2382 + */ 2383 + #define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \ 2384 + (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index))) 2385 + 2386 + /* pci_iotsb_conf() 2387 + * TRAP: HV_FAST_TRAP 2388 + * FUNCTION: HV_FAST_PCI_IOTSB_CONF 2389 + * ARG0: devhandle 2390 + * ARG1: r_addr 2391 + * ARG2: size 2392 + * ARG3: pagesize 2393 + * ARG4: iova 2394 + * RET0: status 2395 + * RET1: iotsb_handle 2396 + * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize 2397 + * EBADALIGN r_addr is not properly aligned 2398 + * ENORADDR r_addr is not a valid real address 2399 + * ETOOMANY No further IOTSBs may be configured 2400 + * EBUSY Duplicate devhandle, raddir, iova combination 2401 + * 2402 + * Create an IOTSB suitable for the PCI root complex identified by devhandle, 2403 + * for the DMA virtual address defined by the argument iova. 2404 + * 2405 + * r_addr is the properly aligned base address of the IOTSB and size is the 2406 + * IOTSB (table) size in bytes.The IOTSB is required to be zeroed prior to 2407 + * being configured. If it contains any values other than zeros then the 2408 + * behavior is undefined. 2409 + * 2410 + * pagesize is the size of each page in the IOTSB. Note that the combination of 2411 + * size (table size) and pagesize must be valid. 2412 + * 2413 + * virt is the DMA virtual address this IOTSB will map. 2414 + * 2415 + * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1. 2416 + * Once configured, privileged access to the IOTSB memory is prohibited and 2417 + * creates undefined behavior. The only permitted access is indirect via these 2418 + * services. 
2419 + */ 2420 + #define HV_FAST_PCI_IOTSB_CONF 0x190 2421 + 2422 + /* pci_iotsb_info() 2423 + * TRAP: HV_FAST_TRAP 2424 + * FUNCTION: HV_FAST_PCI_IOTSB_INFO 2425 + * ARG0: devhandle 2426 + * ARG1: iotsb_handle 2427 + * RET0: status 2428 + * RET1: r_addr 2429 + * RET2: size 2430 + * RET3: pagesize 2431 + * RET4: iova 2432 + * RET5: #bound 2433 + * ERRORS: EINVAL Invalid devhandle or iotsb_handle 2434 + * 2435 + * This service returns configuration information about an IOTSB previously 2436 + * created with pci_iotsb_conf. 2437 + * 2438 + * iotsb_handle value 0 may be used with this service to inquire about the 2439 + * legacy IOTSB that may or may not exist. If the service succeeds, the return 2440 + * values describe the legacy IOTSB and I/O virtual addresses mapped by that 2441 + * table. However, the table base address r_addr may contain the value -1 which 2442 + * indicates a memory range that cannot be accessed or be reclaimed. 2443 + * 2444 + * The return value #bound contains the number of PCI devices that iotsb_handle 2445 + * is currently bound to. 2446 + */ 2447 + #define HV_FAST_PCI_IOTSB_INFO 0x191 2448 + 2449 + /* pci_iotsb_unconf() 2450 + * TRAP: HV_FAST_TRAP 2451 + * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF 2452 + * ARG0: devhandle 2453 + * ARG1: iotsb_handle 2454 + * RET0: status 2455 + * ERRORS: EINVAL Invalid devhandle or iotsb_handle 2456 + * EBUSY The IOTSB is bound and may not be unconfigured 2457 + * 2458 + * This service unconfigures the IOTSB identified by the devhandle and 2459 + * iotsb_handle arguments, previously created with pci_iotsb_conf. 2460 + * The IOTSB must not be currently bound to any device or the service will fail 2461 + * 2462 + * If the call succeeds, iotsb_handle is no longer valid. 
2463 + */ 2464 + #define HV_FAST_PCI_IOTSB_UNCONF 0x192 2465 + 2466 + /* pci_iotsb_bind() 2467 + * TRAP: HV_FAST_TRAP 2468 + * FUNCTION: HV_FAST_PCI_IOTSB_BIND 2469 + * ARG0: devhandle 2470 + * ARG1: iotsb_handle 2471 + * ARG2: pci_device 2472 + * RET0: status 2473 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device 2474 + * EBUSY A PCI function is already bound to an IOTSB at the same 2475 + * address range as specified by devhandle, iotsb_handle. 2476 + * 2477 + * This service binds the PCI function specified by the argument pci_device to 2478 + * the IOTSB specified by the arguments devhandle and iotsb_handle. 2479 + * 2480 + * The PCI device function is bound to the specified IOTSB with the IOVA range 2481 + * specified when the IOTSB was configured via pci_iotsb_conf. If the function 2482 + * is already bound then it is unbound first. 2483 + */ 2484 + #define HV_FAST_PCI_IOTSB_BIND 0x193 2485 + 2486 + /* pci_iotsb_unbind() 2487 + * TRAP: HV_FAST_TRAP 2488 + * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND 2489 + * ARG0: devhandle 2490 + * ARG1: iotsb_handle 2491 + * ARG2: pci_device 2492 + * RET0: status 2493 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device 2494 + * ENOMAP The PCI function was not bound to the specified IOTSB 2495 + * 2496 + * This service unbinds the PCI device specified by the argument pci_device 2497 + * from the IOTSB identified * by the arguments devhandle and iotsb_handle. 
2498 + * 2499 + * If the PCI device is not bound to the specified IOTSB then this service will 2500 + * fail with status ENOMAP 2501 + */ 2502 + #define HV_FAST_PCI_IOTSB_UNBIND 0x194 2503 + 2504 + /* pci_iotsb_get_binding() 2505 + * TRAP: HV_FAST_TRAP 2506 + * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING 2507 + * ARG0: devhandle 2508 + * ARG1: iotsb_handle 2509 + * ARG2: iova 2510 + * RET0: status 2511 + * RET1: iotsb_handle 2512 + * ERRORS: EINVAL Invalid devhandle, pci_device, or iova 2513 + * ENOMAP The PCI function is not bound to an IOTSB at iova 2514 + * 2515 + * This service returns the IOTSB binding, iotsb_handle, for a given pci_device 2516 + * and DMA virtual address, iova. 2517 + * 2518 + * iova must be the base address of a DMA virtual address range as defined by 2519 + * the iommu-address-ranges property in the root complex device node defined 2520 + * by the argument devhandle. 2521 + */ 2522 + #define HV_FAST_PCI_IOTSB_GET_BINDING 0x195 2523 + 2524 + /* pci_iotsb_map() 2525 + * TRAP: HV_FAST_TRAP 2526 + * FUNCTION: HV_FAST_PCI_IOTSB_MAP 2527 + * ARG0: devhandle 2528 + * ARG1: iotsb_handle 2529 + * ARG2: index_count 2530 + * ARG3: iotte_attributes 2531 + * ARG4: io_page_list_p 2532 + * RET0: status 2533 + * RET1: #mapped 2534 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes, 2535 + * iotsb_index or iotte_attributes 2536 + * EBADALIGN Improperly aligned io_page_list_p or I/O page 2537 + * address in the I/O page list. 2538 + * ENORADDR Invalid io_page_list_p or I/O page address in 2539 + * the I/O page list. 2540 + * 2541 + * This service creates and flushes mappings in the IOTSB defined by the 2542 + * arguments devhandle, iotsb. 2543 + * 2544 + * The index_count argument consists of two fields. Bits 63:48 contain #iotte 2545 + * and bits 47:0 contain iotsb_index 2546 + * 2547 + * The first mapping is created in the IOTSB index specified by iotsb_index. 2548 + * Subsequent mappings are created at iotsb_index+1 and so on. 
2549 + * 2550 + * The attributes of each mapping are defined by the argument iotte_attributes. 2551 + * 2552 + * The io_page_list_p specifies the real address of the 64-bit-aligned list of 2553 + * #iottes I/O page addresses. Each page address must be a properly aligned 2554 + * real address of a page to be mapped in the IOTSB. The first entry in the I/O 2555 + * page list contains the real address of the first page, the 2nd entry for the 2556 + * 2nd page, and so on. 2557 + * 2558 + * #iottes must be greater than zero. 2559 + * 2560 + * The return value #mapped is the actual number of mappings created, which may 2561 + * be less than or equal to the argument #iottes. If the function returns 2562 + * successfully with a #mapped value less than the requested #iottes then the 2563 + * caller should continue to invoke the service with updated iotsb_index, 2564 + * #iottes, and io_page_list_p arguments until all pages are mapped. 2565 + * 2566 + * This service must not be used to demap a mapping. In other words, all 2567 + * mappings must be valid and have one or both of the RW attribute bits set. 2568 + * 2569 + * Note: 2570 + * It is implementation-defined whether I/O page real address validity checking 2571 + * is done at time mappings are established or deferred until they are 2572 + * accessed. 2573 + */ 2574 + #define HV_FAST_PCI_IOTSB_MAP 0x196 2575 + 2576 + /* pci_iotsb_map_one() 2577 + * TRAP: HV_FAST_TRAP 2578 + * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE 2579 + * ARG0: devhandle 2580 + * ARG1: iotsb_handle 2581 + * ARG2: iotsb_index 2582 + * ARG3: iotte_attributes 2583 + * ARG4: r_addr 2584 + * RET0: status 2585 + * ERRORS: EINVAL Invalid devhandle,iotsb_handle, iotsb_index 2586 + * or iotte_attributes 2587 + * EBADALIGN Improperly aligned r_addr 2588 + * ENORADDR Invalid r_addr 2589 + * 2590 + * This service creates and flushes a single mapping in the IOTSB defined by the 2591 + * arguments devhandle, iotsb. 
2592 + * 2593 + * The mapping for the page at r_addr is created at the IOTSB index specified by 2594 + * iotsb_index with the attributes iotte_attributes. 2595 + * 2596 + * This service must not be used to demap a mapping. In other words, the mapping 2597 + * must be valid and have one or both of the RW attribute bits set. 2598 + * 2599 + * Note: 2600 + * It is implementation-defined whether I/O page real address validity checking 2601 + * is done at time mappings are established or deferred until they are 2602 + * accessed. 2603 + */ 2604 + #define HV_FAST_PCI_IOTSB_MAP_ONE 0x197 2605 + 2606 + /* pci_iotsb_demap() 2607 + * TRAP: HV_FAST_TRAP 2608 + * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP 2609 + * ARG0: devhandle 2610 + * ARG1: iotsb_handle 2611 + * ARG2: iotsb_index 2612 + * ARG3: #iottes 2613 + * RET0: status 2614 + * RET1: #unmapped 2615 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes 2616 + * 2617 + * This service unmaps and flushes up to #iottes mappings starting at index 2618 + * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb. 2619 + * 2620 + * #iottes must be greater than zero. 2621 + * 2622 + * The actual number of IOTTEs unmapped is returned in #unmapped and may be less 2623 + * than or equal to the requested number of IOTTEs, #iottes. 2624 + * 2625 + * If #unmapped is less than #iottes, the caller should continue to invoke this 2626 + * service with updated iotsb_index and #iottes arguments until all pages are 2627 + * demapped. 
2628 + */ 2629 + #define HV_FAST_PCI_IOTSB_DEMAP 0x198 2630 + 2631 + /* pci_iotsb_getmap() 2632 + * TRAP: HV_FAST_TRAP 2633 + * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP 2634 + * ARG0: devhandle 2635 + * ARG1: iotsb_handle 2636 + * ARG2: iotsb_index 2637 + * RET0: status 2638 + * RET1: r_addr 2639 + * RET2: iotte_attributes 2640 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index 2641 + * ENOMAP No mapping was found 2642 + * 2643 + * This service returns the mapping specified by index iotsb_index from the 2644 + * IOTSB defined by the arguments devhandle, iotsb. 2645 + * 2646 + * Upon success, the real address of the mapping shall be returned in 2647 + * r_addr and thethe IOTTE mapping attributes shall be returned in 2648 + * iotte_attributes. 2649 + * 2650 + * The return value iotte_attributes may not include optional features used in 2651 + * the call to create the mapping. 2652 + */ 2653 + #define HV_FAST_PCI_IOTSB_GETMAP 0x199 2654 + 2655 + /* pci_iotsb_sync_mappings() 2656 + * TRAP: HV_FAST_TRAP 2657 + * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 2658 + * ARG0: devhandle 2659 + * ARG1: iotsb_handle 2660 + * ARG2: iotsb_index 2661 + * ARG3: #iottes 2662 + * RET0: status 2663 + * RET1: #synced 2664 + * ERROS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes 2665 + * 2666 + * This service synchronizes #iottes mappings starting at index iotsb_index in 2667 + * the IOTSB defined by the arguments devhandle, iotsb. 2668 + * 2669 + * #iottes must be greater than zero. 2670 + * 2671 + * The actual number of IOTTEs synchronized is returned in #synced, which may 2672 + * be less than or equal to the requested number, #iottes. 2673 + * 2674 + * Upon a successful return, #synced is less than #iottes, the caller should 2675 + * continue to invoke this service with updated iotsb_index and #iottes 2676 + * arguments until all pages are synchronized. 
2677 + */ 2678 + #define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a 2679 + 2338 2680 /* Logical Domain Channel services. */ 2339 2681 2340 2682 #define LDC_CHANNEL_DOWN 0 ··· 3335 2993 #define HV_GRP_SDIO 0x0108 3336 2994 #define HV_GRP_SDIO_ERR 0x0109 3337 2995 #define HV_GRP_REBOOT_DATA 0x0110 2996 + #define HV_GRP_ATU 0x0111 3338 2997 #define HV_GRP_M7_PERF 0x0114 3339 2998 #define HV_GRP_NIAG_PERF 0x0200 3340 2999 #define HV_GRP_FIRE_PERF 0x0201
+28
arch/sparc/include/asm/iommu_64.h
··· 24 24 unsigned int limit; 25 25 }; 26 26 27 + #define ATU_64_SPACE_SIZE 0x800000000 /* 32G */ 28 + 29 + /* Data structures for SPARC ATU architecture */ 30 + struct atu_iotsb { 31 + void *table; /* IOTSB table base virtual addr*/ 32 + u64 ra; /* IOTSB table real addr */ 33 + u64 dvma_size; /* ranges[3].size or OS selected 32G size */ 34 + u64 dvma_base; /* ranges[3].base */ 35 + u64 table_size; /* IOTSB table size */ 36 + u64 page_size; /* IO PAGE size for IOTSB */ 37 + u32 iotsb_num; /* tsbnum is same as iotsb_handle */ 38 + }; 39 + 40 + struct atu_ranges { 41 + u64 base; 42 + u64 size; 43 + }; 44 + 45 + struct atu { 46 + struct atu_ranges *ranges; 47 + struct atu_iotsb *iotsb; 48 + struct iommu_map_table tbl; 49 + u64 base; 50 + u64 size; 51 + u64 dma_addr_mask; 52 + }; 53 + 27 54 struct iommu { 28 55 struct iommu_map_table tbl; 56 + struct atu *atu; 29 57 spinlock_t lock; 30 58 u32 dma_addr_mask; 31 59 iopte_t *page_table;
+1
arch/sparc/kernel/hvapi.c
··· 39 39 { .group = HV_GRP_SDIO, }, 40 40 { .group = HV_GRP_SDIO_ERR, }, 41 41 { .group = HV_GRP_REBOOT_DATA, }, 42 + { .group = HV_GRP_ATU, .flags = FLAG_PRE_API }, 42 43 { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, 43 44 { .group = HV_GRP_FIRE_PERF, }, 44 45 { .group = HV_GRP_N2_CPU, },
+6 -2
arch/sparc/kernel/iommu.c
··· 760 760 struct iommu *iommu = dev->archdata.iommu; 761 761 u64 dma_addr_mask = iommu->dma_addr_mask; 762 762 763 - if (device_mask >= (1UL << 32UL)) 764 - return 0; 763 + if (device_mask > DMA_BIT_MASK(32)) { 764 + if (iommu->atu) 765 + dma_addr_mask = iommu->atu->dma_addr_mask; 766 + else 767 + return 0; 768 + } 765 769 766 770 if ((device_mask & dma_addr_mask) == dma_addr_mask) 767 771 return 1;
-1
arch/sparc/kernel/iommu_common.h
··· 13 13 #include <linux/scatterlist.h> 14 14 #include <linux/device.h> 15 15 #include <linux/iommu-helper.h> 16 - #include <linux/scatterlist.h> 17 16 18 17 #include <asm/iommu.h> 19 18
+360 -58
arch/sparc/kernel/pci_sun4v.c
··· 44 44 { .major = 1, .minor = 1 }, 45 45 }; 46 46 47 + static unsigned long vatu_major = 1; 48 + static unsigned long vatu_minor = 1; 49 + 47 50 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) 48 51 49 52 struct iommu_batch { ··· 72 69 } 73 70 74 71 /* Interrupts must be disabled. */ 75 - static long iommu_batch_flush(struct iommu_batch *p) 72 + static long iommu_batch_flush(struct iommu_batch *p, u64 mask) 76 73 { 77 74 struct pci_pbm_info *pbm = p->dev->archdata.host_controller; 75 + u64 *pglist = p->pglist; 76 + u64 index_count; 78 77 unsigned long devhandle = pbm->devhandle; 79 78 unsigned long prot = p->prot; 80 79 unsigned long entry = p->entry; 81 - u64 *pglist = p->pglist; 82 80 unsigned long npages = p->npages; 81 + unsigned long iotsb_num; 82 + unsigned long ret; 83 + long num; 83 84 84 85 /* VPCI maj=1, min=[0,1] only supports read and write */ 85 86 if (vpci_major < 2) 86 87 prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); 87 88 88 89 while (npages != 0) { 89 - long num; 90 - 91 - num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), 92 - npages, prot, __pa(pglist)); 93 - if (unlikely(num < 0)) { 94 - if (printk_ratelimit()) 95 - printk("iommu_batch_flush: IOMMU map of " 96 - "[%08lx:%08llx:%lx:%lx:%lx] failed with " 97 - "status %ld\n", 98 - devhandle, HV_PCI_TSBID(0, entry), 99 - npages, prot, __pa(pglist), num); 100 - return -1; 90 + if (mask <= DMA_BIT_MASK(32)) { 91 + num = pci_sun4v_iommu_map(devhandle, 92 + HV_PCI_TSBID(0, entry), 93 + npages, 94 + prot, 95 + __pa(pglist)); 96 + if (unlikely(num < 0)) { 97 + pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n", 98 + __func__, 99 + devhandle, 100 + HV_PCI_TSBID(0, entry), 101 + npages, prot, __pa(pglist), 102 + num); 103 + return -1; 104 + } 105 + } else { 106 + index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), 107 + iotsb_num = pbm->iommu->atu->iotsb->iotsb_num; 108 + ret = pci_sun4v_iotsb_map(devhandle, 109 + iotsb_num, 110 + index_count, 
111 + prot, 112 + __pa(pglist), 113 + &num); 114 + if (unlikely(ret != HV_EOK)) { 115 + pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n", 116 + __func__, 117 + devhandle, iotsb_num, 118 + index_count, prot, 119 + __pa(pglist), ret); 120 + return -1; 121 + } 101 122 } 102 - 103 123 entry += num; 104 124 npages -= num; 105 125 pglist += num; ··· 134 108 return 0; 135 109 } 136 110 137 - static inline void iommu_batch_new_entry(unsigned long entry) 111 + static inline void iommu_batch_new_entry(unsigned long entry, u64 mask) 138 112 { 139 113 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 140 114 141 115 if (p->entry + p->npages == entry) 142 116 return; 143 117 if (p->entry != ~0UL) 144 - iommu_batch_flush(p); 118 + iommu_batch_flush(p, mask); 145 119 p->entry = entry; 146 120 } 147 121 148 122 /* Interrupts must be disabled. */ 149 - static inline long iommu_batch_add(u64 phys_page) 123 + static inline long iommu_batch_add(u64 phys_page, u64 mask) 150 124 { 151 125 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 152 126 ··· 154 128 155 129 p->pglist[p->npages++] = phys_page; 156 130 if (p->npages == PGLIST_NENTS) 157 - return iommu_batch_flush(p); 131 + return iommu_batch_flush(p, mask); 158 132 159 133 return 0; 160 134 } 161 135 162 136 /* Interrupts must be disabled. 
*/ 163 - static inline long iommu_batch_end(void) 137 + static inline long iommu_batch_end(u64 mask) 164 138 { 165 139 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 166 140 167 141 BUG_ON(p->npages >= PGLIST_NENTS); 168 142 169 - return iommu_batch_flush(p); 143 + return iommu_batch_flush(p, mask); 170 144 } 171 145 172 146 static void *dma_4v_alloc_coherent(struct device *dev, size_t size, 173 147 dma_addr_t *dma_addrp, gfp_t gfp, 174 148 unsigned long attrs) 175 149 { 150 + u64 mask; 176 151 unsigned long flags, order, first_page, npages, n; 177 152 unsigned long prot = 0; 178 153 struct iommu *iommu; 154 + struct atu *atu; 155 + struct iommu_map_table *tbl; 179 156 struct page *page; 180 157 void *ret; 181 158 long entry; ··· 203 174 memset((char *)first_page, 0, PAGE_SIZE << order); 204 175 205 176 iommu = dev->archdata.iommu; 177 + atu = iommu->atu; 206 178 207 - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 179 + mask = dev->coherent_dma_mask; 180 + if (mask <= DMA_BIT_MASK(32)) 181 + tbl = &iommu->tbl; 182 + else 183 + tbl = &atu->tbl; 184 + 185 + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, 208 186 (unsigned long)(-1), 0); 209 187 210 188 if (unlikely(entry == IOMMU_ERROR_CODE)) 211 189 goto range_alloc_fail; 212 190 213 - *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); 191 + *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); 214 192 ret = (void *) first_page; 215 193 first_page = __pa(first_page); 216 194 ··· 229 193 entry); 230 194 231 195 for (n = 0; n < npages; n++) { 232 - long err = iommu_batch_add(first_page + (n * PAGE_SIZE)); 196 + long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask); 233 197 if (unlikely(err < 0L)) 234 198 goto iommu_map_fail; 235 199 } 236 200 237 - if (unlikely(iommu_batch_end() < 0L)) 201 + if (unlikely(iommu_batch_end(mask) < 0L)) 238 202 goto iommu_map_fail; 239 203 240 204 local_irq_restore(flags); ··· 242 206 return ret; 243 207 244 208 
iommu_map_fail: 245 - iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); 209 + iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); 246 210 247 211 range_alloc_fail: 248 212 free_pages(first_page, order); 249 213 return NULL; 250 214 } 251 215 252 - static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, 253 - unsigned long npages) 216 + unsigned long dma_4v_iotsb_bind(unsigned long devhandle, 217 + unsigned long iotsb_num, 218 + struct pci_bus *bus_dev) 254 219 { 255 - u32 devhandle = *(u32 *)demap_arg; 220 + struct pci_dev *pdev; 221 + unsigned long err; 222 + unsigned int bus; 223 + unsigned int device; 224 + unsigned int fun; 225 + 226 + list_for_each_entry(pdev, &bus_dev->devices, bus_list) { 227 + if (pdev->subordinate) { 228 + /* No need to bind pci bridge */ 229 + dma_4v_iotsb_bind(devhandle, iotsb_num, 230 + pdev->subordinate); 231 + } else { 232 + bus = bus_dev->number; 233 + device = PCI_SLOT(pdev->devfn); 234 + fun = PCI_FUNC(pdev->devfn); 235 + err = pci_sun4v_iotsb_bind(devhandle, iotsb_num, 236 + HV_PCI_DEVICE_BUILD(bus, 237 + device, 238 + fun)); 239 + 240 + /* If bind fails for one device it is going to fail 241 + * for rest of the devices because we are sharing 242 + * IOTSB. So in case of failure simply return with 243 + * error. 
244 + */ 245 + if (err) 246 + return err; 247 + } 248 + } 249 + 250 + return 0; 251 + } 252 + 253 + static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, 254 + dma_addr_t dvma, unsigned long iotsb_num, 255 + unsigned long entry, unsigned long npages) 256 + { 256 257 unsigned long num, flags; 258 + unsigned long ret; 257 259 258 260 local_irq_save(flags); 259 261 do { 260 - num = pci_sun4v_iommu_demap(devhandle, 261 - HV_PCI_TSBID(0, entry), 262 - npages); 263 - 262 + if (dvma <= DMA_BIT_MASK(32)) { 263 + num = pci_sun4v_iommu_demap(devhandle, 264 + HV_PCI_TSBID(0, entry), 265 + npages); 266 + } else { 267 + ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num, 268 + entry, npages, &num); 269 + if (unlikely(ret != HV_EOK)) { 270 + pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n", 271 + ret); 272 + } 273 + } 264 274 entry += num; 265 275 npages -= num; 266 276 } while (npages != 0); ··· 318 236 { 319 237 struct pci_pbm_info *pbm; 320 238 struct iommu *iommu; 239 + struct atu *atu; 240 + struct iommu_map_table *tbl; 321 241 unsigned long order, npages, entry; 242 + unsigned long iotsb_num; 322 243 u32 devhandle; 323 244 324 245 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 325 246 iommu = dev->archdata.iommu; 326 247 pbm = dev->archdata.host_controller; 248 + atu = iommu->atu; 327 249 devhandle = pbm->devhandle; 328 - entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); 329 - dma_4v_iommu_demap(&devhandle, entry, npages); 330 - iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); 250 + 251 + if (dvma <= DMA_BIT_MASK(32)) { 252 + tbl = &iommu->tbl; 253 + iotsb_num = 0; /* we don't care for legacy iommu */ 254 + } else { 255 + tbl = &atu->tbl; 256 + iotsb_num = atu->iotsb->iotsb_num; 257 + } 258 + entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT); 259 + dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages); 260 + iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE); 331 261 order = 
get_order(size); 332 262 if (order < 10) 333 263 free_pages((unsigned long)cpu, order); ··· 351 257 unsigned long attrs) 352 258 { 353 259 struct iommu *iommu; 260 + struct atu *atu; 261 + struct iommu_map_table *tbl; 262 + u64 mask; 354 263 unsigned long flags, npages, oaddr; 355 264 unsigned long i, base_paddr; 356 - u32 bus_addr, ret; 357 265 unsigned long prot; 266 + dma_addr_t bus_addr, ret; 358 267 long entry; 359 268 360 269 iommu = dev->archdata.iommu; 270 + atu = iommu->atu; 361 271 362 272 if (unlikely(direction == DMA_NONE)) 363 273 goto bad; ··· 370 272 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 371 273 npages >>= IO_PAGE_SHIFT; 372 274 373 - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 275 + mask = *dev->dma_mask; 276 + if (mask <= DMA_BIT_MASK(32)) 277 + tbl = &iommu->tbl; 278 + else 279 + tbl = &atu->tbl; 280 + 281 + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, 374 282 (unsigned long)(-1), 0); 375 283 376 284 if (unlikely(entry == IOMMU_ERROR_CODE)) 377 285 goto bad; 378 286 379 - bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); 287 + bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); 380 288 ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 381 289 base_paddr = __pa(oaddr & IO_PAGE_MASK); 382 290 prot = HV_PCI_MAP_ATTR_READ; ··· 397 293 iommu_batch_start(dev, prot, entry); 398 294 399 295 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { 400 - long err = iommu_batch_add(base_paddr); 296 + long err = iommu_batch_add(base_paddr, mask); 401 297 if (unlikely(err < 0L)) 402 298 goto iommu_map_fail; 403 299 } 404 - if (unlikely(iommu_batch_end() < 0L)) 300 + if (unlikely(iommu_batch_end(mask) < 0L)) 405 301 goto iommu_map_fail; 406 302 407 303 local_irq_restore(flags); ··· 414 310 return DMA_ERROR_CODE; 415 311 416 312 iommu_map_fail: 417 - iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); 313 + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); 418 
314 return DMA_ERROR_CODE; 419 315 } 420 316 ··· 424 320 { 425 321 struct pci_pbm_info *pbm; 426 322 struct iommu *iommu; 323 + struct atu *atu; 324 + struct iommu_map_table *tbl; 427 325 unsigned long npages; 326 + unsigned long iotsb_num; 428 327 long entry; 429 328 u32 devhandle; 430 329 ··· 439 332 440 333 iommu = dev->archdata.iommu; 441 334 pbm = dev->archdata.host_controller; 335 + atu = iommu->atu; 442 336 devhandle = pbm->devhandle; 443 337 444 338 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 445 339 npages >>= IO_PAGE_SHIFT; 446 340 bus_addr &= IO_PAGE_MASK; 447 - entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; 448 - dma_4v_iommu_demap(&devhandle, entry, npages); 449 - iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); 341 + 342 + if (bus_addr <= DMA_BIT_MASK(32)) { 343 + iotsb_num = 0; /* we don't care for legacy iommu */ 344 + tbl = &iommu->tbl; 345 + } else { 346 + iotsb_num = atu->iotsb->iotsb_num; 347 + tbl = &atu->tbl; 348 + } 349 + entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT; 350 + dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages); 351 + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); 450 352 } 451 353 452 354 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, ··· 469 353 unsigned long seg_boundary_size; 470 354 int outcount, incount, i; 471 355 struct iommu *iommu; 356 + struct atu *atu; 357 + struct iommu_map_table *tbl; 358 + u64 mask; 472 359 unsigned long base_shift; 473 360 long err; 474 361 475 362 BUG_ON(direction == DMA_NONE); 476 363 477 364 iommu = dev->archdata.iommu; 365 + atu = iommu->atu; 366 + 478 367 if (nelems == 0 || !iommu) 479 368 return 0; 480 369 ··· 505 384 max_seg_size = dma_get_max_seg_size(dev); 506 385 seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 507 386 IO_PAGE_SIZE) >> IO_PAGE_SHIFT; 508 - base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; 387 + 388 + mask = 
*dev->dma_mask; 389 + if (mask <= DMA_BIT_MASK(32)) 390 + tbl = &iommu->tbl; 391 + else 392 + tbl = &atu->tbl; 393 + 394 + base_shift = tbl->table_map_base >> IO_PAGE_SHIFT; 395 + 509 396 for_each_sg(sglist, s, nelems, i) { 510 397 unsigned long paddr, npages, entry, out_entry = 0, slen; 511 398 ··· 526 397 /* Allocate iommu entries for that segment */ 527 398 paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); 528 399 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); 529 - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, 400 + entry = iommu_tbl_range_alloc(dev, tbl, npages, 530 401 &handle, (unsigned long)(-1), 0); 531 402 532 403 /* Handle failure */ 533 404 if (unlikely(entry == IOMMU_ERROR_CODE)) { 534 - if (printk_ratelimit()) 535 - printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" 536 - " npages %lx\n", iommu, paddr, npages); 405 + pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n", 406 + tbl, paddr, npages); 537 407 goto iommu_map_failed; 538 408 } 539 409 540 - iommu_batch_new_entry(entry); 410 + iommu_batch_new_entry(entry, mask); 541 411 542 412 /* Convert entry to a dma_addr_t */ 543 - dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); 413 + dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT); 544 414 dma_addr |= (s->offset & ~IO_PAGE_MASK); 545 415 546 416 /* Insert into HW table */ 547 417 paddr &= IO_PAGE_MASK; 548 418 while (npages--) { 549 - err = iommu_batch_add(paddr); 419 + err = iommu_batch_add(paddr, mask); 550 420 if (unlikely(err < 0L)) 551 421 goto iommu_map_failed; 552 422 paddr += IO_PAGE_SIZE; ··· 580 452 dma_next = dma_addr + slen; 581 453 } 582 454 583 - err = iommu_batch_end(); 455 + err = iommu_batch_end(mask); 584 456 585 457 if (unlikely(err < 0L)) 586 458 goto iommu_map_failed; ··· 603 475 vaddr = s->dma_address & IO_PAGE_MASK; 604 476 npages = iommu_num_pages(s->dma_address, s->dma_length, 605 477 IO_PAGE_SIZE); 606 - iommu_tbl_range_free(&iommu->tbl, vaddr, npages, 478 + 
iommu_tbl_range_free(tbl, vaddr, npages, 607 479 IOMMU_ERROR_CODE); 608 480 /* XXX demap? XXX */ 609 481 s->dma_address = DMA_ERROR_CODE; ··· 624 496 struct pci_pbm_info *pbm; 625 497 struct scatterlist *sg; 626 498 struct iommu *iommu; 499 + struct atu *atu; 627 500 unsigned long flags, entry; 501 + unsigned long iotsb_num; 628 502 u32 devhandle; 629 503 630 504 BUG_ON(direction == DMA_NONE); 631 505 632 506 iommu = dev->archdata.iommu; 633 507 pbm = dev->archdata.host_controller; 508 + atu = iommu->atu; 634 509 devhandle = pbm->devhandle; 635 510 636 511 local_irq_save(flags); ··· 643 512 dma_addr_t dma_handle = sg->dma_address; 644 513 unsigned int len = sg->dma_length; 645 514 unsigned long npages; 646 - struct iommu_map_table *tbl = &iommu->tbl; 515 + struct iommu_map_table *tbl; 647 516 unsigned long shift = IO_PAGE_SHIFT; 648 517 649 518 if (!len) 650 519 break; 651 520 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); 521 + 522 + if (dma_handle <= DMA_BIT_MASK(32)) { 523 + iotsb_num = 0; /* we don't care for legacy iommu */ 524 + tbl = &iommu->tbl; 525 + } else { 526 + iotsb_num = atu->iotsb->iotsb_num; 527 + tbl = &atu->tbl; 528 + } 652 529 entry = ((dma_handle - tbl->table_map_base) >> shift); 653 - dma_4v_iommu_demap(&devhandle, entry, npages); 654 - iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, 530 + dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num, 531 + entry, npages); 532 + iommu_tbl_range_free(tbl, dma_handle, npages, 655 533 IOMMU_ERROR_CODE); 656 534 sg = sg_next(sg); 657 535 } ··· 719 579 } 720 580 } 721 581 return cnt; 582 + } 583 + 584 + static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm) 585 + { 586 + struct atu *atu = pbm->iommu->atu; 587 + struct atu_iotsb *iotsb; 588 + void *table; 589 + u64 table_size; 590 + u64 iotsb_num; 591 + unsigned long order; 592 + unsigned long err; 593 + 594 + iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL); 595 + if (!iotsb) { 596 + err = -ENOMEM; 597 + goto out_err; 598 + } 599 + 
atu->iotsb = iotsb; 600 + 601 + /* calculate size of IOTSB */ 602 + table_size = (atu->size / IO_PAGE_SIZE) * 8; 603 + order = get_order(table_size); 604 + table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 605 + if (!table) { 606 + err = -ENOMEM; 607 + goto table_failed; 608 + } 609 + iotsb->table = table; 610 + iotsb->ra = __pa(table); 611 + iotsb->dvma_size = atu->size; 612 + iotsb->dvma_base = atu->base; 613 + iotsb->table_size = table_size; 614 + iotsb->page_size = IO_PAGE_SIZE; 615 + 616 + /* configure and register IOTSB with HV */ 617 + err = pci_sun4v_iotsb_conf(pbm->devhandle, 618 + iotsb->ra, 619 + iotsb->table_size, 620 + iotsb->page_size, 621 + iotsb->dvma_base, 622 + &iotsb_num); 623 + if (err) { 624 + pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err); 625 + goto iotsb_conf_failed; 626 + } 627 + iotsb->iotsb_num = iotsb_num; 628 + 629 + err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus); 630 + if (err) { 631 + pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err); 632 + goto iotsb_conf_failed; 633 + } 634 + 635 + return 0; 636 + 637 + iotsb_conf_failed: 638 + free_pages((unsigned long)table, order); 639 + table_failed: 640 + kfree(iotsb); 641 + out_err: 642 + return err; 643 + } 644 + 645 + static int pci_sun4v_atu_init(struct pci_pbm_info *pbm) 646 + { 647 + struct atu *atu = pbm->iommu->atu; 648 + unsigned long err; 649 + const u64 *ranges; 650 + u64 map_size, num_iotte; 651 + u64 dma_mask; 652 + const u32 *page_size; 653 + int len; 654 + 655 + ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges", 656 + &len); 657 + if (!ranges) { 658 + pr_err(PFX "No iommu-address-ranges\n"); 659 + return -EINVAL; 660 + } 661 + 662 + page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes", 663 + NULL); 664 + if (!page_size) { 665 + pr_err(PFX "No iommu-pagesizes\n"); 666 + return -EINVAL; 667 + } 668 + 669 + /* There are 4 iommu-address-ranges supported. Each range is pair of 670 + * {base, size}. 
The ranges[0] and ranges[1] are 32bit address space 671 + * while ranges[2] and ranges[3] are 64bit space. We want to use 64bit 672 + * address ranges to support 64bit addressing. Because 'size' for 673 + * address ranges[2] and ranges[3] are same we can select either of 674 + * ranges[2] or ranges[3] for mapping. However due to 'size' is too 675 + * large for OS to allocate IOTSB we are using fix size 32G 676 + * (ATU_64_SPACE_SIZE) which is more than enough for all PCIe devices 677 + * to share. 678 + */ 679 + atu->ranges = (struct atu_ranges *)ranges; 680 + atu->base = atu->ranges[3].base; 681 + atu->size = ATU_64_SPACE_SIZE; 682 + 683 + /* Create IOTSB */ 684 + err = pci_sun4v_atu_alloc_iotsb(pbm); 685 + if (err) { 686 + pr_err(PFX "Error creating ATU IOTSB\n"); 687 + return err; 688 + } 689 + 690 + /* Create ATU iommu map. 691 + * One bit represents one iotte in IOTSB table. 692 + */ 693 + dma_mask = (roundup_pow_of_two(atu->size) - 1UL); 694 + num_iotte = atu->size / IO_PAGE_SIZE; 695 + map_size = num_iotte / 8; 696 + atu->tbl.table_map_base = atu->base; 697 + atu->dma_addr_mask = dma_mask; 698 + atu->tbl.map = kzalloc(map_size, GFP_KERNEL); 699 + if (!atu->tbl.map) 700 + return -ENOMEM; 701 + 702 + iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT, 703 + NULL, false /* no large_pool */, 704 + 0 /* default npools */, 705 + false /* want span boundary checking */); 706 + 707 + return 0; 722 708 } 723 709 724 710 static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) ··· 1184 918 1185 919 pci_sun4v_scan_bus(pbm, &op->dev); 1186 920 921 + /* if atu_init fails its not complete failure. 922 + * we can still continue using legacy iommu. 
923 + */ 924 + if (pbm->iommu->atu) { 925 + err = pci_sun4v_atu_init(pbm); 926 + if (err) { 927 + kfree(pbm->iommu->atu); 928 + pbm->iommu->atu = NULL; 929 + pr_err(PFX "ATU init failed, err=%d\n", err); 930 + } 931 + } 932 + 1187 933 pbm->next = pci_pbm_root; 1188 934 pci_pbm_root = pbm; 1189 935 ··· 1209 931 struct pci_pbm_info *pbm; 1210 932 struct device_node *dp; 1211 933 struct iommu *iommu; 934 + struct atu *atu; 1212 935 u32 devhandle; 1213 936 int i, err = -ENODEV; 937 + static bool hv_atu = true; 1214 938 1215 939 dp = op->dev.of_node; 1216 940 ··· 1233 953 } 1234 954 pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n", 1235 955 vpci_major, vpci_minor); 956 + 957 + err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor); 958 + if (err) { 959 + /* don't return an error if we fail to register the 960 + * ATU group, but ATU hcalls won't be available. 961 + */ 962 + hv_atu = false; 963 + pr_err(PFX "Could not register hvapi ATU err=%d\n", 964 + err); 965 + } else { 966 + pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", 967 + vatu_major, vatu_minor); 968 + } 1236 969 1237 970 dma_ops = &sun4v_dma_ops; 1238 971 } ··· 1284 991 } 1285 992 1286 993 pbm->iommu = iommu; 994 + iommu->atu = NULL; 995 + if (hv_atu) { 996 + atu = kzalloc(sizeof(*atu), GFP_KERNEL); 997 + if (!atu) 998 + pr_err(PFX "Could not allocate atu\n"); 999 + else 1000 + iommu->atu = atu; 1001 + } 1287 1002 1288 1003 err = pci_sun4v_pbm_init(pbm, op, devhandle); 1289 1004 if (err) ··· 1302 1001 return 0; 1303 1002 1304 1003 out_free_iommu: 1004 + kfree(iommu->atu); 1305 1005 kfree(pbm->iommu); 1306 1006 1307 1007 out_free_controller:
+21
arch/sparc/kernel/pci_sun4v.h
··· 89 89 unsigned long msinum, 90 90 unsigned long valid); 91 91 92 + /* Sun4v HV IOMMU v2 APIs */ 93 + unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle, 94 + unsigned long ra, 95 + unsigned long table_size, 96 + unsigned long page_size, 97 + unsigned long dvma_base, 98 + u64 *iotsb_num); 99 + unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle, 100 + unsigned long iotsb_num, 101 + unsigned int pci_device); 102 + unsigned long pci_sun4v_iotsb_map(unsigned long devhandle, 103 + unsigned long iotsb_num, 104 + unsigned long iotsb_index_iottes, 105 + unsigned long io_attributes, 106 + unsigned long io_page_list_pa, 107 + long *mapped); 108 + unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle, 109 + unsigned long iotsb_num, 110 + unsigned long iotsb_index, 111 + unsigned long iottes, 112 + unsigned long *demapped); 92 113 #endif /* !(_PCI_SUN4V_H) */
+68
arch/sparc/kernel/pci_sun4v_asm.S
··· 360 360 mov %o0, %o0 361 361 ENDPROC(pci_sun4v_msg_setvalid) 362 362 363 + /* 364 + * %o0: devhandle 365 + * %o1: r_addr 366 + * %o2: size 367 + * %o3: pagesize 368 + * %o4: virt 369 + * %o5: &iotsb_num/&iotsb_handle 370 + * 371 + * returns %o0: status 372 + * %o1: iotsb_num/iotsb_handle 373 + */ 374 + ENTRY(pci_sun4v_iotsb_conf) 375 + mov %o5, %g1 376 + mov HV_FAST_PCI_IOTSB_CONF, %o5 377 + ta HV_FAST_TRAP 378 + retl 379 + stx %o1, [%g1] 380 + ENDPROC(pci_sun4v_iotsb_conf) 381 + 382 + /* 383 + * %o0: devhandle 384 + * %o1: iotsb_num/iotsb_handle 385 + * %o2: pci_device 386 + * 387 + * returns %o0: status 388 + */ 389 + ENTRY(pci_sun4v_iotsb_bind) 390 + mov HV_FAST_PCI_IOTSB_BIND, %o5 391 + ta HV_FAST_TRAP 392 + retl 393 + nop 394 + ENDPROC(pci_sun4v_iotsb_bind) 395 + 396 + /* 397 + * %o0: devhandle 398 + * %o1: iotsb_num/iotsb_handle 399 + * %o2: index_count 400 + * %o3: iotte_attributes 401 + * %o4: io_page_list_p 402 + * %o5: &mapped 403 + * 404 + * returns %o0: status 405 + * %o1: #mapped 406 + */ 407 + ENTRY(pci_sun4v_iotsb_map) 408 + mov %o5, %g1 409 + mov HV_FAST_PCI_IOTSB_MAP, %o5 410 + ta HV_FAST_TRAP 411 + retl 412 + stx %o1, [%g1] 413 + ENDPROC(pci_sun4v_iotsb_map) 414 + 415 + /* 416 + * %o0: devhandle 417 + * %o1: iotsb_num/iotsb_handle 418 + * %o2: iotsb_index 419 + * %o3: #iottes 420 + * %o4: &demapped 421 + * 422 + * returns %o0: status 423 + * %o1: #demapped 424 + */ 425 + ENTRY(pci_sun4v_iotsb_demap) 426 + mov HV_FAST_PCI_IOTSB_DEMAP, %o5 427 + ta HV_FAST_TRAP 428 + retl 429 + stx %o1, [%o4] 430 + ENDPROC(pci_sun4v_iotsb_demap)
+2 -2
arch/sparc/kernel/signal_32.c
··· 89 89 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; 90 90 91 91 /* 1. Make sure we are not getting garbage from the user */ 92 - if (!invalid_frame_pointer(sf, sizeof(*sf))) 92 + if (invalid_frame_pointer(sf, sizeof(*sf))) 93 93 goto segv_and_exit; 94 94 95 95 if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) ··· 150 150 151 151 synchronize_user_stack(); 152 152 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; 153 - if (!invalid_frame_pointer(sf, sizeof(*sf))) 153 + if (invalid_frame_pointer(sf, sizeof(*sf))) 154 154 goto segv; 155 155 156 156 if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+64 -7
arch/sparc/mm/init_64.c
··· 802 802 }; 803 803 static struct mdesc_mblock *mblocks; 804 804 static int num_mblocks; 805 + static int find_numa_node_for_addr(unsigned long pa, 806 + struct node_mem_mask *pnode_mask); 805 807 806 - static unsigned long ra_to_pa(unsigned long addr) 808 + static unsigned long __init ra_to_pa(unsigned long addr) 807 809 { 808 810 int i; 809 811 ··· 821 819 return addr; 822 820 } 823 821 824 - static int find_node(unsigned long addr) 822 + static int __init find_node(unsigned long addr) 825 823 { 824 + static bool search_mdesc = true; 825 + static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL }; 826 + static int last_index; 826 827 int i; 827 828 828 829 addr = ra_to_pa(addr); ··· 835 830 if ((addr & p->mask) == p->val) 836 831 return i; 837 832 } 838 - /* The following condition has been observed on LDOM guests.*/ 839 - WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" 840 - " rule. Some physical memory will be owned by node 0."); 841 - return 0; 833 + /* The following condition has been observed on LDOM guests because 834 + * node_masks only contains the best latency mask and value. 835 + * LDOM guest's mdesc can contain a single latency group to 836 + * cover multiple address range. Print warning message only if the 837 + * address cannot be found in node_masks nor mdesc. 838 + */ 839 + if ((search_mdesc) && 840 + ((addr & last_mem_mask.mask) != last_mem_mask.val)) { 841 + /* find the available node in the mdesc */ 842 + last_index = find_numa_node_for_addr(addr, &last_mem_mask); 843 + numadbg("find_node: latency group for address 0x%lx is %d\n", 844 + addr, last_index); 845 + if ((last_index < 0) || (last_index >= num_node_masks)) { 846 + /* WARN_ONCE() and use default group 0 */ 847 + WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. 
Some physical memory will be owned by node 0."); 848 + search_mdesc = false; 849 + last_index = 0; 850 + } 851 + } 852 + 853 + return last_index; 842 854 } 843 855 844 - static u64 memblock_nid_range(u64 start, u64 end, int *nid) 856 + static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) 845 857 { 846 858 *nid = find_node(start); 847 859 start += PAGE_SIZE; ··· 1180 1158 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE; 1181 1159 } 1182 1160 return numa_latency[from][to]; 1161 + } 1162 + 1163 + static int find_numa_node_for_addr(unsigned long pa, 1164 + struct node_mem_mask *pnode_mask) 1165 + { 1166 + struct mdesc_handle *md = mdesc_grab(); 1167 + u64 node, arc; 1168 + int i = 0; 1169 + 1170 + node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); 1171 + if (node == MDESC_NODE_NULL) 1172 + goto out; 1173 + 1174 + mdesc_for_each_node_by_name(md, node, "group") { 1175 + mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) { 1176 + u64 target = mdesc_arc_target(md, arc); 1177 + struct mdesc_mlgroup *m = find_mlgroup(target); 1178 + 1179 + if (!m) 1180 + continue; 1181 + if ((pa & m->mask) == m->match) { 1182 + if (pnode_mask) { 1183 + pnode_mask->mask = m->mask; 1184 + pnode_mask->val = m->match; 1185 + } 1186 + mdesc_release(md); 1187 + return i; 1188 + } 1189 + } 1190 + i++; 1191 + } 1192 + 1193 + out: 1194 + mdesc_release(md); 1195 + return -1; 1183 1196 } 1184 1197 1185 1198 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+3
arch/tile/include/asm/cache.h
··· 61 61 */ 62 62 #define __write_once __read_mostly 63 63 64 + /* __ro_after_init is the generic name for the tile arch __write_once. */ 65 + #define __ro_after_init __read_mostly 66 + 64 67 #endif /* _ASM_TILE_CACHE_H */
+2 -2
arch/x86/crypto/aesni-intel_glue.c
··· 888 888 unsigned long auth_tag_len = crypto_aead_authsize(tfm); 889 889 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 890 890 struct scatter_walk src_sg_walk; 891 - struct scatter_walk dst_sg_walk; 891 + struct scatter_walk dst_sg_walk = {}; 892 892 unsigned int i; 893 893 894 894 /* Assuming we are supporting rfc4106 64-bit extended */ ··· 968 968 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 969 969 u8 authTag[16]; 970 970 struct scatter_walk src_sg_walk; 971 - struct scatter_walk dst_sg_walk; 971 + struct scatter_walk dst_sg_walk = {}; 972 972 unsigned int i; 973 973 974 974 if (unlikely(req->assoclen != 16 && req->assoclen != 20))
+28 -4
arch/x86/events/intel/uncore_snb.c
··· 8 8 #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 9 9 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 10 10 #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 11 - #define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f 12 - #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c 11 + #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904 12 + #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c 13 + #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900 14 + #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 15 + #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f 16 + #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f 13 17 14 18 /* SNB event control */ 15 19 #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff ··· 620 616 621 617 static const struct pci_device_id skl_uncore_pci_ids[] = { 622 618 { /* IMC */ 623 - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC), 619 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC), 624 620 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 625 621 }, 626 622 { /* IMC */ 627 623 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), 624 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 625 + }, 626 + { /* IMC */ 627 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC), 628 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 629 + }, 630 + { /* IMC */ 631 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC), 632 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 633 + }, 634 + { /* IMC */ 635 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC), 636 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 637 + }, 638 + { /* IMC */ 639 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), 628 640 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 629 641 }, 630 642 ··· 686 666 IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ 687 667 IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ 688 668 IMC_DEV(BDW_IMC, 
&bdw_uncore_pci_driver), /* 5th Gen Core U */ 689 - IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */ 669 + IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */ 690 670 IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ 671 + IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */ 672 + IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ 673 + IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ 674 + IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ 691 675 { /* end marker */ } 692 676 }; 693 677
+1
arch/x86/include/asm/intel-mid.h
··· 17 17 18 18 extern int intel_mid_pci_init(void); 19 19 extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); 20 + extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev); 20 21 21 22 extern void intel_mid_pwr_power_off(void); 22 23
+4 -1
arch/x86/kernel/apm_32.c
··· 1042 1042 1043 1043 if (apm_info.get_power_status_broken) 1044 1044 return APM_32_UNSUPPORTED; 1045 - if (apm_bios_call(&call)) 1045 + if (apm_bios_call(&call)) { 1046 + if (!call.err) 1047 + return APM_NO_ERROR; 1046 1048 return call.err; 1049 + } 1047 1050 *status = call.ebx; 1048 1051 *bat = call.ecx; 1049 1052 if (apm_info.get_power_status_swabinminutes) {
+1 -5
arch/x86/kernel/cpu/amd.c
··· 347 347 #ifdef CONFIG_SMP 348 348 unsigned bits; 349 349 int cpu = smp_processor_id(); 350 - unsigned int socket_id, core_complex_id; 351 350 352 351 bits = c->x86_coreid_bits; 353 352 /* Low order bits define the core id (index of core in socket) */ ··· 364 365 if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) 365 366 return; 366 367 367 - socket_id = (c->apicid >> bits) - 1; 368 - core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3; 369 - 370 - per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id; 368 + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; 371 369 #endif 372 370 } 373 371
+30 -2
arch/x86/kernel/cpu/common.c
··· 979 979 } 980 980 981 981 /* 982 + * The physical to logical package id mapping is initialized from the 983 + * acpi/mptables information. Make sure that CPUID actually agrees with 984 + * that. 985 + */ 986 + static void sanitize_package_id(struct cpuinfo_x86 *c) 987 + { 988 + #ifdef CONFIG_SMP 989 + unsigned int pkg, apicid, cpu = smp_processor_id(); 990 + 991 + apicid = apic->cpu_present_to_apicid(cpu); 992 + pkg = apicid >> boot_cpu_data.x86_coreid_bits; 993 + 994 + if (apicid != c->initial_apicid) { 995 + pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n", 996 + cpu, apicid, c->initial_apicid); 997 + c->initial_apicid = apicid; 998 + } 999 + if (pkg != c->phys_proc_id) { 1000 + pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n", 1001 + cpu, pkg, c->phys_proc_id); 1002 + c->phys_proc_id = pkg; 1003 + } 1004 + c->logical_proc_id = topology_phys_to_logical_pkg(pkg); 1005 + #else 1006 + c->logical_proc_id = 0; 1007 + #endif 1008 + } 1009 + 1010 + /* 982 1011 * This does the hard work of actually picking apart the CPU stuff... 983 1012 */ 984 1013 static void identify_cpu(struct cpuinfo_x86 *c) ··· 1132 1103 #ifdef CONFIG_NUMA 1133 1104 numa_add_cpu(smp_processor_id()); 1134 1105 #endif 1135 - /* The boot/hotplug time assigment got cleared, restore it */ 1136 - c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id); 1106 + sanitize_package_id(c); 1137 1107 } 1138 1108 1139 1109 /*
+27 -31
arch/x86/kvm/irq_comm.c
··· 156 156 } 157 157 158 158 159 + static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, 160 + struct kvm *kvm, int irq_source_id, int level, 161 + bool line_status) 162 + { 163 + if (!level) 164 + return -1; 165 + 166 + return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); 167 + } 168 + 159 169 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, 160 170 struct kvm *kvm, int irq_source_id, int level, 161 171 bool line_status) ··· 173 163 struct kvm_lapic_irq irq; 174 164 int r; 175 165 176 - if (unlikely(e->type != KVM_IRQ_ROUTING_MSI)) 177 - return -EWOULDBLOCK; 166 + switch (e->type) { 167 + case KVM_IRQ_ROUTING_HV_SINT: 168 + return kvm_hv_set_sint(e, kvm, irq_source_id, level, 169 + line_status); 178 170 179 - if (kvm_msi_route_invalid(kvm, e)) 180 - return -EINVAL; 171 + case KVM_IRQ_ROUTING_MSI: 172 + if (kvm_msi_route_invalid(kvm, e)) 173 + return -EINVAL; 181 174 182 - kvm_set_msi_irq(kvm, e, &irq); 175 + kvm_set_msi_irq(kvm, e, &irq); 183 176 184 - if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) 185 - return r; 186 - else 187 - return -EWOULDBLOCK; 177 + if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) 178 + return r; 179 + break; 180 + 181 + default: 182 + break; 183 + } 184 + 185 + return -EWOULDBLOCK; 188 186 } 189 187 190 188 int kvm_request_irq_source_id(struct kvm *kvm) ··· 270 252 if (kimn->irq == gsi) 271 253 kimn->func(kimn, mask); 272 254 srcu_read_unlock(&kvm->irq_srcu, idx); 273 - } 274 - 275 - static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, 276 - struct kvm *kvm, int irq_source_id, int level, 277 - bool line_status) 278 - { 279 - if (!level) 280 - return -1; 281 - 282 - return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); 283 255 } 284 256 285 257 int kvm_set_routing_entry(struct kvm *kvm, ··· 429 421 } 430 422 } 431 423 srcu_read_unlock(&kvm->irq_srcu, idx); 432 - } 433 - 434 - int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry 
*irq, struct kvm *kvm, 435 - int irq_source_id, int level, bool line_status) 436 - { 437 - switch (irq->type) { 438 - case KVM_IRQ_ROUTING_HV_SINT: 439 - return kvm_hv_set_sint(irq, kvm, irq_source_id, level, 440 - line_status); 441 - default: 442 - return -EWOULDBLOCK; 443 - } 444 424 } 445 425 446 426 void kvm_arch_irq_routing_update(struct kvm *kvm)
+34 -13
arch/x86/kvm/x86.c
··· 210 210 struct kvm_shared_msrs *locals 211 211 = container_of(urn, struct kvm_shared_msrs, urn); 212 212 struct kvm_shared_msr_values *values; 213 + unsigned long flags; 213 214 215 + /* 216 + * Disabling irqs at this point since the following code could be 217 + * interrupted and executed through kvm_arch_hardware_disable() 218 + */ 219 + local_irq_save(flags); 220 + if (locals->registered) { 221 + locals->registered = false; 222 + user_return_notifier_unregister(urn); 223 + } 224 + local_irq_restore(flags); 214 225 for (slot = 0; slot < shared_msrs_global.nr; ++slot) { 215 226 values = &locals->values[slot]; 216 227 if (values->host != values->curr) { ··· 229 218 values->curr = values->host; 230 219 } 231 220 } 232 - locals->registered = false; 233 - user_return_notifier_unregister(urn); 234 221 } 235 222 236 223 static void shared_msr_update(unsigned slot, u32 msr) ··· 1733 1724 1734 1725 static u64 __get_kvmclock_ns(struct kvm *kvm) 1735 1726 { 1736 - struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0); 1737 1727 struct kvm_arch *ka = &kvm->arch; 1738 - s64 ns; 1728 + struct pvclock_vcpu_time_info hv_clock; 1739 1729 1740 - if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) { 1741 - u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 1742 - ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc); 1743 - } else { 1744 - ns = ktime_get_boot_ns() + ka->kvmclock_offset; 1730 + spin_lock(&ka->pvclock_gtod_sync_lock); 1731 + if (!ka->use_master_clock) { 1732 + spin_unlock(&ka->pvclock_gtod_sync_lock); 1733 + return ktime_get_boot_ns() + ka->kvmclock_offset; 1745 1734 } 1746 1735 1747 - return ns; 1736 + hv_clock.tsc_timestamp = ka->master_cycle_now; 1737 + hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 1738 + spin_unlock(&ka->pvclock_gtod_sync_lock); 1739 + 1740 + kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 1741 + &hv_clock.tsc_shift, 1742 + &hv_clock.tsc_to_system_mul); 1743 + return __pvclock_read_cycles(&hv_clock, rdtsc()); 
1748 1744 } 1749 1745 1750 1746 u64 get_kvmclock_ns(struct kvm *kvm) ··· 2610 2596 case KVM_CAP_PIT_STATE2: 2611 2597 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 2612 2598 case KVM_CAP_XEN_HVM: 2613 - case KVM_CAP_ADJUST_CLOCK: 2614 2599 case KVM_CAP_VCPU_EVENTS: 2615 2600 case KVM_CAP_HYPERV: 2616 2601 case KVM_CAP_HYPERV_VAPIC: ··· 2635 2622 case KVM_CAP_PCI_2_3: 2636 2623 #endif 2637 2624 r = 1; 2625 + break; 2626 + case KVM_CAP_ADJUST_CLOCK: 2627 + r = KVM_CLOCK_TSC_STABLE; 2638 2628 break; 2639 2629 case KVM_CAP_X86_SMM: 2640 2630 /* SMBASE is usually relocated above 1M on modern chipsets, ··· 3431 3415 }; 3432 3416 case KVM_SET_VAPIC_ADDR: { 3433 3417 struct kvm_vapic_addr va; 3418 + int idx; 3434 3419 3435 3420 r = -EINVAL; 3436 3421 if (!lapic_in_kernel(vcpu)) ··· 3439 3422 r = -EFAULT; 3440 3423 if (copy_from_user(&va, argp, sizeof va)) 3441 3424 goto out; 3425 + idx = srcu_read_lock(&vcpu->kvm->srcu); 3442 3426 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 3427 + srcu_read_unlock(&vcpu->kvm->srcu, idx); 3443 3428 break; 3444 3429 } 3445 3430 case KVM_X86_SETUP_MCE: { ··· 4122 4103 struct kvm_clock_data user_ns; 4123 4104 u64 now_ns; 4124 4105 4125 - now_ns = get_kvmclock_ns(kvm); 4106 + local_irq_disable(); 4107 + now_ns = __get_kvmclock_ns(kvm); 4126 4108 user_ns.clock = now_ns; 4127 - user_ns.flags = 0; 4109 + user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; 4110 + local_irq_enable(); 4128 4111 memset(&user_ns.pad, 0, sizeof(user_ns.pad)); 4129 4112 4130 4113 r = -EFAULT;
+1 -1
arch/x86/platform/efi/efi.c
··· 861 861 int count = 0, pg_shift = 0; 862 862 void *new_memmap = NULL; 863 863 efi_status_t status; 864 - phys_addr_t pa; 864 + unsigned long pa; 865 865 866 866 efi.systab = NULL; 867 867
+57 -23
arch/x86/platform/efi/efi_64.c
··· 31 31 #include <linux/io.h> 32 32 #include <linux/reboot.h> 33 33 #include <linux/slab.h> 34 + #include <linux/ucs2_string.h> 34 35 35 36 #include <asm/setup.h> 36 37 #include <asm/page.h> ··· 211 210 num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START); 212 211 memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); 213 212 } 213 + 214 + /* 215 + * Wrapper for slow_virt_to_phys() that handles NULL addresses. 216 + */ 217 + static inline phys_addr_t 218 + virt_to_phys_or_null_size(void *va, unsigned long size) 219 + { 220 + bool bad_size; 221 + 222 + if (!va) 223 + return 0; 224 + 225 + if (virt_addr_valid(va)) 226 + return virt_to_phys(va); 227 + 228 + /* 229 + * A fully aligned variable on the stack is guaranteed not to 230 + * cross a page bounary. Try to catch strings on the stack by 231 + * checking that 'size' is a power of two. 232 + */ 233 + bad_size = size > PAGE_SIZE || !is_power_of_2(size); 234 + 235 + WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size); 236 + 237 + return slow_virt_to_phys(va); 238 + } 239 + 240 + #define virt_to_phys_or_null(addr) \ 241 + virt_to_phys_or_null_size((addr), sizeof(*(addr))) 214 242 215 243 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) 216 244 { ··· 524 494 525 495 spin_lock(&rtc_lock); 526 496 527 - phys_tm = virt_to_phys(tm); 528 - phys_tc = virt_to_phys(tc); 497 + phys_tm = virt_to_phys_or_null(tm); 498 + phys_tc = virt_to_phys_or_null(tc); 529 499 530 500 status = efi_thunk(get_time, phys_tm, phys_tc); 531 501 ··· 541 511 542 512 spin_lock(&rtc_lock); 543 513 544 - phys_tm = virt_to_phys(tm); 514 + phys_tm = virt_to_phys_or_null(tm); 545 515 546 516 status = efi_thunk(set_time, phys_tm); 547 517 ··· 559 529 560 530 spin_lock(&rtc_lock); 561 531 562 - phys_enabled = virt_to_phys(enabled); 563 - phys_pending = virt_to_phys(pending); 564 - phys_tm = virt_to_phys(tm); 532 + phys_enabled = virt_to_phys_or_null(enabled); 533 + phys_pending = virt_to_phys_or_null(pending); 534 + phys_tm 
= virt_to_phys_or_null(tm); 565 535 566 536 status = efi_thunk(get_wakeup_time, phys_enabled, 567 537 phys_pending, phys_tm); ··· 579 549 580 550 spin_lock(&rtc_lock); 581 551 582 - phys_tm = virt_to_phys(tm); 552 + phys_tm = virt_to_phys_or_null(tm); 583 553 584 554 status = efi_thunk(set_wakeup_time, enabled, phys_tm); 585 555 ··· 588 558 return status; 589 559 } 590 560 561 + static unsigned long efi_name_size(efi_char16_t *name) 562 + { 563 + return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1; 564 + } 591 565 592 566 static efi_status_t 593 567 efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, ··· 601 567 u32 phys_name, phys_vendor, phys_attr; 602 568 u32 phys_data_size, phys_data; 603 569 604 - phys_data_size = virt_to_phys(data_size); 605 - phys_vendor = virt_to_phys(vendor); 606 - phys_name = virt_to_phys(name); 607 - phys_attr = virt_to_phys(attr); 608 - phys_data = virt_to_phys(data); 570 + phys_data_size = virt_to_phys_or_null(data_size); 571 + phys_vendor = virt_to_phys_or_null(vendor); 572 + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); 573 + phys_attr = virt_to_phys_or_null(attr); 574 + phys_data = virt_to_phys_or_null_size(data, *data_size); 609 575 610 576 status = efi_thunk(get_variable, phys_name, phys_vendor, 611 577 phys_attr, phys_data_size, phys_data); ··· 620 586 u32 phys_name, phys_vendor, phys_data; 621 587 efi_status_t status; 622 588 623 - phys_name = virt_to_phys(name); 624 - phys_vendor = virt_to_phys(vendor); 625 - phys_data = virt_to_phys(data); 589 + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); 590 + phys_vendor = virt_to_phys_or_null(vendor); 591 + phys_data = virt_to_phys_or_null_size(data, data_size); 626 592 627 593 /* If data_size is > sizeof(u32) we've got problems */ 628 594 status = efi_thunk(set_variable, phys_name, phys_vendor, ··· 639 605 efi_status_t status; 640 606 u32 phys_name_size, phys_name, phys_vendor; 641 607 642 - phys_name_size = virt_to_phys(name_size); 643 - 
phys_vendor = virt_to_phys(vendor); 644 - phys_name = virt_to_phys(name); 608 + phys_name_size = virt_to_phys_or_null(name_size); 609 + phys_vendor = virt_to_phys_or_null(vendor); 610 + phys_name = virt_to_phys_or_null_size(name, *name_size); 645 611 646 612 status = efi_thunk(get_next_variable, phys_name_size, 647 613 phys_name, phys_vendor); ··· 655 621 efi_status_t status; 656 622 u32 phys_count; 657 623 658 - phys_count = virt_to_phys(count); 624 + phys_count = virt_to_phys_or_null(count); 659 625 status = efi_thunk(get_next_high_mono_count, phys_count); 660 626 661 627 return status; ··· 667 633 { 668 634 u32 phys_data; 669 635 670 - phys_data = virt_to_phys(data); 636 + phys_data = virt_to_phys_or_null_size(data, data_size); 671 637 672 638 efi_thunk(reset_system, reset_type, status, data_size, phys_data); 673 639 } ··· 695 661 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 696 662 return EFI_UNSUPPORTED; 697 663 698 - phys_storage = virt_to_phys(storage_space); 699 - phys_remaining = virt_to_phys(remaining_space); 700 - phys_max = virt_to_phys(max_variable_size); 664 + phys_storage = virt_to_phys_or_null(storage_space); 665 + phys_remaining = virt_to_phys_or_null(remaining_space); 666 + phys_max = virt_to_phys_or_null(max_variable_size); 701 667 702 668 status = efi_thunk(query_variable_info, attr, phys_storage, 703 669 phys_remaining, phys_max);
+19
arch/x86/platform/intel-mid/pwr.c
··· 272 272 } 273 273 EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state); 274 274 275 + pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev) 276 + { 277 + struct mid_pwr *pwr = midpwr; 278 + int id, reg, bit; 279 + u32 power; 280 + 281 + if (!pwr || !pwr->available) 282 + return PCI_UNKNOWN; 283 + 284 + id = intel_mid_pwr_get_lss_id(pdev); 285 + if (id < 0) 286 + return PCI_UNKNOWN; 287 + 288 + reg = (id * LSS_PWS_BITS) / 32; 289 + bit = (id * LSS_PWS_BITS) % 32; 290 + power = mid_pwr_get_state(pwr, reg); 291 + return (__force pci_power_t)((power >> bit) & 3); 292 + } 293 + 275 294 void intel_mid_pwr_power_off(void) 276 295 { 277 296 struct mid_pwr *pwr = midpwr;
+1
arch/x86/purgatory/Makefile
··· 16 16 17 17 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large 18 18 KBUILD_CFLAGS += -m$(BITS) 19 + KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 19 20 20 21 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE 21 22 $(call if_changed,ld)
+8 -1
arch/xtensa/include/uapi/asm/unistd.h
··· 767 767 #define __NR_pwritev2 347 768 768 __SYSCALL(347, sys_pwritev2, 6) 769 769 770 - #define __NR_syscall_count 348 770 + #define __NR_pkey_mprotect 348 771 + __SYSCALL(348, sys_pkey_mprotect, 4) 772 + #define __NR_pkey_alloc 349 773 + __SYSCALL(349, sys_pkey_alloc, 2) 774 + #define __NR_pkey_free 350 775 + __SYSCALL(350, sys_pkey_free, 1) 776 + 777 + #define __NR_syscall_count 351 771 778 772 779 /* 773 780 * sysxtensa syscall handler
+7 -7
arch/xtensa/kernel/time.c
··· 172 172 { 173 173 of_clk_init(NULL); 174 174 #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT 175 - printk("Calibrating CPU frequency "); 175 + pr_info("Calibrating CPU frequency "); 176 176 calibrate_ccount(); 177 - printk("%d.%02d MHz\n", (int)ccount_freq/1000000, 178 - (int)(ccount_freq/10000)%100); 177 + pr_cont("%d.%02d MHz\n", 178 + (int)ccount_freq / 1000000, 179 + (int)(ccount_freq / 10000) % 100); 179 180 #else 180 181 ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; 181 182 #endif ··· 211 210 void calibrate_delay(void) 212 211 { 213 212 loops_per_jiffy = ccount_freq / HZ; 214 - printk("Calibrating delay loop (skipped)... " 215 - "%lu.%02lu BogoMIPS preset\n", 216 - loops_per_jiffy/(1000000/HZ), 217 - (loops_per_jiffy/(10000/HZ)) % 100); 213 + pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n", 214 + loops_per_jiffy / (1000000 / HZ), 215 + (loops_per_jiffy / (10000 / HZ)) % 100); 218 216 } 219 217 #endif
+22 -52
arch/xtensa/kernel/traps.c
··· 465 465 466 466 for (i = 0; i < 16; i++) { 467 467 if ((i % 8) == 0) 468 - printk(KERN_INFO "a%02d:", i); 469 - printk(KERN_CONT " %08lx", regs->areg[i]); 468 + pr_info("a%02d:", i); 469 + pr_cont(" %08lx", regs->areg[i]); 470 470 } 471 - printk(KERN_CONT "\n"); 472 - 473 - printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", 474 - regs->pc, regs->ps, regs->depc, regs->excvaddr); 475 - printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", 476 - regs->lbeg, regs->lend, regs->lcount, regs->sar); 471 + pr_cont("\n"); 472 + pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", 473 + regs->pc, regs->ps, regs->depc, regs->excvaddr); 474 + pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", 475 + regs->lbeg, regs->lend, regs->lcount, regs->sar); 477 476 if (user_mode(regs)) 478 - printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", 479 - regs->windowbase, regs->windowstart, regs->wmask, 480 - regs->syscall); 477 + pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", 478 + regs->windowbase, regs->windowstart, regs->wmask, 479 + regs->syscall); 481 480 } 482 481 483 482 static int show_trace_cb(struct stackframe *frame, void *data) 484 483 { 485 484 if (kernel_text_address(frame->pc)) { 486 - printk(" [<%08lx>] ", frame->pc); 487 - print_symbol("%s\n", frame->pc); 485 + pr_cont(" [<%08lx>]", frame->pc); 486 + print_symbol(" %s\n", frame->pc); 488 487 } 489 488 return 0; 490 489 } ··· 493 494 if (!sp) 494 495 sp = stack_pointer(task); 495 496 496 - printk("Call Trace:"); 497 - #ifdef CONFIG_KALLSYMS 498 - printk("\n"); 499 - #endif 497 + pr_info("Call Trace:\n"); 500 498 walk_stackframe(sp, show_trace_cb, NULL); 501 - printk("\n"); 499 + #ifndef CONFIG_KALLSYMS 500 + pr_cont("\n"); 501 + #endif 502 502 } 503 - 504 - /* 505 - * This routine abuses get_user()/put_user() to reference pointers 506 - * with at least a bit of error checking ... 
507 - */ 508 503 509 504 static int kstack_depth_to_print = 24; 510 505 ··· 511 518 sp = stack_pointer(task); 512 519 stack = sp; 513 520 514 - printk("\nStack: "); 521 + pr_info("Stack:\n"); 515 522 516 523 for (i = 0; i < kstack_depth_to_print; i++) { 517 524 if (kstack_end(sp)) 518 525 break; 519 - if (i && ((i % 8) == 0)) 520 - printk("\n "); 521 - printk("%08lx ", *sp++); 526 + pr_cont(" %08lx", *sp++); 527 + if (i % 8 == 7) 528 + pr_cont("\n"); 522 529 } 523 - printk("\n"); 524 530 show_trace(task, stack); 525 - } 526 - 527 - void show_code(unsigned int *pc) 528 - { 529 - long i; 530 - 531 - printk("\nCode:"); 532 - 533 - for(i = -3 ; i < 6 ; i++) { 534 - unsigned long insn; 535 - if (__get_user(insn, pc + i)) { 536 - printk(" (Bad address in pc)\n"); 537 - break; 538 - } 539 - printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>')); 540 - } 541 531 } 542 532 543 533 DEFINE_SPINLOCK(die_lock); ··· 528 552 void die(const char * str, struct pt_regs * regs, long err) 529 553 { 530 554 static int die_counter; 531 - int nl = 0; 532 555 533 556 console_verbose(); 534 557 spin_lock_irq(&die_lock); 535 558 536 - printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter); 537 - #ifdef CONFIG_PREEMPT 538 - printk("PREEMPT "); 539 - nl = 1; 540 - #endif 541 - if (nl) 542 - printk("\n"); 559 + pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, 560 + IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : ""); 543 561 show_regs(regs); 544 562 if (!user_mode(regs)) 545 563 show_stack(NULL, (unsigned long*)regs->areg[1]);
+10 -7
crypto/algif_hash.c
··· 214 214 215 215 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); 216 216 217 - if (ctx->more) { 217 + if (!result) { 218 + err = af_alg_wait_for_completion( 219 + crypto_ahash_init(&ctx->req), 220 + &ctx->completion); 221 + if (err) 222 + goto unlock; 223 + } 224 + 225 + if (!result || ctx->more) { 218 226 ctx->more = 0; 219 227 err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), 220 228 &ctx->completion); 221 229 if (err) 222 230 goto unlock; 223 - } else if (!result) { 224 - err = af_alg_wait_for_completion( 225 - crypto_ahash_digest(&ctx->req), 226 - &ctx->completion); 227 231 } 228 232 229 233 err = memcpy_to_msg(msg, ctx->result, len); 230 234 231 - hash_free_result(sk, ctx); 232 - 233 235 unlock: 236 + hash_free_result(sk, ctx); 234 237 release_sock(sk); 235 238 236 239 return err ?: len;
+2 -8
drivers/acpi/acpi_apd.c
··· 122 122 int ret; 123 123 124 124 if (!dev_desc) { 125 - pdev = acpi_create_platform_device(adev); 125 + pdev = acpi_create_platform_device(adev, NULL); 126 126 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; 127 127 } 128 128 ··· 139 139 goto err_out; 140 140 } 141 141 142 - if (dev_desc->properties) { 143 - ret = device_add_properties(&adev->dev, dev_desc->properties); 144 - if (ret) 145 - goto err_out; 146 - } 147 - 148 142 adev->driver_data = pdata; 149 - pdev = acpi_create_platform_device(adev); 143 + pdev = acpi_create_platform_device(adev, dev_desc->properties); 150 144 if (!IS_ERR_OR_NULL(pdev)) 151 145 return 1; 152 146
+2 -8
drivers/acpi/acpi_lpss.c
··· 395 395 396 396 dev_desc = (const struct lpss_device_desc *)id->driver_data; 397 397 if (!dev_desc) { 398 - pdev = acpi_create_platform_device(adev); 398 + pdev = acpi_create_platform_device(adev, NULL); 399 399 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; 400 400 } 401 401 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); ··· 451 451 goto err_out; 452 452 } 453 453 454 - if (dev_desc->properties) { 455 - ret = device_add_properties(&adev->dev, dev_desc->properties); 456 - if (ret) 457 - goto err_out; 458 - } 459 - 460 454 adev->driver_data = pdata; 461 - pdev = acpi_create_platform_device(adev); 455 + pdev = acpi_create_platform_device(adev, dev_desc->properties); 462 456 if (!IS_ERR_OR_NULL(pdev)) { 463 457 return 1; 464 458 }
+4 -1
drivers/acpi/acpi_platform.c
··· 50 50 /** 51 51 * acpi_create_platform_device - Create platform device for ACPI device node 52 52 * @adev: ACPI device node to create a platform device for. 53 + * @properties: Optional collection of build-in properties. 53 54 * 54 55 * Check if the given @adev can be represented as a platform device and, if 55 56 * that's the case, create and register a platform device, populate its common ··· 58 57 * 59 58 * Name of the platform device will be the same as @adev's. 60 59 */ 61 - struct platform_device *acpi_create_platform_device(struct acpi_device *adev) 60 + struct platform_device *acpi_create_platform_device(struct acpi_device *adev, 61 + struct property_entry *properties) 62 62 { 63 63 struct platform_device *pdev = NULL; 64 64 struct platform_device_info pdevinfo; ··· 108 106 pdevinfo.res = resources; 109 107 pdevinfo.num_res = count; 110 108 pdevinfo.fwnode = acpi_fwnode_handle(adev); 109 + pdevinfo.properties = properties; 111 110 112 111 if (acpi_dma_supported(adev)) 113 112 pdevinfo.dma_mask = DMA_BIT_MASK(32);
+4 -6
drivers/acpi/acpica/tbfadt.c
··· 480 480 u32 i; 481 481 482 482 /* 483 - * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which 483 + * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which 484 484 * should be zero are indeed zero. This will workaround BIOSs that 485 485 * inadvertently place values in these fields. 486 486 * 487 487 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located 488 488 * at offset 45, 55, 95, and the word located at offset 109, 110. 489 489 * 490 - * Note: The FADT revision value is unreliable because of BIOS errors. 491 - * The table length is instead used as the final word on the version. 492 - * 493 - * Note: FADT revision 3 is the ACPI 2.0 version of the FADT. 490 + * Note: The FADT revision value is unreliable. Only the length can be 491 + * trusted. 494 492 */ 495 - if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) { 493 + if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) { 496 494 acpi_gbl_FADT.preferred_profile = 0; 497 495 acpi_gbl_FADT.pstate_control = 0; 498 496 acpi_gbl_FADT.cst_control = 0;
+2 -2
drivers/acpi/dptf/int340x_thermal.c
··· 34 34 const struct acpi_device_id *id) 35 35 { 36 36 if (IS_ENABLED(CONFIG_INT340X_THERMAL)) 37 - acpi_create_platform_device(adev); 37 + acpi_create_platform_device(adev, NULL); 38 38 /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ 39 39 else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && 40 40 id->driver_data == INT3401_DEVICE) 41 - acpi_create_platform_device(adev); 41 + acpi_create_platform_device(adev, NULL); 42 42 return 1; 43 43 } 44 44
+1 -1
drivers/acpi/scan.c
··· 1734 1734 &is_spi_i2c_slave); 1735 1735 acpi_dev_free_resource_list(&resource_list); 1736 1736 if (!is_spi_i2c_slave) { 1737 - acpi_create_platform_device(device); 1737 + acpi_create_platform_device(device, NULL); 1738 1738 acpi_device_set_enumerated(device); 1739 1739 } else { 1740 1740 blocking_notifier_call_chain(&acpi_reconfig_chain,
+3 -2
drivers/base/dd.c
··· 324 324 { 325 325 int ret = -EPROBE_DEFER; 326 326 int local_trigger_count = atomic_read(&deferred_trigger_count); 327 - bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE); 327 + bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) && 328 + !drv->suppress_bind_attrs; 328 329 329 330 if (defer_all_probes) { 330 331 /* ··· 384 383 if (test_remove) { 385 384 test_remove = false; 386 385 387 - if (dev->bus && dev->bus->remove) 386 + if (dev->bus->remove) 388 387 dev->bus->remove(dev); 389 388 else if (drv->remove) 390 389 drv->remove(dev);
+4 -4
drivers/base/power/main.c
··· 1027 1027 TRACE_DEVICE(dev); 1028 1028 TRACE_SUSPEND(0); 1029 1029 1030 + dpm_wait_for_children(dev, async); 1031 + 1030 1032 if (async_error) 1031 1033 goto Complete; 1032 1034 ··· 1039 1037 1040 1038 if (dev->power.syscore || dev->power.direct_complete) 1041 1039 goto Complete; 1042 - 1043 - dpm_wait_for_children(dev, async); 1044 1040 1045 1041 if (dev->pm_domain) { 1046 1042 info = "noirq power domain "; ··· 1174 1174 1175 1175 __pm_runtime_disable(dev, false); 1176 1176 1177 + dpm_wait_for_children(dev, async); 1178 + 1177 1179 if (async_error) 1178 1180 goto Complete; 1179 1181 ··· 1186 1184 1187 1185 if (dev->power.syscore || dev->power.direct_complete) 1188 1186 goto Complete; 1189 - 1190 - dpm_wait_for_children(dev, async); 1191 1187 1192 1188 if (dev->pm_domain) { 1193 1189 info = "late power domain ";
-41
drivers/block/aoe/aoecmd.c
··· 853 853 return n; 854 854 } 855 855 856 - /* This can be removed if we are certain that no users of the block 857 - * layer will ever use zero-count pages in bios. Otherwise we have to 858 - * protect against the put_page sometimes done by the network layer. 859 - * 860 - * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for 861 - * discussion. 862 - * 863 - * We cannot use get_page in the workaround, because it insists on a 864 - * positive page count as a precondition. So we use _refcount directly. 865 - */ 866 - static void 867 - bio_pageinc(struct bio *bio) 868 - { 869 - struct bio_vec bv; 870 - struct page *page; 871 - struct bvec_iter iter; 872 - 873 - bio_for_each_segment(bv, bio, iter) { 874 - /* Non-zero page count for non-head members of 875 - * compound pages is no longer allowed by the kernel. 876 - */ 877 - page = compound_head(bv.bv_page); 878 - page_ref_inc(page); 879 - } 880 - } 881 - 882 - static void 883 - bio_pagedec(struct bio *bio) 884 - { 885 - struct page *page; 886 - struct bio_vec bv; 887 - struct bvec_iter iter; 888 - 889 - bio_for_each_segment(bv, bio, iter) { 890 - page = compound_head(bv.bv_page); 891 - page_ref_dec(page); 892 - } 893 - } 894 - 895 856 static void 896 857 bufinit(struct buf *buf, struct request *rq, struct bio *bio) 897 858 { ··· 860 899 buf->rq = rq; 861 900 buf->bio = bio; 862 901 buf->iter = bio->bi_iter; 863 - bio_pageinc(bio); 864 902 } 865 903 866 904 static struct buf * ··· 1087 1127 if (buf == d->ip.buf) 1088 1128 d->ip.buf = NULL; 1089 1129 rq = buf->rq; 1090 - bio_pagedec(buf->bio); 1091 1130 mempool_free(buf, d->bufpool); 1092 1131 n = (unsigned long) rq->special; 1093 1132 rq->special = (void *) --n;
+1 -1
drivers/block/drbd/drbd_main.c
··· 1871 1871 drbd_update_congested(connection); 1872 1872 } 1873 1873 do { 1874 - rv = kernel_sendmsg(sock, &msg, &iov, 1, size); 1874 + rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); 1875 1875 if (rv == -EAGAIN) { 1876 1876 if (we_should_drop_the_connection(connection, sock)) 1877 1877 break;
+1 -1
drivers/block/nbd.c
··· 599 599 return -EINVAL; 600 600 601 601 sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0); 602 - if (!sreq) 602 + if (IS_ERR(sreq)) 603 603 return -ENOMEM; 604 604 605 605 mutex_unlock(&nbd->tx_lock);
+2 -2
drivers/char/ipmi/bt-bmc.c
··· 484 484 } 485 485 486 486 static const struct of_device_id bt_bmc_match[] = { 487 - { .compatible = "aspeed,ast2400-bt-bmc" }, 487 + { .compatible = "aspeed,ast2400-ibt-bmc" }, 488 488 { }, 489 489 }; 490 490 ··· 502 502 MODULE_DEVICE_TABLE(of, bt_bmc_match); 503 503 MODULE_LICENSE("GPL"); 504 504 MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>"); 505 - MODULE_DESCRIPTION("Linux device interface to the BT interface"); 505 + MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
-3
drivers/char/ppdev.c
··· 748 748 } 749 749 750 750 if (pp->pdev) { 751 - const char *name = pp->pdev->name; 752 - 753 751 parport_unregister_device(pp->pdev); 754 - kfree(name); 755 752 pp->pdev = NULL; 756 753 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); 757 754 }
+8 -5
drivers/clk/clk-qoriq.c
··· 700 700 struct mux_hwclock *hwc, 701 701 const struct clk_ops *ops, 702 702 unsigned long min_rate, 703 + unsigned long max_rate, 703 704 unsigned long pct80_rate, 704 705 const char *fmt, int idx) 705 706 { ··· 728 727 rate > pct80_rate) 729 728 continue; 730 729 if (rate < min_rate) 730 + continue; 731 + if (rate > max_rate) 731 732 continue; 732 733 733 734 parent_names[j] = div->name; ··· 762 759 struct mux_hwclock *hwc; 763 760 const struct clockgen_pll_div *div; 764 761 unsigned long plat_rate, min_rate; 765 - u64 pct80_rate; 762 + u64 max_rate, pct80_rate; 766 763 u32 clksel; 767 764 768 765 hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); ··· 790 787 return NULL; 791 788 } 792 789 793 - pct80_rate = clk_get_rate(div->clk); 794 - pct80_rate *= 8; 790 + max_rate = clk_get_rate(div->clk); 791 + pct80_rate = max_rate * 8; 795 792 do_div(pct80_rate, 10); 796 793 797 794 plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); ··· 801 798 else 802 799 min_rate = plat_rate / 2; 803 800 804 - return create_mux_common(cg, hwc, &cmux_ops, min_rate, 801 + return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate, 805 802 pct80_rate, "cg-cmux%d", idx); 806 803 } 807 804 ··· 816 813 hwc->reg = cg->regs + 0x20 * idx + 0x10; 817 814 hwc->info = cg->info.hwaccel[idx]; 818 815 819 - return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, 816 + return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0, 820 817 "cg-hwaccel%d", idx); 821 818 } 822 819
+4 -6
drivers/clk/clk-xgene.c
··· 463 463 struct xgene_clk *pclk = to_xgene_clk(hw); 464 464 unsigned long flags = 0; 465 465 u32 data; 466 - phys_addr_t reg; 467 466 468 467 if (pclk->lock) 469 468 spin_lock_irqsave(pclk->lock, flags); 470 469 471 470 if (pclk->param.csr_reg != NULL) { 472 471 pr_debug("%s clock enabled\n", clk_hw_get_name(hw)); 473 - reg = __pa(pclk->param.csr_reg); 474 472 /* First enable the clock */ 475 473 data = xgene_clk_read(pclk->param.csr_reg + 476 474 pclk->param.reg_clk_offset); 477 475 data |= pclk->param.reg_clk_mask; 478 476 xgene_clk_write(data, pclk->param.csr_reg + 479 477 pclk->param.reg_clk_offset); 480 - pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n", 481 - clk_hw_get_name(hw), &reg, 478 + pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n", 479 + clk_hw_get_name(hw), 482 480 pclk->param.reg_clk_offset, pclk->param.reg_clk_mask, 483 481 data); 484 482 ··· 486 488 data &= ~pclk->param.reg_csr_mask; 487 489 xgene_clk_write(data, pclk->param.csr_reg + 488 490 pclk->param.reg_csr_offset); 489 - pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n", 490 - clk_hw_get_name(hw), &reg, 491 + pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n", 492 + clk_hw_get_name(hw), 491 493 pclk->param.reg_csr_offset, pclk->param.reg_csr_mask, 492 494 data); 493 495 }
+6 -2
drivers/clk/imx/clk-pllv3.c
··· 223 223 temp64 *= mfn; 224 224 do_div(temp64, mfd); 225 225 226 - return (parent_rate * div) + (u32)temp64; 226 + return parent_rate * div + (unsigned long)temp64; 227 227 } 228 228 229 229 static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, ··· 247 247 do_div(temp64, parent_rate); 248 248 mfn = temp64; 249 249 250 - return parent_rate * div + parent_rate * mfn / mfd; 250 + temp64 = (u64)parent_rate; 251 + temp64 *= mfn; 252 + do_div(temp64, mfd); 253 + 254 + return parent_rate * div + (unsigned long)temp64; 251 255 } 252 256 253 257 static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
+1 -1
drivers/clk/mmp/clk-of-mmp2.c
··· 313 313 } 314 314 315 315 pxa_unit->apmu_base = of_iomap(np, 1); 316 - if (!pxa_unit->mpmu_base) { 316 + if (!pxa_unit->apmu_base) { 317 317 pr_err("failed to map apmu registers\n"); 318 318 return; 319 319 }
+1 -1
drivers/clk/mmp/clk-of-pxa168.c
··· 262 262 } 263 263 264 264 pxa_unit->apmu_base = of_iomap(np, 1); 265 - if (!pxa_unit->mpmu_base) { 265 + if (!pxa_unit->apmu_base) { 266 266 pr_err("failed to map apmu registers\n"); 267 267 return; 268 268 }
+2 -2
drivers/clk/mmp/clk-of-pxa910.c
··· 282 282 } 283 283 284 284 pxa_unit->apmu_base = of_iomap(np, 1); 285 - if (!pxa_unit->mpmu_base) { 285 + if (!pxa_unit->apmu_base) { 286 286 pr_err("failed to map apmu registers\n"); 287 287 return; 288 288 } ··· 294 294 } 295 295 296 296 pxa_unit->apbcp_base = of_iomap(np, 3); 297 - if (!pxa_unit->mpmu_base) { 297 + if (!pxa_unit->apbcp_base) { 298 298 pr_err("failed to map apbcp registers\n"); 299 299 return; 300 300 }
+1 -4
drivers/clk/rockchip/clk-ddr.c
··· 144 144 ddrclk->ddr_flag = ddr_flag; 145 145 146 146 clk = clk_register(NULL, &ddrclk->hw); 147 - if (IS_ERR(clk)) { 148 - pr_err("%s: could not register ddrclk %s\n", __func__, name); 147 + if (IS_ERR(clk)) 149 148 kfree(ddrclk); 150 - return NULL; 151 - } 152 149 153 150 return clk; 154 151 }
+14 -8
drivers/clk/samsung/clk-exynos-clkout.c
··· 132 132 pr_err("%s: failed to register clkout clock\n", __func__); 133 133 } 134 134 135 + /* 136 + * We use CLK_OF_DECLARE_DRIVER initialization method to avoid setting 137 + * the OF_POPULATED flag on the pmu device tree node, so later the 138 + * Exynos PMU platform device can be properly probed with PMU driver. 139 + */ 140 + 135 141 static void __init exynos4_clkout_init(struct device_node *node) 136 142 { 137 143 exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK); 138 144 } 139 - CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu", 145 + CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu", 140 146 exynos4_clkout_init); 141 - CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu", 147 + CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu", 142 148 exynos4_clkout_init); 143 - CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu", 149 + CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu", 144 150 exynos4_clkout_init); 145 - CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu", 151 + CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu", 146 152 exynos4_clkout_init); 147 153 148 154 static void __init exynos5_clkout_init(struct device_node *node) 149 155 { 150 156 exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK); 151 157 } 152 - CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu", 158 + CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu", 153 159 exynos5_clkout_init); 154 - CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu", 160 + CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu", 155 161 exynos5_clkout_init); 156 - CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu", 162 + CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu", 157 163 exynos5_clkout_init); 158 - CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu", 164 + CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu", 159 165 exynos5_clkout_init);
+10 -1
drivers/crypto/caam/caamalg.c
··· 137 137 } 138 138 139 139 buf = it_page + it->offset; 140 - len = min(tlen, it->length); 140 + len = min_t(size_t, tlen, it->length); 141 141 print_hex_dump(level, prefix_str, prefix_type, rowsize, 142 142 groupsize, buf, len, ascii); 143 143 tlen -= len; ··· 4581 4581 4582 4582 /* Skip AES algorithms if not supported by device */ 4583 4583 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 4584 + continue; 4585 + 4586 + /* 4587 + * Check support for AES modes not available 4588 + * on LP devices. 4589 + */ 4590 + if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) 4591 + if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == 4592 + OP_ALG_AAI_XTS) 4584 4593 continue; 4585 4594 4586 4595 t_alg = caam_alg_alloc(alg);
+1
drivers/dma/Kconfig
··· 306 306 depends on ARCH_MMP || COMPILE_TEST 307 307 select DMA_ENGINE 308 308 select MMP_SRAM if ARCH_MMP 309 + select GENERIC_ALLOCATOR 309 310 help 310 311 Support the MMP Two-Channel DMA engine. 311 312 This engine used for MMP Audio DMA and pxa910 SQU.
+26 -5
drivers/dma/cppi41.c
··· 317 317 318 318 while (val) { 319 319 u32 desc, len; 320 + int error; 321 + 322 + error = pm_runtime_get(cdd->ddev.dev); 323 + if (error < 0) 324 + dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", 325 + __func__, error); 320 326 321 327 q_num = __fls(val); 322 328 val &= ~(1 << q_num); ··· 344 338 dma_cookie_complete(&c->txd); 345 339 dmaengine_desc_get_callback_invoke(&c->txd, NULL); 346 340 347 - /* Paired with cppi41_dma_issue_pending */ 348 341 pm_runtime_mark_last_busy(cdd->ddev.dev); 349 342 pm_runtime_put_autosuspend(cdd->ddev.dev); 350 343 } ··· 367 362 int error; 368 363 369 364 error = pm_runtime_get_sync(cdd->ddev.dev); 370 - if (error < 0) 365 + if (error < 0) { 366 + dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", 367 + __func__, error); 368 + pm_runtime_put_noidle(cdd->ddev.dev); 369 + 371 370 return error; 371 + } 372 372 373 373 dma_cookie_init(chan); 374 374 dma_async_tx_descriptor_init(&c->txd, chan); ··· 395 385 int error; 396 386 397 387 error = pm_runtime_get_sync(cdd->ddev.dev); 398 - if (error < 0) 388 + if (error < 0) { 389 + pm_runtime_put_noidle(cdd->ddev.dev); 390 + 399 391 return; 392 + } 400 393 401 394 WARN_ON(!list_empty(&cdd->pending)); 402 395 ··· 473 460 struct cppi41_dd *cdd = c->cdd; 474 461 int error; 475 462 476 - /* PM runtime paired with dmaengine_desc_get_callback_invoke */ 477 463 error = pm_runtime_get(cdd->ddev.dev); 478 464 if ((error != -EINPROGRESS) && error < 0) { 465 + pm_runtime_put_noidle(cdd->ddev.dev); 479 466 dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", 480 467 error); 481 468 ··· 486 473 push_desc_queue(c); 487 474 else 488 475 pending_desc(c); 476 + 477 + pm_runtime_mark_last_busy(cdd->ddev.dev); 478 + pm_runtime_put_autosuspend(cdd->ddev.dev); 489 479 } 490 480 491 481 static u32 get_host_pd0(u32 length) ··· 1075 1059 deinit_cppi41(dev, cdd); 1076 1060 err_init_cppi: 1077 1061 pm_runtime_dont_use_autosuspend(dev); 1078 - pm_runtime_put_sync(dev); 1079 1062 err_get_sync: 1063 + 
pm_runtime_put_sync(dev); 1080 1064 pm_runtime_disable(dev); 1081 1065 iounmap(cdd->usbss_mem); 1082 1066 iounmap(cdd->ctrl_mem); ··· 1088 1072 static int cppi41_dma_remove(struct platform_device *pdev) 1089 1073 { 1090 1074 struct cppi41_dd *cdd = platform_get_drvdata(pdev); 1075 + int error; 1091 1076 1077 + error = pm_runtime_get_sync(&pdev->dev); 1078 + if (error < 0) 1079 + dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n", 1080 + __func__, error); 1092 1081 of_dma_controller_free(pdev->dev.of_node); 1093 1082 dma_async_device_unregister(&cdd->ddev); 1094 1083
+1
drivers/dma/edma.c
··· 1628 1628 if (echan->slot[0] < 0) { 1629 1629 dev_err(dev, "Entry slot allocation failed for channel %u\n", 1630 1630 EDMA_CHAN_SLOT(echan->ch_num)); 1631 + ret = echan->slot[0]; 1631 1632 goto err_slot; 1632 1633 } 1633 1634
+1 -1
drivers/dma/sun6i-dma.c
··· 578 578 579 579 burst = convert_burst(8); 580 580 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); 581 - v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 581 + v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 582 582 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | 583 583 DMA_CHAN_CFG_DST_LINEAR_MODE | 584 584 DMA_CHAN_CFG_SRC_LINEAR_MODE |
-4
drivers/gpio/Kconfig
··· 22 22 23 23 if GPIOLIB 24 24 25 - config GPIO_DEVRES 26 - def_bool y 27 - depends on HAS_IOMEM 28 - 29 25 config OF_GPIO 30 26 def_bool y 31 27 depends on OF
+1 -1
drivers/gpio/Makefile
··· 2 2 3 3 ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG 4 4 5 - obj-$(CONFIG_GPIO_DEVRES) += devres.o 5 + obj-$(CONFIG_GPIOLIB) += devres.o 6 6 obj-$(CONFIG_GPIOLIB) += gpiolib.o 7 7 obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o 8 8 obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
+2 -2
drivers/gpio/gpio-pca953x.c
··· 372 372 373 373 bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ); 374 374 375 - memcpy(reg_val, chip->reg_output, NBANK(chip)); 376 375 mutex_lock(&chip->i2c_lock); 376 + memcpy(reg_val, chip->reg_output, NBANK(chip)); 377 377 for (bank = 0; bank < NBANK(chip); bank++) { 378 378 bank_mask = mask[bank / sizeof(*mask)] >> 379 379 ((bank % sizeof(*mask)) * 8); 380 380 if (bank_mask) { 381 381 bank_val = bits[bank / sizeof(*bits)] >> 382 382 ((bank % sizeof(*bits)) * 8); 383 + bank_val &= bank_mask; 383 384 reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val; 384 385 } 385 386 } ··· 608 607 609 608 if (client->irq && irq_base != -1 610 609 && (chip->driver_data & PCA_INT)) { 611 - 612 610 ret = pca953x_read_regs(chip, 613 611 chip->regs->input, chip->irq_stat); 614 612 if (ret)
+1 -1
drivers/gpio/gpio-tc3589x.c
··· 97 97 if (ret < 0) 98 98 return ret; 99 99 100 - return !!(ret & BIT(pos)); 100 + return !(ret & BIT(pos)); 101 101 } 102 102 103 103 static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
+5 -2
drivers/gpio/gpiolib.c
··· 2737 2737 if (IS_ERR(desc)) 2738 2738 return PTR_ERR(desc); 2739 2739 2740 - /* Flush direction if something changed behind our back */ 2741 - if (chip->get_direction) { 2740 + /* 2741 + * If it's fast: flush the direction setting if something changed 2742 + * behind our back 2743 + */ 2744 + if (!chip->can_sleep && chip->get_direction) { 2742 2745 int dir = chip->get_direction(chip, offset); 2743 2746 2744 2747 if (dir)
+1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 459 459 u64 metadata_flags; 460 460 void *metadata; 461 461 u32 metadata_size; 462 + unsigned prime_shared_count; 462 463 /* list of all virtual address to which this bo 463 464 * is associated to 464 465 */
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 395 395 { 396 396 int i, ret; 397 397 struct device *dev; 398 - 399 398 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 399 + 400 + /* return early if no ACP */ 401 + if (!adev->acp.acp_genpd) 402 + return 0; 400 403 401 404 for (i = 0; i < ACP_DEVS ; i++) { 402 405 dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
··· 132 132 entry->priority = min(info[i].bo_priority, 133 133 AMDGPU_BO_LIST_MAX_PRIORITY); 134 134 entry->tv.bo = &entry->robj->tbo; 135 - entry->tv.shared = true; 135 + entry->tv.shared = !entry->robj->prime_shared_count; 136 136 137 137 if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) 138 138 gds_obj = entry->robj;
+11 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
··· 795 795 if (!adev->pm.fw) { 796 796 switch (adev->asic_type) { 797 797 case CHIP_TOPAZ: 798 - strcpy(fw_name, "amdgpu/topaz_smc.bin"); 798 + if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || 799 + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || 800 + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) 801 + strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); 802 + else 803 + strcpy(fw_name, "amdgpu/topaz_smc.bin"); 799 804 break; 800 805 case CHIP_TONGA: 801 - strcpy(fw_name, "amdgpu/tonga_smc.bin"); 806 + if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) || 807 + ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) 808 + strcpy(fw_name, "amdgpu/tonga_k_smc.bin"); 809 + else 810 + strcpy(fw_name, "amdgpu/tonga_smc.bin"); 802 811 break; 803 812 case CHIP_FIJI: 804 813 strcpy(fw_name, "amdgpu/fiji_smc.bin");
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
··· 769 769 { 770 770 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 771 771 772 - if (amdgpu_connector->ddc_bus->has_aux) { 772 + if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) { 773 773 drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); 774 774 amdgpu_connector->ddc_bus->has_aux = false; 775 775 }
+7 -20
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 658 658 return false; 659 659 660 660 if (amdgpu_passthrough(adev)) { 661 - /* for FIJI: In whole GPU pass-through virtualization case 662 - * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) 663 - * so amdgpu_card_posted return false and driver will incorrectly skip vPost. 664 - * but if we force vPost do in pass-through case, the driver reload will hang. 665 - * whether doing vPost depends on amdgpu_card_posted if smc version is above 666 - * 00160e00 for FIJI. 661 + /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot 662 + * some old smc fw still need driver do vPost otherwise gpu hang, while 663 + * those smc fw version above 22.15 doesn't have this flaw, so we force 664 + * vpost executed for smc version below 22.15 667 665 */ 668 666 if (adev->asic_type == CHIP_FIJI) { 669 667 int err; ··· 672 674 return true; 673 675 674 676 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); 675 - if (fw_ver >= 0x00160e00) 676 - return !amdgpu_card_posted(adev); 677 + if (fw_ver < 0x00160e00) 678 + return true; 677 679 } 678 - } else { 679 - /* in bare-metal case, amdgpu_card_posted return false 680 - * after system reboot/boot, and return true if driver 681 - * reloaded. 682 - * we shouldn't do vPost after driver reload otherwise GPU 683 - * could hang. 684 - */ 685 - if (amdgpu_card_posted(adev)) 686 - return false; 687 680 } 688 - 689 - /* we assume vPost is neede for all other cases */ 690 - return true; 681 + return !amdgpu_card_posted(adev); 691 682 } 692 683 693 684 /**
+24 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 735 735 736 736 static int __init amdgpu_init(void) 737 737 { 738 - amdgpu_sync_init(); 739 - amdgpu_fence_slab_init(); 738 + int r; 739 + 740 + r = amdgpu_sync_init(); 741 + if (r) 742 + goto error_sync; 743 + 744 + r = amdgpu_fence_slab_init(); 745 + if (r) 746 + goto error_fence; 747 + 748 + r = amd_sched_fence_slab_init(); 749 + if (r) 750 + goto error_sched; 751 + 740 752 if (vgacon_text_force()) { 741 753 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); 742 754 return -EINVAL; ··· 760 748 amdgpu_register_atpx_handler(); 761 749 /* let modprobe override vga console setting */ 762 750 return drm_pci_init(driver, pdriver); 751 + 752 + error_sched: 753 + amdgpu_fence_slab_fini(); 754 + 755 + error_fence: 756 + amdgpu_sync_fini(); 757 + 758 + error_sync: 759 + return r; 763 760 } 764 761 765 762 static void __exit amdgpu_exit(void) ··· 777 756 drm_pci_exit(driver, pdriver); 778 757 amdgpu_unregister_atpx_handler(); 779 758 amdgpu_sync_fini(); 759 + amd_sched_fence_slab_fini(); 780 760 amdgpu_fence_slab_fini(); 781 761 } 782 762
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 99 99 100 100 if ((amdgpu_runtime_pm != 0) && 101 101 amdgpu_has_atpx() && 102 + (amdgpu_is_atpx_hybrid() || 103 + amdgpu_has_atpx_dgpu_power_cntl()) && 102 104 ((flags & AMD_IS_APU) == 0)) 103 105 flags |= AMD_IS_PX; 104 106
+19 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
··· 74 74 if (ret) 75 75 return ERR_PTR(ret); 76 76 77 + bo->prime_shared_count = 1; 77 78 return &bo->gem_base; 78 79 } 79 80 80 81 int amdgpu_gem_prime_pin(struct drm_gem_object *obj) 81 82 { 82 83 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 83 - int ret = 0; 84 + long ret = 0; 84 85 85 86 ret = amdgpu_bo_reserve(bo, false); 86 87 if (unlikely(ret != 0)) 87 88 return ret; 88 89 90 + /* 91 + * Wait for all shared fences to complete before we switch to future 92 + * use of exclusive fence on this prime shared bo. 93 + */ 94 + ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, 95 + MAX_SCHEDULE_TIMEOUT); 96 + if (unlikely(ret < 0)) { 97 + DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); 98 + amdgpu_bo_unreserve(bo); 99 + return ret; 100 + } 101 + 89 102 /* pin buffer into GTT */ 90 103 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); 104 + if (likely(ret == 0)) 105 + bo->prime_shared_count++; 106 + 91 107 amdgpu_bo_unreserve(bo); 92 108 return ret; 93 109 } ··· 118 102 return; 119 103 120 104 amdgpu_bo_unpin(bo); 105 + if (bo->prime_shared_count) 106 + bo->prime_shared_count--; 121 107 amdgpu_bo_unreserve(bo); 122 108 } 123 109
+2
drivers/gpu/drm/amd/amdgpu/vi.c
··· 80 80 #include "dce_virtual.h" 81 81 82 82 MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); 83 + MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); 83 84 MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); 85 + MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); 84 86 MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); 85 87 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); 86 88 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
··· 272 272 PHM_FUNC_CHECK(hwmgr); 273 273 274 274 if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) 275 - return -EINVAL; 275 + return false; 276 276 277 277 return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr); 278 278 }
+4 -2
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
··· 710 710 uint32_t vol; 711 711 int ret = 0; 712 712 713 - if (hwmgr->chip_id < CHIP_POLARIS10) { 714 - atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); 713 + if (hwmgr->chip_id < CHIP_TONGA) { 714 + ret = atomctrl_get_voltage_evv(hwmgr, id, voltage); 715 + } else if (hwmgr->chip_id < CHIP_POLARIS10) { 716 + ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); 715 717 if (*voltage >= 2000 || *voltage == 0) 716 718 *voltage = 1150; 717 719 } else {
+43 -25
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 1460 1460 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; 1461 1461 1462 1462 1463 - if (table_info == NULL) 1464 - return -EINVAL; 1465 - 1466 - sclk_table = table_info->vdd_dep_on_sclk; 1467 - 1468 1463 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { 1469 1464 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 1470 1465 1471 1466 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1472 - if (0 == phm_get_sclk_for_voltage_evv(hwmgr, 1467 + if ((hwmgr->pp_table_version == PP_TABLE_V1) 1468 + && !phm_get_sclk_for_voltage_evv(hwmgr, 1473 1469 table_info->vddgfx_lookup_table, vv_id, &sclk)) { 1474 1470 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1475 1471 PHM_PlatformCaps_ClockStretcher)) { 1472 + sclk_table = table_info->vdd_dep_on_sclk; 1473 + 1476 1474 for (j = 1; j < sclk_table->count; j++) { 1477 1475 if (sclk_table->entries[j].clk == sclk && 1478 1476 sclk_table->entries[j].cks_enable == 0) { ··· 1496 1498 } 1497 1499 } 1498 1500 } else { 1499 - 1500 1501 if ((hwmgr->pp_table_version == PP_TABLE_V0) 1501 1502 || !phm_get_sclk_for_voltage_evv(hwmgr, 1502 1503 table_info->vddc_lookup_table, vv_id, &sclk)) { 1503 1504 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1504 1505 PHM_PlatformCaps_ClockStretcher)) { 1506 + if (table_info == NULL) 1507 + return -EINVAL; 1508 + sclk_table = table_info->vdd_dep_on_sclk; 1509 + 1505 1510 for (j = 1; j < sclk_table->count; j++) { 1506 1511 if (sclk_table->entries[j].clk == sclk && 1507 1512 sclk_table->entries[j].cks_enable == 0) { ··· 2134 2133 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2135 2134 2136 2135 if (tab) { 2136 + vddc = tab->vddc; 2137 2137 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, 2138 2138 &data->vddc_leakage); 2139 2139 tab->vddc = vddc; 2140 + vddci = tab->vddci; 2140 2141 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, 2141 2142 &data->vddci_leakage); 2142 2143 tab->vddci = vddci; ··· 4231 4228 { 4232 4229 struct 
phm_ppt_v1_information *table_info = 4233 4230 (struct phm_ppt_v1_information *)hwmgr->pptable; 4234 - struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; 4231 + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL; 4232 + struct phm_clock_voltage_dependency_table *sclk_table; 4235 4233 int i; 4236 4234 4237 - if (table_info == NULL) 4238 - return -EINVAL; 4239 - 4240 - dep_sclk_table = table_info->vdd_dep_on_sclk; 4241 - 4242 - for (i = 0; i < dep_sclk_table->count; i++) { 4243 - clocks->clock[i] = dep_sclk_table->entries[i].clk; 4244 - clocks->count++; 4235 + if (hwmgr->pp_table_version == PP_TABLE_V1) { 4236 + if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) 4237 + return -EINVAL; 4238 + dep_sclk_table = table_info->vdd_dep_on_sclk; 4239 + for (i = 0; i < dep_sclk_table->count; i++) { 4240 + clocks->clock[i] = dep_sclk_table->entries[i].clk; 4241 + clocks->count++; 4242 + } 4243 + } else if (hwmgr->pp_table_version == PP_TABLE_V0) { 4244 + sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; 4245 + for (i = 0; i < sclk_table->count; i++) { 4246 + clocks->clock[i] = sclk_table->entries[i].clk; 4247 + clocks->count++; 4248 + } 4245 4249 } 4250 + 4246 4251 return 0; 4247 4252 } 4248 4253 ··· 4272 4261 (struct phm_ppt_v1_information *)hwmgr->pptable; 4273 4262 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; 4274 4263 int i; 4264 + struct phm_clock_voltage_dependency_table *mclk_table; 4275 4265 4276 - if (table_info == NULL) 4277 - return -EINVAL; 4278 - 4279 - dep_mclk_table = table_info->vdd_dep_on_mclk; 4280 - 4281 - for (i = 0; i < dep_mclk_table->count; i++) { 4282 - clocks->clock[i] = dep_mclk_table->entries[i].clk; 4283 - clocks->latency[i] = smu7_get_mem_latency(hwmgr, 4266 + if (hwmgr->pp_table_version == PP_TABLE_V1) { 4267 + if (table_info == NULL) 4268 + return -EINVAL; 4269 + dep_mclk_table = table_info->vdd_dep_on_mclk; 4270 + for (i = 0; i < dep_mclk_table->count; i++) { 4271 + 
clocks->clock[i] = dep_mclk_table->entries[i].clk; 4272 + clocks->latency[i] = smu7_get_mem_latency(hwmgr, 4284 4273 dep_mclk_table->entries[i].clk); 4285 - clocks->count++; 4274 + clocks->count++; 4275 + } 4276 + } else if (hwmgr->pp_table_version == PP_TABLE_V0) { 4277 + mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; 4278 + for (i = 0; i < mclk_table->count; i++) { 4279 + clocks->clock[i] = mclk_table->entries[i].clk; 4280 + clocks->count++; 4281 + } 4286 4282 } 4287 4283 return 0; 4288 4284 }
+3 -3
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
··· 30 30 struct phm_fan_speed_info *fan_speed_info) 31 31 { 32 32 if (hwmgr->thermal_controller.fanInfo.bNoFan) 33 - return 0; 33 + return -ENODEV; 34 34 35 35 fan_speed_info->supports_percent_read = true; 36 36 fan_speed_info->supports_percent_write = true; ··· 60 60 uint64_t tmp64; 61 61 62 62 if (hwmgr->thermal_controller.fanInfo.bNoFan) 63 - return 0; 63 + return -ENODEV; 64 64 65 65 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 66 66 CG_FDO_CTRL1, FMAX_DUTY100); ··· 89 89 if (hwmgr->thermal_controller.fanInfo.bNoFan || 90 90 (hwmgr->thermal_controller.fanInfo. 91 91 ucTachometerPulsesPerRevolution == 0)) 92 - return 0; 92 + return -ENODEV; 93 93 94 94 tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 95 95 CG_TACH_STATUS, TACH_PERIOD);
-13
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
··· 34 34 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 35 35 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); 36 36 37 - struct kmem_cache *sched_fence_slab; 38 - atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); 39 - 40 37 /* Initialize a given run queue struct */ 41 38 static void amd_sched_rq_init(struct amd_sched_rq *rq) 42 39 { ··· 615 618 INIT_LIST_HEAD(&sched->ring_mirror_list); 616 619 spin_lock_init(&sched->job_list_lock); 617 620 atomic_set(&sched->hw_rq_count, 0); 618 - if (atomic_inc_return(&sched_fence_slab_ref) == 1) { 619 - sched_fence_slab = kmem_cache_create( 620 - "amd_sched_fence", sizeof(struct amd_sched_fence), 0, 621 - SLAB_HWCACHE_ALIGN, NULL); 622 - if (!sched_fence_slab) 623 - return -ENOMEM; 624 - } 625 621 626 622 /* Each scheduler will run on a seperate kernel thread */ 627 623 sched->thread = kthread_run(amd_sched_main, sched, sched->name); ··· 635 645 { 636 646 if (sched->thread) 637 647 kthread_stop(sched->thread); 638 - rcu_barrier(); 639 - if (atomic_dec_and_test(&sched_fence_slab_ref)) 640 - kmem_cache_destroy(sched_fence_slab); 641 648 }
+3 -3
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
··· 30 30 struct amd_gpu_scheduler; 31 31 struct amd_sched_rq; 32 32 33 - extern struct kmem_cache *sched_fence_slab; 34 - extern atomic_t sched_fence_slab_ref; 35 - 36 33 /** 37 34 * A scheduler entity is a wrapper around a job queue or a group 38 35 * of other entities. Entities take turns emitting jobs from their ··· 141 144 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 142 145 struct amd_sched_entity *entity); 143 146 void amd_sched_entity_push_job(struct amd_sched_job *sched_job); 147 + 148 + int amd_sched_fence_slab_init(void); 149 + void amd_sched_fence_slab_fini(void); 144 150 145 151 struct amd_sched_fence *amd_sched_fence_create( 146 152 struct amd_sched_entity *s_entity, void *owner);
+19
drivers/gpu/drm/amd/scheduler/sched_fence.c
··· 27 27 #include <drm/drmP.h> 28 28 #include "gpu_scheduler.h" 29 29 30 + static struct kmem_cache *sched_fence_slab; 31 + 32 + int amd_sched_fence_slab_init(void) 33 + { 34 + sched_fence_slab = kmem_cache_create( 35 + "amd_sched_fence", sizeof(struct amd_sched_fence), 0, 36 + SLAB_HWCACHE_ALIGN, NULL); 37 + if (!sched_fence_slab) 38 + return -ENOMEM; 39 + 40 + return 0; 41 + } 42 + 43 + void amd_sched_fence_slab_fini(void) 44 + { 45 + rcu_barrier(); 46 + kmem_cache_destroy(sched_fence_slab); 47 + } 48 + 30 49 struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, 31 50 void *owner) 32 51 {
+17 -142
drivers/gpu/drm/arc/arcpgu_hdmi.c
··· 14 14 * 15 15 */ 16 16 17 - #include <drm/drm_crtc_helper.h> 17 + #include <drm/drm_crtc.h> 18 18 #include <drm/drm_encoder_slave.h> 19 - #include <drm/drm_atomic_helper.h> 20 19 21 20 #include "arcpgu.h" 22 - 23 - struct arcpgu_drm_connector { 24 - struct drm_connector connector; 25 - struct drm_encoder_slave *encoder_slave; 26 - }; 27 - 28 - static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) 29 - { 30 - const struct drm_encoder_slave_funcs *sfuncs; 31 - struct drm_encoder_slave *slave; 32 - struct arcpgu_drm_connector *con = 33 - container_of(connector, struct arcpgu_drm_connector, connector); 34 - 35 - slave = con->encoder_slave; 36 - if (slave == NULL) { 37 - dev_err(connector->dev->dev, 38 - "connector_get_modes: cannot find slave encoder for connector\n"); 39 - return 0; 40 - } 41 - 42 - sfuncs = slave->slave_funcs; 43 - if (sfuncs->get_modes == NULL) 44 - return 0; 45 - 46 - return sfuncs->get_modes(&slave->base, connector); 47 - } 48 - 49 - static enum drm_connector_status 50 - arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) 51 - { 52 - enum drm_connector_status status = connector_status_unknown; 53 - const struct drm_encoder_slave_funcs *sfuncs; 54 - struct drm_encoder_slave *slave; 55 - 56 - struct arcpgu_drm_connector *con = 57 - container_of(connector, struct arcpgu_drm_connector, connector); 58 - 59 - slave = con->encoder_slave; 60 - if (slave == NULL) { 61 - dev_err(connector->dev->dev, 62 - "connector_detect: cannot find slave encoder for connector\n"); 63 - return status; 64 - } 65 - 66 - sfuncs = slave->slave_funcs; 67 - if (sfuncs && sfuncs->detect) 68 - return sfuncs->detect(&slave->base, connector); 69 - 70 - dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n"); 71 - return status; 72 - } 73 - 74 - static void arcpgu_drm_connector_destroy(struct drm_connector *connector) 75 - { 76 - drm_connector_unregister(connector); 77 - drm_connector_cleanup(connector); 78 - } 
79 - 80 - static const struct drm_connector_helper_funcs 81 - arcpgu_drm_connector_helper_funcs = { 82 - .get_modes = arcpgu_drm_connector_get_modes, 83 - }; 84 - 85 - static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { 86 - .dpms = drm_helper_connector_dpms, 87 - .reset = drm_atomic_helper_connector_reset, 88 - .detect = arcpgu_drm_connector_detect, 89 - .fill_modes = drm_helper_probe_single_connector_modes, 90 - .destroy = arcpgu_drm_connector_destroy, 91 - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 92 - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 93 - }; 94 - 95 - static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = { 96 - .dpms = drm_i2c_encoder_dpms, 97 - .mode_fixup = drm_i2c_encoder_mode_fixup, 98 - .mode_set = drm_i2c_encoder_mode_set, 99 - .prepare = drm_i2c_encoder_prepare, 100 - .commit = drm_i2c_encoder_commit, 101 - .detect = drm_i2c_encoder_detect, 102 - }; 103 21 104 22 static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { 105 23 .destroy = drm_encoder_cleanup, ··· 25 107 26 108 int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) 27 109 { 28 - struct arcpgu_drm_connector *arcpgu_connector; 29 - struct drm_i2c_encoder_driver *driver; 30 - struct drm_encoder_slave *encoder; 31 - struct drm_connector *connector; 32 - struct i2c_client *i2c_slave; 33 - int ret; 110 + struct drm_encoder *encoder; 111 + struct drm_bridge *bridge; 112 + 113 + int ret = 0; 34 114 35 115 encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); 36 116 if (encoder == NULL) 37 117 return -ENOMEM; 38 118 39 - i2c_slave = of_find_i2c_device_by_node(np); 40 - if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { 41 - dev_err(drm->dev, "failed to find i2c slave encoder\n"); 119 + /* Locate drm bridge from the hdmi encoder DT node */ 120 + bridge = of_drm_find_bridge(np); 121 + if (!bridge) 42 122 return -EPROBE_DEFER; 43 - } 44 123 45 - if (i2c_slave->dev.driver 
== NULL) { 46 - dev_err(drm->dev, "failed to find i2c slave driver\n"); 47 - return -EPROBE_DEFER; 48 - } 49 - 50 - driver = 51 - to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver)); 52 - ret = driver->encoder_init(i2c_slave, drm, encoder); 53 - if (ret) { 54 - dev_err(drm->dev, "failed to initialize i2c encoder slave\n"); 55 - return ret; 56 - } 57 - 58 - encoder->base.possible_crtcs = 1; 59 - encoder->base.possible_clones = 0; 60 - ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs, 124 + encoder->possible_crtcs = 1; 125 + encoder->possible_clones = 0; 126 + ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, 61 127 DRM_MODE_ENCODER_TMDS, NULL); 62 128 if (ret) 63 129 return ret; 64 130 65 - drm_encoder_helper_add(&encoder->base, 66 - &arcpgu_drm_encoder_helper_funcs); 131 + /* Link drm_bridge to encoder */ 132 + bridge->encoder = encoder; 133 + encoder->bridge = bridge; 67 134 68 - arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), 69 - GFP_KERNEL); 70 - if (!arcpgu_connector) { 71 - ret = -ENOMEM; 72 - goto error_encoder_cleanup; 73 - } 135 + ret = drm_bridge_attach(drm, bridge); 136 + if (ret) 137 + drm_encoder_cleanup(encoder); 74 138 75 - connector = &arcpgu_connector->connector; 76 - drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); 77 - ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, 78 - DRM_MODE_CONNECTOR_HDMIA); 79 - if (ret < 0) { 80 - dev_err(drm->dev, "failed to initialize drm connector\n"); 81 - goto error_encoder_cleanup; 82 - } 83 - 84 - ret = drm_mode_connector_attach_encoder(connector, &encoder->base); 85 - if (ret < 0) { 86 - dev_err(drm->dev, "could not attach connector to encoder\n"); 87 - drm_connector_unregister(connector); 88 - goto error_connector_cleanup; 89 - } 90 - 91 - arcpgu_connector->encoder_slave = encoder; 92 - 93 - return 0; 94 - 95 - error_connector_cleanup: 96 - drm_connector_cleanup(connector); 97 - 98 - 
error_encoder_cleanup: 99 - drm_encoder_cleanup(&encoder->base); 100 139 return ret; 101 140 }
+11 -2
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
··· 25 25 static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, 26 26 struct drm_crtc_state *old_crtc_state) 27 27 { 28 + struct drm_device *dev = crtc->dev; 29 + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 28 30 struct drm_pending_vblank_event *event = crtc->state->event; 31 + 32 + regmap_write(fsl_dev->regmap, 33 + DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); 29 34 30 35 if (event) { 31 36 crtc->state->event = NULL; ··· 44 39 } 45 40 } 46 41 47 - static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) 42 + static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc, 43 + struct drm_crtc_state *old_crtc_state) 48 44 { 49 45 struct drm_device *dev = crtc->dev; 50 46 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 47 + 48 + /* always disable planes on the CRTC */ 49 + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); 51 50 52 51 drm_crtc_vblank_off(crtc); 53 52 ··· 131 122 } 132 123 133 124 static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { 125 + .atomic_disable = fsl_dcu_drm_crtc_atomic_disable, 134 126 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, 135 - .disable = fsl_dcu_drm_disable_crtc, 136 127 .enable = fsl_dcu_drm_crtc_enable, 137 128 .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, 138 129 };
-4
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
··· 59 59 60 60 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); 61 61 regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); 62 - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, 63 - DCU_UPDATE_MODE_READREG); 64 62 65 63 return ret; 66 64 } ··· 137 139 drm_handle_vblank(dev, 0); 138 140 139 141 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); 140 - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, 141 - DCU_UPDATE_MODE_READREG); 142 142 143 143 return IRQ_HANDLED; 144 144 }
-5
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
··· 160 160 DCU_LAYER_POST_SKIP(0) | 161 161 DCU_LAYER_PRE_SKIP(0)); 162 162 } 163 - regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, 164 - DCU_MODE_DCU_MODE_MASK, 165 - DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); 166 - regmap_write(fsl_dev->regmap, 167 - DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); 168 163 169 164 return; 170 165 }
+17 -3
drivers/gpu/drm/i915/i915_gem.c
··· 1806 1806 /* Use a partial view if it is bigger than available space */ 1807 1807 chunk_size = MIN_CHUNK_PAGES; 1808 1808 if (i915_gem_object_is_tiled(obj)) 1809 - chunk_size = max(chunk_size, tile_row_pages(obj)); 1809 + chunk_size = roundup(chunk_size, tile_row_pages(obj)); 1810 1810 1811 1811 memset(&view, 0, sizeof(view)); 1812 1812 view.type = I915_GGTT_VIEW_PARTIAL; ··· 3543 3543 if (view->type == I915_GGTT_VIEW_NORMAL) 3544 3544 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 3545 3545 PIN_MAPPABLE | PIN_NONBLOCK); 3546 - if (IS_ERR(vma)) 3547 - vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0); 3546 + if (IS_ERR(vma)) { 3547 + struct drm_i915_private *i915 = to_i915(obj->base.dev); 3548 + unsigned int flags; 3549 + 3550 + /* Valleyview is definitely limited to scanning out the first 3551 + * 512MiB. Lets presume this behaviour was inherited from the 3552 + * g4x display engine and that all earlier gen are similarly 3553 + * limited. Testing suggests that it is a little more 3554 + * complicated than this. For example, Cherryview appears quite 3555 + * happy to scanout from anywhere within its global aperture. 3556 + */ 3557 + flags = 0; 3558 + if (HAS_GMCH_DISPLAY(i915)) 3559 + flags = PIN_MAPPABLE; 3560 + vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 3561 + } 3548 3562 if (IS_ERR(vma)) 3549 3563 goto err_unpin_display; 3550 3564
+8
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 1281 1281 return ctx; 1282 1282 } 1283 1283 1284 + static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) 1285 + { 1286 + return !(obj->cache_level == I915_CACHE_NONE || 1287 + obj->cache_level == I915_CACHE_WT); 1288 + } 1289 + 1284 1290 void i915_vma_move_to_active(struct i915_vma *vma, 1285 1291 struct drm_i915_gem_request *req, 1286 1292 unsigned int flags) ··· 1317 1311 1318 1312 /* update for the implicit flush after a batch */ 1319 1313 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; 1314 + if (!obj->cache_dirty && gpu_write_needs_clflush(obj)) 1315 + obj->cache_dirty = true; 1320 1316 } 1321 1317 1322 1318 if (flags & EXEC_OBJECT_NEEDS_FENCE)
+22 -8
drivers/gpu/drm/i915/intel_bios.c
··· 1143 1143 if (!child) 1144 1144 return; 1145 1145 1146 - aux_channel = child->raw[25]; 1146 + aux_channel = child->common.aux_channel; 1147 1147 ddc_pin = child->common.ddc_pin; 1148 1148 1149 1149 is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; ··· 1673 1673 return false; 1674 1674 } 1675 1675 1676 - bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) 1676 + static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child, 1677 + enum port port) 1677 1678 { 1678 1679 static const struct { 1679 1680 u16 dp, hdmi; ··· 1688 1687 [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, 1689 1688 [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, 1690 1689 }; 1691 - int i; 1692 1690 1693 1691 if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) 1694 1692 return false; 1695 1693 1696 - if (!dev_priv->vbt.child_dev_num) 1694 + if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != 1695 + (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) 1697 1696 return false; 1697 + 1698 + if (p_child->common.dvo_port == port_mapping[port].dp) 1699 + return true; 1700 + 1701 + /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ 1702 + if (p_child->common.dvo_port == port_mapping[port].hdmi && 1703 + p_child->common.aux_channel != 0) 1704 + return true; 1705 + 1706 + return false; 1707 + } 1708 + 1709 + bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, 1710 + enum port port) 1711 + { 1712 + int i; 1698 1713 1699 1714 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 1700 1715 const union child_device_config *p_child = 1701 1716 &dev_priv->vbt.child_dev[i]; 1702 1717 1703 - if ((p_child->common.dvo_port == port_mapping[port].dp || 1704 - p_child->common.dvo_port == port_mapping[port].hdmi) && 1705 - (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == 1706 - (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) 1718 + if 
(child_dev_is_dp_dual_mode(p_child, port)) 1707 1719 return true; 1708 1720 } 1709 1721
+26 -3
drivers/gpu/drm/i915/intel_display.c
··· 10243 10243 bxt_set_cdclk(to_i915(dev), req_cdclk); 10244 10244 } 10245 10245 10246 + static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state, 10247 + int pixel_rate) 10248 + { 10249 + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 10250 + 10251 + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 10252 + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) 10253 + pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); 10254 + 10255 + /* BSpec says "Do not use DisplayPort with CDCLK less than 10256 + * 432 MHz, audio enabled, port width x4, and link rate 10257 + * HBR2 (5.4 GHz), or else there may be audio corruption or 10258 + * screen corruption." 10259 + */ 10260 + if (intel_crtc_has_dp_encoder(crtc_state) && 10261 + crtc_state->has_audio && 10262 + crtc_state->port_clock >= 540000 && 10263 + crtc_state->lane_count == 4) 10264 + pixel_rate = max(432000, pixel_rate); 10265 + 10266 + return pixel_rate; 10267 + } 10268 + 10246 10269 /* compute the max rate for new configuration */ 10247 10270 static int ilk_max_pixel_rate(struct drm_atomic_state *state) 10248 10271 { ··· 10291 10268 10292 10269 pixel_rate = ilk_pipe_pixel_rate(crtc_state); 10293 10270 10294 - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 10295 - if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) 10296 - pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); 10271 + if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv)) 10272 + pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state, 10273 + pixel_rate); 10297 10274 10298 10275 intel_state->min_pixclk[i] = pixel_rate; 10299 10276 }
-10
drivers/gpu/drm/i915/intel_dp.c
··· 4463 4463 intel_dp_detect(struct drm_connector *connector, bool force) 4464 4464 { 4465 4465 struct intel_dp *intel_dp = intel_attached_dp(connector); 4466 - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4467 - struct intel_encoder *intel_encoder = &intel_dig_port->base; 4468 4466 enum drm_connector_status status = connector->status; 4469 4467 4470 4468 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4471 4469 connector->base.id, connector->name); 4472 - 4473 - if (intel_dp->is_mst) { 4474 - /* MST devices are disconnected from a monitor POV */ 4475 - intel_dp_unset_edid(intel_dp); 4476 - if (intel_encoder->type != INTEL_OUTPUT_EDP) 4477 - intel_encoder->type = INTEL_OUTPUT_DP; 4478 - return connector_status_disconnected; 4479 - } 4480 4470 4481 4471 /* If full detect is not performed yet, do a full detect */ 4482 4472 if (!intel_dp->detect_done)
+48 -36
drivers/gpu/drm/i915/intel_hdmi.c
··· 1799 1799 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 1800 1800 } 1801 1801 1802 + static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, 1803 + enum port port) 1804 + { 1805 + const struct ddi_vbt_port_info *info = 1806 + &dev_priv->vbt.ddi_port_info[port]; 1807 + u8 ddc_pin; 1808 + 1809 + if (info->alternate_ddc_pin) { 1810 + DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n", 1811 + info->alternate_ddc_pin, port_name(port)); 1812 + return info->alternate_ddc_pin; 1813 + } 1814 + 1815 + switch (port) { 1816 + case PORT_B: 1817 + if (IS_BROXTON(dev_priv)) 1818 + ddc_pin = GMBUS_PIN_1_BXT; 1819 + else 1820 + ddc_pin = GMBUS_PIN_DPB; 1821 + break; 1822 + case PORT_C: 1823 + if (IS_BROXTON(dev_priv)) 1824 + ddc_pin = GMBUS_PIN_2_BXT; 1825 + else 1826 + ddc_pin = GMBUS_PIN_DPC; 1827 + break; 1828 + case PORT_D: 1829 + if (IS_CHERRYVIEW(dev_priv)) 1830 + ddc_pin = GMBUS_PIN_DPD_CHV; 1831 + else 1832 + ddc_pin = GMBUS_PIN_DPD; 1833 + break; 1834 + default: 1835 + MISSING_CASE(port); 1836 + ddc_pin = GMBUS_PIN_DPB; 1837 + break; 1838 + } 1839 + 1840 + DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n", 1841 + ddc_pin, port_name(port)); 1842 + 1843 + return ddc_pin; 1844 + } 1845 + 1802 1846 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 1803 1847 struct intel_connector *intel_connector) 1804 1848 { ··· 1852 1808 struct drm_device *dev = intel_encoder->base.dev; 1853 1809 struct drm_i915_private *dev_priv = to_i915(dev); 1854 1810 enum port port = intel_dig_port->port; 1855 - uint8_t alternate_ddc_pin; 1856 1811 1857 1812 DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", 1858 1813 port_name(port)); ··· 1869 1826 connector->doublescan_allowed = 0; 1870 1827 connector->stereo_allowed = 1; 1871 1828 1829 + intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); 1830 + 1872 1831 switch (port) { 1873 1832 case PORT_B: 1874 - if (IS_BROXTON(dev_priv)) 1875 - intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT; 1876 - 
else 1877 - intel_hdmi->ddc_bus = GMBUS_PIN_DPB; 1878 1833 /* 1879 1834 * On BXT A0/A1, sw needs to activate DDIA HPD logic and 1880 1835 * interrupts to check the external panel connection. ··· 1883 1842 intel_encoder->hpd_pin = HPD_PORT_B; 1884 1843 break; 1885 1844 case PORT_C: 1886 - if (IS_BROXTON(dev_priv)) 1887 - intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT; 1888 - else 1889 - intel_hdmi->ddc_bus = GMBUS_PIN_DPC; 1890 1845 intel_encoder->hpd_pin = HPD_PORT_C; 1891 1846 break; 1892 1847 case PORT_D: 1893 - if (WARN_ON(IS_BROXTON(dev_priv))) 1894 - intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED; 1895 - else if (IS_CHERRYVIEW(dev_priv)) 1896 - intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV; 1897 - else 1898 - intel_hdmi->ddc_bus = GMBUS_PIN_DPD; 1899 1848 intel_encoder->hpd_pin = HPD_PORT_D; 1900 1849 break; 1901 1850 case PORT_E: 1902 - /* On SKL PORT E doesn't have seperate GMBUS pin 1903 - * We rely on VBT to set a proper alternate GMBUS pin. */ 1904 - alternate_ddc_pin = 1905 - dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin; 1906 - switch (alternate_ddc_pin) { 1907 - case DDC_PIN_B: 1908 - intel_hdmi->ddc_bus = GMBUS_PIN_DPB; 1909 - break; 1910 - case DDC_PIN_C: 1911 - intel_hdmi->ddc_bus = GMBUS_PIN_DPC; 1912 - break; 1913 - case DDC_PIN_D: 1914 - intel_hdmi->ddc_bus = GMBUS_PIN_DPD; 1915 - break; 1916 - default: 1917 - MISSING_CASE(alternate_ddc_pin); 1918 - } 1919 1851 intel_encoder->hpd_pin = HPD_PORT_E; 1920 1852 break; 1921 - case PORT_A: 1922 - intel_encoder->hpd_pin = HPD_PORT_A; 1923 - /* Internal port only for eDP. */ 1924 1853 default: 1925 - BUG(); 1854 + MISSING_CASE(port); 1855 + return; 1926 1856 } 1927 1857 1928 1858 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+3 -1
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 1139 1139 1140 1140 intel_power_sequencer_reset(dev_priv); 1141 1141 1142 - intel_hpd_poll_init(dev_priv); 1142 + /* Prevent us from re-enabling polling on accident in late suspend */ 1143 + if (!dev_priv->drm.dev->power.is_suspended) 1144 + intel_hpd_poll_init(dev_priv); 1143 1145 } 1144 1146 1145 1147 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+1 -1
drivers/gpu/drm/i915/intel_sprite.c
··· 358 358 int plane = intel_plane->plane; 359 359 u32 sprctl; 360 360 u32 sprsurf_offset, linear_offset; 361 - unsigned int rotation = dplane->state->rotation; 361 + unsigned int rotation = plane_state->base.rotation; 362 362 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 363 363 int crtc_x = plane_state->base.dst.x1; 364 364 int crtc_y = plane_state->base.dst.y1;
+2 -1
drivers/gpu/drm/i915/intel_vbt_defs.h
··· 280 280 u8 dp_support:1; 281 281 u8 tmds_support:1; 282 282 u8 support_reserved:5; 283 - u8 not_common3[12]; 283 + u8 aux_channel; 284 + u8 not_common3[11]; 284 285 u8 iboost_level; 285 286 } __packed; 286 287
+6 -3
drivers/gpu/drm/imx/ipuv3-crtc.c
··· 68 68 69 69 ipu_dc_disable_channel(ipu_crtc->dc); 70 70 ipu_di_disable(ipu_crtc->di); 71 + /* 72 + * Planes must be disabled before DC clock is removed, as otherwise the 73 + * attached IDMACs will be left in undefined state, possibly hanging 74 + * the IPU or even system. 75 + */ 76 + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); 71 77 ipu_dc_disable(ipu); 72 78 73 79 spin_lock_irq(&crtc->dev->event_lock); ··· 82 76 crtc->state->event = NULL; 83 77 } 84 78 spin_unlock_irq(&crtc->dev->event_lock); 85 - 86 - /* always disable planes on the CRTC */ 87 - drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); 88 79 89 80 drm_crtc_vblank_off(crtc); 90 81 }
+1
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
··· 80 80 ddp_comp); 81 81 82 82 priv->crtc = crtc; 83 + writel(0x0, comp->regs + DISP_REG_OVL_INTSTA); 83 84 writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); 84 85 } 85 86
+7 -2
drivers/gpu/drm/mediatek/mtk_dpi.c
··· 432 432 unsigned long pll_rate; 433 433 unsigned int factor; 434 434 435 + /* let pll_rate can fix the valid range of tvdpll (1G~2GHz) */ 435 436 pix_rate = 1000UL * mode->clock; 436 - if (mode->clock <= 74000) 437 + if (mode->clock <= 27000) 438 + factor = 16 * 3; 439 + else if (mode->clock <= 84000) 437 440 factor = 8 * 3; 438 - else 441 + else if (mode->clock <= 167000) 439 442 factor = 4 * 3; 443 + else 444 + factor = 2 * 3; 440 445 pll_rate = pix_rate * factor; 441 446 442 447 dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
+11 -6
drivers/gpu/drm/mediatek/mtk_hdmi.c
··· 1133 1133 phy_power_on(hdmi->phy); 1134 1134 mtk_hdmi_aud_output_config(hdmi, mode); 1135 1135 1136 - mtk_hdmi_setup_audio_infoframe(hdmi); 1137 - mtk_hdmi_setup_avi_infoframe(hdmi, mode); 1138 - mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); 1139 - if (mode->flags & DRM_MODE_FLAG_3D_MASK) 1140 - mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); 1141 - 1142 1136 mtk_hdmi_hw_vid_black(hdmi, false); 1143 1137 mtk_hdmi_hw_aud_unmute(hdmi); 1144 1138 mtk_hdmi_hw_send_av_unmute(hdmi); ··· 1395 1401 hdmi->powered = true; 1396 1402 } 1397 1403 1404 + static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi, 1405 + struct drm_display_mode *mode) 1406 + { 1407 + mtk_hdmi_setup_audio_infoframe(hdmi); 1408 + mtk_hdmi_setup_avi_infoframe(hdmi, mode); 1409 + mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); 1410 + if (mode->flags & DRM_MODE_FLAG_3D_MASK) 1411 + mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); 1412 + } 1413 + 1398 1414 static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) 1399 1415 { 1400 1416 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); ··· 1413 1409 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); 1414 1410 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); 1415 1411 phy_power_on(hdmi->phy); 1412 + mtk_hdmi_send_infoframe(hdmi, &hdmi->mode); 1416 1413 1417 1414 hdmi->enabled = true; 1418 1415 }
+30 -12
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
··· 265 265 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); 266 266 unsigned int pre_div; 267 267 unsigned int div; 268 + unsigned int pre_ibias; 269 + unsigned int hdmi_ibias; 270 + unsigned int imp_en; 268 271 269 272 dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, 270 273 rate, parent_rate); ··· 301 298 (0x1 << PLL_BR_SHIFT), 302 299 RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | 303 300 RG_HDMITX_PLL_BR); 304 - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN); 301 + if (rate < 165000000) { 302 + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, 303 + RG_HDMITX_PRD_IMP_EN); 304 + pre_ibias = 0x3; 305 + imp_en = 0x0; 306 + hdmi_ibias = hdmi_phy->ibias; 307 + } else { 308 + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3, 309 + RG_HDMITX_PRD_IMP_EN); 310 + pre_ibias = 0x6; 311 + imp_en = 0xf; 312 + hdmi_ibias = hdmi_phy->ibias_up; 313 + } 305 314 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 306 - (0x3 << PRD_IBIAS_CLK_SHIFT) | 307 - (0x3 << PRD_IBIAS_D2_SHIFT) | 308 - (0x3 << PRD_IBIAS_D1_SHIFT) | 309 - (0x3 << PRD_IBIAS_D0_SHIFT), 315 + (pre_ibias << PRD_IBIAS_CLK_SHIFT) | 316 + (pre_ibias << PRD_IBIAS_D2_SHIFT) | 317 + (pre_ibias << PRD_IBIAS_D1_SHIFT) | 318 + (pre_ibias << PRD_IBIAS_D0_SHIFT), 310 319 RG_HDMITX_PRD_IBIAS_CLK | 311 320 RG_HDMITX_PRD_IBIAS_D2 | 312 321 RG_HDMITX_PRD_IBIAS_D1 | 313 322 RG_HDMITX_PRD_IBIAS_D0); 314 323 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, 315 - (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN); 324 + (imp_en << DRV_IMP_EN_SHIFT), 325 + RG_HDMITX_DRV_IMP_EN); 316 326 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, 317 327 (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | 318 328 (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | ··· 334 318 RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | 335 319 RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); 336 320 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, 337 - (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) | 338 - (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) | 339 - (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) | 340 - 
(hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT), 341 - RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 | 342 - RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0); 321 + (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) | 322 + (hdmi_ibias << DRV_IBIAS_D2_SHIFT) | 323 + (hdmi_ibias << DRV_IBIAS_D1_SHIFT) | 324 + (hdmi_ibias << DRV_IBIAS_D0_SHIFT), 325 + RG_HDMITX_DRV_IBIAS_CLK | 326 + RG_HDMITX_DRV_IBIAS_D2 | 327 + RG_HDMITX_DRV_IBIAS_D1 | 328 + RG_HDMITX_DRV_IBIAS_D0); 343 329 return 0; 344 330 } 345 331
+12 -2
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 139 139 140 140 u32 err_work_state; 141 141 struct work_struct err_work; 142 + struct work_struct hpd_work; 142 143 struct workqueue_struct *workqueue; 143 144 144 145 /* DSI 6G TX buffer*/ ··· 1295 1294 wmb(); /* make sure dsi controller enabled again */ 1296 1295 } 1297 1296 1297 + static void dsi_hpd_worker(struct work_struct *work) 1298 + { 1299 + struct msm_dsi_host *msm_host = 1300 + container_of(work, struct msm_dsi_host, hpd_work); 1301 + 1302 + drm_helper_hpd_irq_event(msm_host->dev); 1303 + } 1304 + 1298 1305 static void dsi_err_worker(struct work_struct *work) 1299 1306 { 1300 1307 struct msm_dsi_host *msm_host = ··· 1489 1480 1490 1481 DBG("id=%d", msm_host->id); 1491 1482 if (msm_host->dev) 1492 - drm_helper_hpd_irq_event(msm_host->dev); 1483 + queue_work(msm_host->workqueue, &msm_host->hpd_work); 1493 1484 1494 1485 return 0; 1495 1486 } ··· 1503 1494 1504 1495 DBG("id=%d", msm_host->id); 1505 1496 if (msm_host->dev) 1506 - drm_helper_hpd_irq_event(msm_host->dev); 1497 + queue_work(msm_host->workqueue, &msm_host->hpd_work); 1507 1498 1508 1499 return 0; 1509 1500 } ··· 1757 1748 /* setup workqueue */ 1758 1749 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); 1759 1750 INIT_WORK(&msm_host->err_work, dsi_err_worker); 1751 + INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); 1760 1752 1761 1753 msm_dsi->host = &msm_host->base; 1762 1754 msm_dsi->id = msm_host->id;
+1
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
··· 521 521 .parent_names = (const char *[]){ "xo" }, 522 522 .num_parents = 1, 523 523 .name = vco_name, 524 + .flags = CLK_IGNORE_UNUSED, 524 525 .ops = &clk_ops_dsi_pll_28nm_vco, 525 526 }; 526 527 struct device *dev = &pll_28nm->pdev->dev;
+1
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
··· 412 412 struct clk_init_data vco_init = { 413 413 .parent_names = (const char *[]){ "pxo" }, 414 414 .num_parents = 1, 415 + .flags = CLK_IGNORE_UNUSED, 415 416 .ops = &clk_ops_dsi_pll_28nm_vco, 416 417 }; 417 418 struct device *dev = &pll_28nm->pdev->dev;
+1
drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
··· 702 702 .ops = &hdmi_8996_pll_ops, 703 703 .parent_names = hdmi_pll_parents, 704 704 .num_parents = ARRAY_SIZE(hdmi_pll_parents), 705 + .flags = CLK_IGNORE_UNUSED, 705 706 }; 706 707 707 708 int msm_hdmi_pll_8996_init(struct platform_device *pdev)
+1
drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
··· 424 424 .ops = &hdmi_pll_ops, 425 425 .parent_names = hdmi_pll_parents, 426 426 .num_parents = ARRAY_SIZE(hdmi_pll_parents), 427 + .flags = CLK_IGNORE_UNUSED, 427 428 }; 428 429 429 430 int msm_hdmi_pll_8960_init(struct platform_device *pdev)
+2 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
··· 272 272 .count = 2, 273 273 .base = { 0x14000, 0x16000 }, 274 274 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 275 - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 275 + MDP_PIPE_CAP_DECIMATION, 276 276 }, 277 277 .pipe_dma = { 278 278 .count = 1, ··· 282 282 .lm = { 283 283 .count = 2, /* LM0 and LM3 */ 284 284 .base = { 0x44000, 0x47000 }, 285 - .nb_stages = 5, 285 + .nb_stages = 8, 286 286 .max_width = 2048, 287 287 .max_height = 0xFFFF, 288 288 },
+28 -18
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
··· 223 223 plane_cnt++; 224 224 } 225 225 226 - /* 227 - * If there is no base layer, enable border color. 228 - * Although it's not possbile in current blend logic, 229 - * put it here as a reminder. 230 - */ 231 - if (!pstates[STAGE_BASE] && plane_cnt) { 226 + if (!pstates[STAGE_BASE]) { 232 227 ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; 233 228 DBG("Border Color is enabled"); 234 229 } ··· 360 365 return pa->state->zpos - pb->state->zpos; 361 366 } 362 367 368 + /* is there a helper for this? */ 369 + static bool is_fullscreen(struct drm_crtc_state *cstate, 370 + struct drm_plane_state *pstate) 371 + { 372 + return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) && 373 + ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) && 374 + ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); 375 + } 376 + 363 377 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, 364 378 struct drm_crtc_state *state) 365 379 { ··· 379 375 struct plane_state pstates[STAGE_MAX + 1]; 380 376 const struct mdp5_cfg_hw *hw_cfg; 381 377 const struct drm_plane_state *pstate; 382 - int cnt = 0, i; 378 + int cnt = 0, base = 0, i; 383 379 384 380 DBG("%s: check", mdp5_crtc->name); 385 381 386 - /* verify that there are not too many planes attached to crtc 387 - * and that we don't have conflicting mixer stages: 388 - */ 389 - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 390 382 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { 391 - if (cnt >= (hw_cfg->lm.nb_stages)) { 392 - dev_err(dev->dev, "too many planes!\n"); 393 - return -EINVAL; 394 - } 395 - 396 - 397 383 pstates[cnt].plane = plane; 398 384 pstates[cnt].state = to_mdp5_plane_state(pstate); 399 385 ··· 393 399 /* assign a stage based on sorted zpos property */ 394 400 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); 395 401 402 + /* if the bottom-most layer is not fullscreen, we need to use 403 + * it for solid-color: 404 + */ 405 + if ((cnt > 0) && !is_fullscreen(state, 
&pstates[0].state->base)) 406 + base++; 407 + 408 + /* verify that there are not too many planes attached to crtc 409 + * and that we don't have conflicting mixer stages: 410 + */ 411 + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 412 + 413 + if ((cnt + base) >= hw_cfg->lm.nb_stages) { 414 + dev_err(dev->dev, "too many planes!\n"); 415 + return -EINVAL; 416 + } 417 + 396 418 for (i = 0; i < cnt; i++) { 397 - pstates[i].state->stage = STAGE_BASE + i; 419 + pstates[i].state->stage = STAGE_BASE + i + base; 398 420 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, 399 421 pipe2name(mdp5_plane_pipe(pstates[i].plane)), 400 422 pstates[i].state->stage);
+3 -6
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 292 292 format = to_mdp_format(msm_framebuffer_format(state->fb)); 293 293 if (MDP_FORMAT_IS_YUV(format) && 294 294 !pipe_supports_yuv(mdp5_plane->caps)) { 295 - dev_err(plane->dev->dev, 296 - "Pipe doesn't support YUV\n"); 295 + DBG("Pipe doesn't support YUV\n"); 297 296 298 297 return -EINVAL; 299 298 } ··· 300 301 if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && 301 302 (((state->src_w >> 16) != state->crtc_w) || 302 303 ((state->src_h >> 16) != state->crtc_h))) { 303 - dev_err(plane->dev->dev, 304 - "Pipe doesn't support scaling (%dx%d -> %dx%d)\n", 304 + DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n", 305 305 state->src_w >> 16, state->src_h >> 16, 306 306 state->crtc_w, state->crtc_h); 307 307 ··· 311 313 vflip = !!(state->rotation & DRM_REFLECT_Y); 312 314 if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || 313 315 (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { 314 - dev_err(plane->dev->dev, 315 - "Pipe doesn't support flip\n"); 316 + DBG("Pipe doesn't support flip\n"); 316 317 317 318 return -EINVAL; 318 319 }
+1 -1
drivers/gpu/drm/msm/msm_drv.c
··· 228 228 flush_workqueue(priv->atomic_wq); 229 229 destroy_workqueue(priv->atomic_wq); 230 230 231 - if (kms) 231 + if (kms && kms->funcs) 232 232 kms->funcs->destroy(kms); 233 233 234 234 if (gpu) {
+5 -2
drivers/gpu/drm/msm/msm_gem_shrinker.c
··· 154 154 void msm_gem_shrinker_cleanup(struct drm_device *dev) 155 155 { 156 156 struct msm_drm_private *priv = dev->dev_private; 157 - WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); 158 - unregister_shrinker(&priv->shrinker); 157 + 158 + if (priv->shrinker.nr_deferred) { 159 + WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); 160 + unregister_shrinker(&priv->shrinker); 161 + } 159 162 }
+1 -1
drivers/gpu/drm/radeon/radeon_connectors.c
··· 931 931 { 932 932 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 933 933 934 - if (radeon_connector->ddc_bus->has_aux) { 934 + if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) { 935 935 drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux); 936 936 radeon_connector->ddc_bus->has_aux = false; 937 937 }
+13
drivers/gpu/drm/radeon/radeon_device.c
··· 104 104 "LAST", 105 105 }; 106 106 107 + #if defined(CONFIG_VGA_SWITCHEROO) 108 + bool radeon_has_atpx_dgpu_power_cntl(void); 109 + bool radeon_is_atpx_hybrid(void); 110 + #else 111 + static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } 112 + static inline bool radeon_is_atpx_hybrid(void) { return false; } 113 + #endif 114 + 107 115 #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 108 116 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) 109 117 ··· 167 159 } 168 160 169 161 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) 162 + rdev->flags &= ~RADEON_IS_PX; 163 + 164 + /* disable PX is the system doesn't support dGPU power control or hybrid gfx */ 165 + if (!radeon_is_atpx_hybrid() && 166 + !radeon_has_atpx_dgpu_power_cntl()) 170 167 rdev->flags &= ~RADEON_IS_PX; 171 168 } 172 169
+2 -2
drivers/gpu/drm/sun4i/sun4i_drv.c
··· 142 142 143 143 /* Create our layers */ 144 144 drv->layers = sun4i_layers_init(drm); 145 - if (!drv->layers) { 145 + if (IS_ERR(drv->layers)) { 146 146 dev_err(drm->dev, "Couldn't create the planes\n"); 147 - ret = -EINVAL; 147 + ret = PTR_ERR(drv->layers); 148 148 goto free_drm; 149 149 } 150 150
+8 -12
drivers/gpu/drm/sun4i/sun4i_rgb.c
··· 152 152 153 153 DRM_DEBUG_DRIVER("Enabling RGB output\n"); 154 154 155 - if (!IS_ERR(tcon->panel)) { 155 + if (!IS_ERR(tcon->panel)) 156 156 drm_panel_prepare(tcon->panel); 157 - drm_panel_enable(tcon->panel); 158 - } 159 - 160 - /* encoder->bridge can be NULL; drm_bridge_enable checks for it */ 161 - drm_bridge_enable(encoder->bridge); 162 157 163 158 sun4i_tcon_channel_enable(tcon, 0); 159 + 160 + if (!IS_ERR(tcon->panel)) 161 + drm_panel_enable(tcon->panel); 164 162 } 165 163 166 164 static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) ··· 169 171 170 172 DRM_DEBUG_DRIVER("Disabling RGB output\n"); 171 173 174 + if (!IS_ERR(tcon->panel)) 175 + drm_panel_disable(tcon->panel); 176 + 172 177 sun4i_tcon_channel_disable(tcon, 0); 173 178 174 - /* encoder->bridge can be NULL; drm_bridge_disable checks for it */ 175 - drm_bridge_disable(encoder->bridge); 176 - 177 - if (!IS_ERR(tcon->panel)) { 178 - drm_panel_disable(tcon->panel); 179 + if (!IS_ERR(tcon->panel)) 179 180 drm_panel_unprepare(tcon->panel); 180 - } 181 181 } 182 182 183 183 static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
+11 -5
drivers/gpu/drm/udl/udl_main.c
··· 98 98 static int udl_select_std_channel(struct udl_device *udl) 99 99 { 100 100 int ret; 101 - u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, 102 - 0x1C, 0x88, 0x5E, 0x15, 103 - 0x60, 0xFE, 0xC6, 0x97, 104 - 0x16, 0x3D, 0x47, 0xF2}; 101 + static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, 102 + 0x1C, 0x88, 0x5E, 0x15, 103 + 0x60, 0xFE, 0xC6, 0x97, 104 + 0x16, 0x3D, 0x47, 0xF2}; 105 + void *sendbuf; 106 + 107 + sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); 108 + if (!sendbuf) 109 + return -ENOMEM; 105 110 106 111 ret = usb_control_msg(udl->udev, 107 112 usb_sndctrlpipe(udl->udev, 0), 108 113 NR_USB_REQUEST_CHANNEL, 109 114 (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, 110 - set_def_chn, sizeof(set_def_chn), 115 + sendbuf, sizeof(set_def_chn), 111 116 USB_CTRL_SET_TIMEOUT); 117 + kfree(sendbuf); 112 118 return ret < 0 ? ret : 0; 113 119 } 114 120
+1 -1
drivers/hv/vmbus_drv.c
··· 961 961 { 962 962 int ret = 0; 963 963 964 - dev_set_name(&child_device_obj->device, "vmbus-%pUl", 964 + dev_set_name(&child_device_obj->device, "%pUl", 965 965 child_device_obj->channel->offermsg.offer.if_instance.b); 966 966 967 967 child_device_obj->device.bus = &hv_bus;
-1
drivers/i2c/Kconfig
··· 59 59 60 60 config I2C_MUX 61 61 tristate "I2C bus multiplexing support" 62 - depends on HAS_IOMEM 63 62 help 64 63 Say Y here if you want the I2C core to support the ability to 65 64 handle multiplexed I2C bus topologies, by presenting each
+1 -1
drivers/i2c/busses/i2c-digicolor.c
··· 347 347 348 348 ret = i2c_add_adapter(&i2c->adap); 349 349 if (ret < 0) { 350 - clk_unprepare(i2c->clk); 350 + clk_disable_unprepare(i2c->clk); 351 351 return ret; 352 352 } 353 353
+1
drivers/i2c/muxes/Kconfig
··· 63 63 64 64 config I2C_MUX_REG 65 65 tristate "Register-based I2C multiplexer" 66 + depends on HAS_IOMEM 66 67 help 67 68 If you say yes to this option, support will be included for a 68 69 register based I2C multiplexer. This driver provides access to
+20 -2
drivers/i2c/muxes/i2c-demux-pinctrl.c
··· 69 69 goto err_with_revert; 70 70 } 71 71 72 - p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); 72 + /* 73 + * Check if there are pinctrl states at all. Note: we cant' use 74 + * devm_pinctrl_get_select() because we need to distinguish between 75 + * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state(). 76 + */ 77 + p = devm_pinctrl_get(adap->dev.parent); 73 78 if (IS_ERR(p)) { 74 79 ret = PTR_ERR(p); 75 - goto err_with_put; 80 + /* continue if just no pinctrl states (e.g. i2c-gpio), otherwise exit */ 81 + if (ret != -ENODEV) 82 + goto err_with_put; 83 + } else { 84 + /* there are states. check and use them */ 85 + struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name); 86 + 87 + if (IS_ERR(s)) { 88 + ret = PTR_ERR(s); 89 + goto err_with_put; 90 + } 91 + ret = pinctrl_select_state(p, s); 92 + if (ret < 0) 93 + goto err_with_put; 76 94 } 77 95 78 96 priv->chan[new_chan].parent_adap = adap;
+2 -2
drivers/i2c/muxes/i2c-mux-pca954x.c
··· 268 268 /* discard unconfigured channels */ 269 269 break; 270 270 idle_disconnect_pd = pdata->modes[num].deselect_on_exit; 271 - data->deselect |= (idle_disconnect_pd 272 - || idle_disconnect_dt) << num; 273 271 } 272 + data->deselect |= (idle_disconnect_pd || 273 + idle_disconnect_dt) << num; 274 274 275 275 ret = i2c_mux_add_adapter(muxc, force, num, class); 276 276
+8 -4
drivers/iio/accel/st_accel_core.c
··· 743 743 744 744 return IIO_VAL_INT; 745 745 case IIO_CHAN_INFO_SCALE: 746 - *val = 0; 747 - *val2 = adata->current_fullscale->gain; 746 + *val = adata->current_fullscale->gain / 1000000; 747 + *val2 = adata->current_fullscale->gain % 1000000; 748 748 return IIO_VAL_INT_PLUS_MICRO; 749 749 case IIO_CHAN_INFO_SAMP_FREQ: 750 750 *val = adata->odr; ··· 763 763 int err; 764 764 765 765 switch (mask) { 766 - case IIO_CHAN_INFO_SCALE: 767 - err = st_sensors_set_fullscale_by_gain(indio_dev, val2); 766 + case IIO_CHAN_INFO_SCALE: { 767 + int gain; 768 + 769 + gain = val * 1000000 + val2; 770 + err = st_sensors_set_fullscale_by_gain(indio_dev, gain); 768 771 break; 772 + } 769 773 case IIO_CHAN_INFO_SAMP_FREQ: 770 774 if (val2) 771 775 return -EINVAL;
+28 -28
drivers/iio/common/hid-sensors/hid-sensor-attributes.c
··· 30 30 u32 usage_id; 31 31 int unit; /* 0 for default others from HID sensor spec */ 32 32 int scale_val0; /* scale, whole number */ 33 - int scale_val1; /* scale, fraction in micros */ 33 + int scale_val1; /* scale, fraction in nanos */ 34 34 } unit_conversion[] = { 35 - {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650}, 35 + {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000}, 36 36 {HID_USAGE_SENSOR_ACCEL_3D, 37 37 HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0}, 38 38 {HID_USAGE_SENSOR_ACCEL_3D, 39 - HID_USAGE_SENSOR_UNITS_G, 9, 806650}, 39 + HID_USAGE_SENSOR_UNITS_G, 9, 806650000}, 40 40 41 - {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453}, 41 + {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293}, 42 42 {HID_USAGE_SENSOR_GYRO_3D, 43 43 HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0}, 44 44 {HID_USAGE_SENSOR_GYRO_3D, 45 - HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453}, 45 + HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293}, 46 46 47 - {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000}, 47 + {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000}, 48 48 {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0}, 49 49 50 - {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453}, 50 + {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293}, 51 51 {HID_USAGE_SENSOR_INCLINOMETER_3D, 52 - HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453}, 52 + HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293}, 53 53 {HID_USAGE_SENSOR_INCLINOMETER_3D, 54 54 HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0}, 55 55 ··· 57 57 {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, 58 58 59 59 {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, 60 - {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000}, 60 + {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000}, 61 61 }; 62 62 63 63 static int pow_10(unsigned power) ··· 266 266 /* 267 267 * This fuction applies the unit exponent to the scale. 
268 268 * For example: 269 - * 9.806650 ->exp:2-> val0[980]val1[665000] 270 - * 9.000806 ->exp:2-> val0[900]val1[80600] 271 - * 0.174535 ->exp:2-> val0[17]val1[453500] 272 - * 1.001745 ->exp:0-> val0[1]val1[1745] 273 - * 1.001745 ->exp:2-> val0[100]val1[174500] 274 - * 1.001745 ->exp:4-> val0[10017]val1[450000] 275 - * 9.806650 ->exp:-2-> val0[0]val1[98066] 269 + * 9.806650000 ->exp:2-> val0[980]val1[665000000] 270 + * 9.000806000 ->exp:2-> val0[900]val1[80600000] 271 + * 0.174535293 ->exp:2-> val0[17]val1[453529300] 272 + * 1.001745329 ->exp:0-> val0[1]val1[1745329] 273 + * 1.001745329 ->exp:2-> val0[100]val1[174532900] 274 + * 1.001745329 ->exp:4-> val0[10017]val1[453290000] 275 + * 9.806650000 ->exp:-2-> val0[0]val1[98066500] 276 276 */ 277 - static void adjust_exponent_micro(int *val0, int *val1, int scale0, 277 + static void adjust_exponent_nano(int *val0, int *val1, int scale0, 278 278 int scale1, int exp) 279 279 { 280 280 int i; ··· 285 285 if (exp > 0) { 286 286 *val0 = scale0 * pow_10(exp); 287 287 res = 0; 288 - if (exp > 6) { 288 + if (exp > 9) { 289 289 *val1 = 0; 290 290 return; 291 291 } 292 292 for (i = 0; i < exp; ++i) { 293 - x = scale1 / pow_10(5 - i); 293 + x = scale1 / pow_10(8 - i); 294 294 res += (pow_10(exp - 1 - i) * x); 295 - scale1 = scale1 % pow_10(5 - i); 295 + scale1 = scale1 % pow_10(8 - i); 296 296 } 297 297 *val0 += res; 298 298 *val1 = scale1 * pow_10(exp); 299 299 } else if (exp < 0) { 300 300 exp = abs(exp); 301 - if (exp > 6) { 301 + if (exp > 9) { 302 302 *val0 = *val1 = 0; 303 303 return; 304 304 } 305 305 *val0 = scale0 / pow_10(exp); 306 306 rem = scale0 % pow_10(exp); 307 307 res = 0; 308 - for (i = 0; i < (6 - exp); ++i) { 309 - x = scale1 / pow_10(5 - i); 310 - res += (pow_10(5 - exp - i) * x); 311 - scale1 = scale1 % pow_10(5 - i); 308 + for (i = 0; i < (9 - exp); ++i) { 309 + x = scale1 / pow_10(8 - i); 310 + res += (pow_10(8 - exp - i) * x); 311 + scale1 = scale1 % pow_10(8 - i); 312 312 } 313 - *val1 = rem * pow_10(6 
- exp) + res; 313 + *val1 = rem * pow_10(9 - exp) + res; 314 314 } else { 315 315 *val0 = scale0; 316 316 *val1 = scale1; ··· 332 332 unit_conversion[i].unit == attr_info->units) { 333 333 exp = hid_sensor_convert_exponent( 334 334 attr_info->unit_expo); 335 - adjust_exponent_micro(val0, val1, 335 + adjust_exponent_nano(val0, val1, 336 336 unit_conversion[i].scale_val0, 337 337 unit_conversion[i].scale_val1, exp); 338 338 break; 339 339 } 340 340 } 341 341 342 - return IIO_VAL_INT_PLUS_MICRO; 342 + return IIO_VAL_INT_PLUS_NANO; 343 343 } 344 344 EXPORT_SYMBOL(hid_sensor_format_scale); 345 345
+5 -3
drivers/iio/common/st_sensors/st_sensors_core.c
··· 612 612 ssize_t st_sensors_sysfs_scale_avail(struct device *dev, 613 613 struct device_attribute *attr, char *buf) 614 614 { 615 - int i, len = 0; 615 + int i, len = 0, q, r; 616 616 struct iio_dev *indio_dev = dev_get_drvdata(dev); 617 617 struct st_sensor_data *sdata = iio_priv(indio_dev); 618 618 ··· 621 621 if (sdata->sensor_settings->fs.fs_avl[i].num == 0) 622 622 break; 623 623 624 - len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", 625 - sdata->sensor_settings->fs.fs_avl[i].gain); 624 + q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000; 625 + r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000; 626 + 627 + len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r); 626 628 } 627 629 mutex_unlock(&indio_dev->mlock); 628 630 buf[len - 1] = '\n';
+1
drivers/iio/orientation/hid-sensor-rotation.c
··· 335 335 .id_table = hid_dev_rot_ids, 336 336 .driver = { 337 337 .name = KBUILD_MODNAME, 338 + .pm = &hid_sensor_pm_ops, 338 339 }, 339 340 .probe = hid_dev_rot_probe, 340 341 .remove = hid_dev_rot_remove,
+2
drivers/iio/temperature/maxim_thermocouple.c
··· 136 136 ret = spi_read(data->spi, (void *)&buf32, storage_bytes); 137 137 *val = be32_to_cpu(buf32); 138 138 break; 139 + default: 140 + ret = -EINVAL; 139 141 } 140 142 141 143 if (ret)
+9 -2
drivers/infiniband/core/addr.c
··· 699 699 struct resolve_cb_context { 700 700 struct rdma_dev_addr *addr; 701 701 struct completion comp; 702 + int status; 702 703 }; 703 704 704 705 static void resolve_cb(int status, struct sockaddr *src_addr, 705 706 struct rdma_dev_addr *addr, void *context) 706 707 { 707 - memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct 708 - rdma_dev_addr)); 708 + if (!status) 709 + memcpy(((struct resolve_cb_context *)context)->addr, 710 + addr, sizeof(struct rdma_dev_addr)); 711 + ((struct resolve_cb_context *)context)->status = status; 709 712 complete(&((struct resolve_cb_context *)context)->comp); 710 713 } 711 714 ··· 745 742 return ret; 746 743 747 744 wait_for_completion(&ctx.comp); 745 + 746 + ret = ctx.status; 747 + if (ret) 748 + return ret; 748 749 749 750 memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); 750 751 dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
+110 -16
drivers/infiniband/core/cm.c
··· 80 80 __be32 random_id_operand; 81 81 struct list_head timewait_list; 82 82 struct workqueue_struct *wq; 83 + /* Sync on cm change port state */ 84 + spinlock_t state_lock; 83 85 } cm; 84 86 85 87 /* Counter indexes ordered by attribute ID */ ··· 163 161 struct ib_mad_agent *mad_agent; 164 162 struct kobject port_obj; 165 163 u8 port_num; 164 + struct list_head cm_priv_prim_list; 165 + struct list_head cm_priv_altr_list; 166 166 struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; 167 167 }; 168 168 ··· 245 241 u8 service_timeout; 246 242 u8 target_ack_delay; 247 243 244 + struct list_head prim_list; 245 + struct list_head altr_list; 246 + /* Indicates that the send port mad is registered and av is set */ 247 + int prim_send_port_not_ready; 248 + int altr_send_port_not_ready; 249 + 248 250 struct list_head work_list; 249 251 atomic_t work_count; 250 252 }; ··· 269 259 struct ib_mad_agent *mad_agent; 270 260 struct ib_mad_send_buf *m; 271 261 struct ib_ah *ah; 262 + struct cm_av *av; 263 + unsigned long flags, flags2; 264 + int ret = 0; 272 265 266 + /* don't let the port to be released till the agent is down */ 267 + spin_lock_irqsave(&cm.state_lock, flags2); 268 + spin_lock_irqsave(&cm.lock, flags); 269 + if (!cm_id_priv->prim_send_port_not_ready) 270 + av = &cm_id_priv->av; 271 + else if (!cm_id_priv->altr_send_port_not_ready && 272 + (cm_id_priv->alt_av.port)) 273 + av = &cm_id_priv->alt_av; 274 + else { 275 + pr_info("%s: not valid CM id\n", __func__); 276 + ret = -ENODEV; 277 + spin_unlock_irqrestore(&cm.lock, flags); 278 + goto out; 279 + } 280 + spin_unlock_irqrestore(&cm.lock, flags); 281 + /* Make sure the port haven't released the mad yet */ 273 282 mad_agent = cm_id_priv->av.port->mad_agent; 274 - ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); 275 - if (IS_ERR(ah)) 276 - return PTR_ERR(ah); 283 + if (!mad_agent) { 284 + pr_info("%s: not a valid MAD agent\n", __func__); 285 + ret = -ENODEV; 286 + goto out; 287 + } 288 + ah = 
ib_create_ah(mad_agent->qp->pd, &av->ah_attr); 289 + if (IS_ERR(ah)) { 290 + ret = PTR_ERR(ah); 291 + goto out; 292 + } 277 293 278 294 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 279 - cm_id_priv->av.pkey_index, 295 + av->pkey_index, 280 296 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 281 297 GFP_ATOMIC, 282 298 IB_MGMT_BASE_VERSION); 283 299 if (IS_ERR(m)) { 284 300 ib_destroy_ah(ah); 285 - return PTR_ERR(m); 301 + ret = PTR_ERR(m); 302 + goto out; 286 303 } 287 304 288 305 /* Timeout set by caller if response is expected. */ ··· 319 282 atomic_inc(&cm_id_priv->refcount); 320 283 m->context[0] = cm_id_priv; 321 284 *msg = m; 322 - return 0; 285 + 286 + out: 287 + spin_unlock_irqrestore(&cm.state_lock, flags2); 288 + return ret; 323 289 } 324 290 325 291 static int cm_alloc_response_msg(struct cm_port *port, ··· 392 352 grh, &av->ah_attr); 393 353 } 394 354 395 - static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) 355 + static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, 356 + struct cm_id_private *cm_id_priv) 396 357 { 397 358 struct cm_device *cm_dev; 398 359 struct cm_port *port = NULL; ··· 428 387 &av->ah_attr); 429 388 av->timeout = path->packet_life_time + 1; 430 389 431 - return 0; 390 + spin_lock_irqsave(&cm.lock, flags); 391 + if (&cm_id_priv->av == av) 392 + list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); 393 + else if (&cm_id_priv->alt_av == av) 394 + list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); 395 + else 396 + ret = -EINVAL; 397 + 398 + spin_unlock_irqrestore(&cm.lock, flags); 399 + 400 + return ret; 432 401 } 433 402 434 403 static int cm_alloc_id(struct cm_id_private *cm_id_priv) ··· 728 677 spin_lock_init(&cm_id_priv->lock); 729 678 init_completion(&cm_id_priv->comp); 730 679 INIT_LIST_HEAD(&cm_id_priv->work_list); 680 + INIT_LIST_HEAD(&cm_id_priv->prim_list); 681 + INIT_LIST_HEAD(&cm_id_priv->altr_list); 731 682 atomic_set(&cm_id_priv->work_count, 
-1); 732 683 atomic_set(&cm_id_priv->refcount, 1); 733 684 return &cm_id_priv->id; ··· 944 891 spin_unlock_irq(&cm_id_priv->lock); 945 892 break; 946 893 } 894 + 895 + spin_lock_irq(&cm.lock); 896 + if (!list_empty(&cm_id_priv->altr_list) && 897 + (!cm_id_priv->altr_send_port_not_ready)) 898 + list_del(&cm_id_priv->altr_list); 899 + if (!list_empty(&cm_id_priv->prim_list) && 900 + (!cm_id_priv->prim_send_port_not_ready)) 901 + list_del(&cm_id_priv->prim_list); 902 + spin_unlock_irq(&cm.lock); 947 903 948 904 cm_free_id(cm_id->local_id); 949 905 cm_deref_id(cm_id_priv); ··· 1254 1192 goto out; 1255 1193 } 1256 1194 1257 - ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); 1195 + ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, 1196 + cm_id_priv); 1258 1197 if (ret) 1259 1198 goto error1; 1260 1199 if (param->alternate_path) { 1261 1200 ret = cm_init_av_by_path(param->alternate_path, 1262 - &cm_id_priv->alt_av); 1201 + &cm_id_priv->alt_av, cm_id_priv); 1263 1202 if (ret) 1264 1203 goto error1; 1265 1204 } ··· 1716 1653 dev_put(gid_attr.ndev); 1717 1654 } 1718 1655 work->path[0].gid_type = gid_attr.gid_type; 1719 - ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); 1656 + ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, 1657 + cm_id_priv); 1720 1658 } 1721 1659 if (ret) { 1722 1660 int err = ib_get_cached_gid(work->port->cm_dev->ib_device, ··· 1736 1672 goto rejected; 1737 1673 } 1738 1674 if (req_msg->alt_local_lid) { 1739 - ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); 1675 + ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, 1676 + cm_id_priv); 1740 1677 if (ret) { 1741 1678 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, 1742 1679 &work->path[0].sgid, ··· 2792 2727 goto out; 2793 2728 } 2794 2729 2795 - ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); 2730 + ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, 2731 + cm_id_priv); 2796 2732 if (ret) 2797 2733 goto out; 
2798 2734 cm_id_priv->alt_av.timeout = ··· 2905 2839 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2906 2840 work->mad_recv_wc->recv_buf.grh, 2907 2841 &cm_id_priv->av); 2908 - cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); 2842 + cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, 2843 + cm_id_priv); 2909 2844 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2910 2845 if (!ret) 2911 2846 list_add_tail(&work->list, &cm_id_priv->work_list); ··· 3098 3031 return -EINVAL; 3099 3032 3100 3033 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3101 - ret = cm_init_av_by_path(param->path, &cm_id_priv->av); 3034 + ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); 3102 3035 if (ret) 3103 3036 goto out; 3104 3037 ··· 3535 3468 static int cm_migrate(struct ib_cm_id *cm_id) 3536 3469 { 3537 3470 struct cm_id_private *cm_id_priv; 3471 + struct cm_av tmp_av; 3538 3472 unsigned long flags; 3473 + int tmp_send_port_not_ready; 3539 3474 int ret = 0; 3540 3475 3541 3476 cm_id_priv = container_of(cm_id, struct cm_id_private, id); ··· 3546 3477 (cm_id->lap_state == IB_CM_LAP_UNINIT || 3547 3478 cm_id->lap_state == IB_CM_LAP_IDLE)) { 3548 3479 cm_id->lap_state = IB_CM_LAP_IDLE; 3480 + /* Swap address vector */ 3481 + tmp_av = cm_id_priv->av; 3549 3482 cm_id_priv->av = cm_id_priv->alt_av; 3483 + cm_id_priv->alt_av = tmp_av; 3484 + /* Swap port send ready state */ 3485 + tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; 3486 + cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; 3487 + cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; 3550 3488 } else 3551 3489 ret = -EINVAL; 3552 3490 spin_unlock_irqrestore(&cm_id_priv->lock, flags); ··· 3964 3888 port->cm_dev = cm_dev; 3965 3889 port->port_num = i; 3966 3890 3891 + INIT_LIST_HEAD(&port->cm_priv_prim_list); 3892 + INIT_LIST_HEAD(&port->cm_priv_altr_list); 3893 + 3967 3894 ret = cm_create_port_fs(port); 3968 3895 if 
(ret) 3969 3896 goto error1; ··· 4024 3945 { 4025 3946 struct cm_device *cm_dev = client_data; 4026 3947 struct cm_port *port; 3948 + struct cm_id_private *cm_id_priv; 3949 + struct ib_mad_agent *cur_mad_agent; 4027 3950 struct ib_port_modify port_modify = { 4028 3951 .clr_port_cap_mask = IB_PORT_CM_SUP 4029 3952 }; ··· 4049 3968 4050 3969 port = cm_dev->port[i-1]; 4051 3970 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 3971 + /* Mark all the cm_id's as not valid */ 3972 + spin_lock_irq(&cm.lock); 3973 + list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) 3974 + cm_id_priv->altr_send_port_not_ready = 1; 3975 + list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) 3976 + cm_id_priv->prim_send_port_not_ready = 1; 3977 + spin_unlock_irq(&cm.lock); 4052 3978 /* 4053 3979 * We flush the queue here after the going_down set, this 4054 3980 * verify that no new works will be queued in the recv handler, 4055 3981 * after that we can call the unregister_mad_agent 4056 3982 */ 4057 3983 flush_workqueue(cm.wq); 4058 - ib_unregister_mad_agent(port->mad_agent); 3984 + spin_lock_irq(&cm.state_lock); 3985 + cur_mad_agent = port->mad_agent; 3986 + port->mad_agent = NULL; 3987 + spin_unlock_irq(&cm.state_lock); 3988 + ib_unregister_mad_agent(cur_mad_agent); 4059 3989 cm_remove_port_fs(port); 4060 3990 } 3991 + 4061 3992 device_unregister(cm_dev->device); 4062 3993 kfree(cm_dev); 4063 3994 } ··· 4082 3989 INIT_LIST_HEAD(&cm.device_list); 4083 3990 rwlock_init(&cm.device_lock); 4084 3991 spin_lock_init(&cm.lock); 3992 + spin_lock_init(&cm.state_lock); 4085 3993 cm.listen_service_table = RB_ROOT; 4086 3994 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 4087 3995 cm.remote_id_table = RB_ROOT;
+48 -27
drivers/infiniband/core/cma.c
··· 1094 1094 } 1095 1095 } 1096 1096 1097 - static void cma_save_ip4_info(struct sockaddr *src_addr, 1098 - struct sockaddr *dst_addr, 1097 + static void cma_save_ip4_info(struct sockaddr_in *src_addr, 1098 + struct sockaddr_in *dst_addr, 1099 1099 struct cma_hdr *hdr, 1100 1100 __be16 local_port) 1101 1101 { 1102 - struct sockaddr_in *ip4; 1103 - 1104 1102 if (src_addr) { 1105 - ip4 = (struct sockaddr_in *)src_addr; 1106 - ip4->sin_family = AF_INET; 1107 - ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; 1108 - ip4->sin_port = local_port; 1103 + *src_addr = (struct sockaddr_in) { 1104 + .sin_family = AF_INET, 1105 + .sin_addr.s_addr = hdr->dst_addr.ip4.addr, 1106 + .sin_port = local_port, 1107 + }; 1109 1108 } 1110 1109 1111 1110 if (dst_addr) { 1112 - ip4 = (struct sockaddr_in *)dst_addr; 1113 - ip4->sin_family = AF_INET; 1114 - ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; 1115 - ip4->sin_port = hdr->port; 1111 + *dst_addr = (struct sockaddr_in) { 1112 + .sin_family = AF_INET, 1113 + .sin_addr.s_addr = hdr->src_addr.ip4.addr, 1114 + .sin_port = hdr->port, 1115 + }; 1116 1116 } 1117 1117 } 1118 1118 1119 - static void cma_save_ip6_info(struct sockaddr *src_addr, 1120 - struct sockaddr *dst_addr, 1119 + static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, 1120 + struct sockaddr_in6 *dst_addr, 1121 1121 struct cma_hdr *hdr, 1122 1122 __be16 local_port) 1123 1123 { 1124 - struct sockaddr_in6 *ip6; 1125 - 1126 1124 if (src_addr) { 1127 - ip6 = (struct sockaddr_in6 *)src_addr; 1128 - ip6->sin6_family = AF_INET6; 1129 - ip6->sin6_addr = hdr->dst_addr.ip6; 1130 - ip6->sin6_port = local_port; 1125 + *src_addr = (struct sockaddr_in6) { 1126 + .sin6_family = AF_INET6, 1127 + .sin6_addr = hdr->dst_addr.ip6, 1128 + .sin6_port = local_port, 1129 + }; 1131 1130 } 1132 1131 1133 1132 if (dst_addr) { 1134 - ip6 = (struct sockaddr_in6 *)dst_addr; 1135 - ip6->sin6_family = AF_INET6; 1136 - ip6->sin6_addr = hdr->src_addr.ip6; 1137 - ip6->sin6_port = hdr->port; 1133 + 
*dst_addr = (struct sockaddr_in6) { 1134 + .sin6_family = AF_INET6, 1135 + .sin6_addr = hdr->src_addr.ip6, 1136 + .sin6_port = hdr->port, 1137 + }; 1138 1138 } 1139 1139 } 1140 1140 ··· 1159 1159 1160 1160 switch (cma_get_ip_ver(hdr)) { 1161 1161 case 4: 1162 - cma_save_ip4_info(src_addr, dst_addr, hdr, port); 1162 + cma_save_ip4_info((struct sockaddr_in *)src_addr, 1163 + (struct sockaddr_in *)dst_addr, hdr, port); 1163 1164 break; 1164 1165 case 6: 1165 - cma_save_ip6_info(src_addr, dst_addr, hdr, port); 1166 + cma_save_ip6_info((struct sockaddr_in6 *)src_addr, 1167 + (struct sockaddr_in6 *)dst_addr, hdr, port); 1166 1168 break; 1167 1169 default: 1168 1170 return -EAFNOSUPPORT; ··· 2438 2436 return 0; 2439 2437 } 2440 2438 2439 + static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 2440 + unsigned long supported_gids, 2441 + enum ib_gid_type default_gid) 2442 + { 2443 + if ((network_type == RDMA_NETWORK_IPV4 || 2444 + network_type == RDMA_NETWORK_IPV6) && 2445 + test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 2446 + return IB_GID_TYPE_ROCE_UDP_ENCAP; 2447 + 2448 + return default_gid; 2449 + } 2450 + 2441 2451 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2442 2452 { 2443 2453 struct rdma_route *route = &id_priv->id.route; ··· 2475 2461 route->num_paths = 1; 2476 2462 2477 2463 if (addr->dev_addr.bound_dev_if) { 2464 + unsigned long supported_gids; 2465 + 2478 2466 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); 2479 2467 if (!ndev) { 2480 2468 ret = -ENODEV; ··· 2500 2484 2501 2485 route->path_rec->net = &init_net; 2502 2486 route->path_rec->ifindex = ndev->ifindex; 2503 - route->path_rec->gid_type = id_priv->gid_type; 2487 + supported_gids = roce_gid_type_mask_support(id_priv->id.device, 2488 + id_priv->id.port_num); 2489 + route->path_rec->gid_type = 2490 + cma_route_gid_type(addr->dev_addr.network, 2491 + supported_gids, 2492 + id_priv->gid_type); 2504 2493 } 2505 2494 if (!ndev) { 2506 
2495 ret = -ENODEV;
+1 -1
drivers/infiniband/core/umem.c
··· 175 175 176 176 cur_base = addr & PAGE_MASK; 177 177 178 - if (npages == 0) { 178 + if (npages == 0 || npages > UINT_MAX) { 179 179 ret = -EINVAL; 180 180 goto out; 181 181 }
+2 -5
drivers/infiniband/core/uverbs_main.c
··· 262 262 container_of(uobj, struct ib_uqp_object, uevent.uobject); 263 263 264 264 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 265 - if (qp != qp->real_qp) { 266 - ib_close_qp(qp); 267 - } else { 265 + if (qp == qp->real_qp) 268 266 ib_uverbs_detach_umcast(qp, uqp); 269 - ib_destroy_qp(qp); 270 - } 267 + ib_destroy_qp(qp); 271 268 ib_uverbs_release_uevent(file, &uqp->uevent); 272 269 kfree(uqp); 273 270 }
+3 -14
drivers/infiniband/hw/cxgb4/cq.c
··· 666 666 return ret; 667 667 } 668 668 669 - static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey) 670 - { 671 - struct c4iw_mr *mhp; 672 - unsigned long flags; 673 - 674 - spin_lock_irqsave(&rhp->lock, flags); 675 - mhp = get_mhp(rhp, rkey >> 8); 676 - if (mhp) 677 - mhp->attr.state = 0; 678 - spin_unlock_irqrestore(&rhp->lock, flags); 679 - } 680 - 681 669 /* 682 670 * Get one cq entry from c4iw and map it to openib. 683 671 * ··· 721 733 CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { 722 734 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); 723 735 wc->wc_flags |= IB_WC_WITH_INVALIDATE; 724 - invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); 736 + c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); 725 737 } 726 738 } else { 727 739 switch (CQE_OPCODE(&cqe)) { ··· 750 762 751 763 /* Invalidate the MR if the fastreg failed */ 752 764 if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) 753 - invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); 765 + c4iw_invalidate_mr(qhp->rhp, 766 + CQE_WRID_FR_STAG(&cqe)); 754 767 break; 755 768 default: 756 769 printk(KERN_ERR MOD "Unexpected opcode %d "
+1 -1
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 999 999 extern int use_dsgl; 1000 1000 void c4iw_drain_rq(struct ib_qp *qp); 1001 1001 void c4iw_drain_sq(struct ib_qp *qp); 1002 - 1002 + void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); 1003 1003 1004 1004 #endif
+12
drivers/infiniband/hw/cxgb4/mem.c
··· 770 770 kfree(mhp); 771 771 return 0; 772 772 } 773 + 774 + void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey) 775 + { 776 + struct c4iw_mr *mhp; 777 + unsigned long flags; 778 + 779 + spin_lock_irqsave(&rhp->lock, flags); 780 + mhp = get_mhp(rhp, rkey >> 8); 781 + if (mhp) 782 + mhp->attr.state = 0; 783 + spin_unlock_irqrestore(&rhp->lock, flags); 784 + }
+12 -8
drivers/infiniband/hw/cxgb4/qp.c
··· 706 706 return 0; 707 707 } 708 708 709 - static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe, 710 - struct ib_send_wr *wr, u8 *len16) 709 + static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) 711 710 { 712 - struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8); 713 - 714 - mhp->attr.state = 0; 715 711 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); 716 712 wqe->inv.r2 = 0; 717 713 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); ··· 793 797 spin_lock_irqsave(&qhp->lock, flag); 794 798 if (t4_wq_in_error(&qhp->wq)) { 795 799 spin_unlock_irqrestore(&qhp->lock, flag); 800 + *bad_wr = wr; 796 801 return -EINVAL; 797 802 } 798 803 num_wrs = t4_sq_avail(&qhp->wq); 799 804 if (num_wrs == 0) { 800 805 spin_unlock_irqrestore(&qhp->lock, flag); 806 + *bad_wr = wr; 801 807 return -ENOMEM; 802 808 } 803 809 while (wr) { ··· 838 840 case IB_WR_RDMA_READ_WITH_INV: 839 841 fw_opcode = FW_RI_RDMA_READ_WR; 840 842 swsqe->opcode = FW_RI_READ_REQ; 841 - if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) 843 + if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { 844 + c4iw_invalidate_mr(qhp->rhp, 845 + wr->sg_list[0].lkey); 842 846 fw_flags = FW_RI_RDMA_READ_INVALIDATE; 843 - else 847 + } else { 844 848 fw_flags = 0; 849 + } 845 850 err = build_rdma_read(wqe, wr, &len16); 846 851 if (err) 847 852 break; ··· 877 876 fw_flags |= FW_RI_LOCAL_FENCE_FLAG; 878 877 fw_opcode = FW_RI_INV_LSTAG_WR; 879 878 swsqe->opcode = FW_RI_LOCAL_INV; 880 - err = build_inv_stag(qhp->rhp, wqe, wr, &len16); 879 + err = build_inv_stag(wqe, wr, &len16); 880 + c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey); 881 881 break; 882 882 default: 883 883 PDBG("%s post of type=%d TBD!\n", __func__, ··· 936 934 spin_lock_irqsave(&qhp->lock, flag); 937 935 if (t4_wq_in_error(&qhp->wq)) { 938 936 spin_unlock_irqrestore(&qhp->lock, flag); 937 + *bad_wr = wr; 939 938 return -EINVAL; 940 939 } 941 940 num_wrs = t4_rq_avail(&qhp->wq); 942 941 if (num_wrs == 0) { 943 942 
spin_unlock_irqrestore(&qhp->lock, flag); 943 + *bad_wr = wr; 944 944 return -ENOMEM; 945 945 } 946 946 while (wr) {
-72
drivers/infiniband/hw/hfi1/affinity.c
··· 775 775 } 776 776 mutex_unlock(&affinity->lock); 777 777 } 778 - 779 - int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, 780 - size_t count) 781 - { 782 - struct hfi1_affinity_node *entry; 783 - cpumask_var_t mask; 784 - int ret, i; 785 - 786 - mutex_lock(&node_affinity.lock); 787 - entry = node_affinity_lookup(dd->node); 788 - 789 - if (!entry) { 790 - ret = -EINVAL; 791 - goto unlock; 792 - } 793 - 794 - ret = zalloc_cpumask_var(&mask, GFP_KERNEL); 795 - if (!ret) { 796 - ret = -ENOMEM; 797 - goto unlock; 798 - } 799 - 800 - ret = cpulist_parse(buf, mask); 801 - if (ret) 802 - goto out; 803 - 804 - if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) { 805 - dd_dev_warn(dd, "Invalid CPU mask\n"); 806 - ret = -EINVAL; 807 - goto out; 808 - } 809 - 810 - /* reset the SDMA interrupt affinity details */ 811 - init_cpu_mask_set(&entry->def_intr); 812 - cpumask_copy(&entry->def_intr.mask, mask); 813 - 814 - /* Reassign the affinity for each SDMA interrupt. */ 815 - for (i = 0; i < dd->num_msix_entries; i++) { 816 - struct hfi1_msix_entry *msix; 817 - 818 - msix = &dd->msix_entries[i]; 819 - if (msix->type != IRQ_SDMA) 820 - continue; 821 - 822 - ret = get_irq_affinity(dd, msix); 823 - 824 - if (ret) 825 - break; 826 - } 827 - out: 828 - free_cpumask_var(mask); 829 - unlock: 830 - mutex_unlock(&node_affinity.lock); 831 - return ret ? ret : strnlen(buf, PAGE_SIZE); 832 - } 833 - 834 - int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf) 835 - { 836 - struct hfi1_affinity_node *entry; 837 - 838 - mutex_lock(&node_affinity.lock); 839 - entry = node_affinity_lookup(dd->node); 840 - 841 - if (!entry) { 842 - mutex_unlock(&node_affinity.lock); 843 - return -EINVAL; 844 - } 845 - 846 - cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask); 847 - mutex_unlock(&node_affinity.lock); 848 - return strnlen(buf, PAGE_SIZE); 849 - }
-4
drivers/infiniband/hw/hfi1/affinity.h
··· 102 102 /* Release a CPU used by a user process. */ 103 103 void hfi1_put_proc_affinity(int); 104 104 105 - int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf); 106 - int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, 107 - size_t count); 108 - 109 105 struct hfi1_affinity_node { 110 106 int node; 111 107 struct cpu_mask_set def_intr;
+9 -18
drivers/infiniband/hw/hfi1/chip.c
··· 6301 6301 /* leave shared count at zero for both global and VL15 */ 6302 6302 write_global_credit(dd, vau, vl15buf, 0); 6303 6303 6304 - /* We may need some credits for another VL when sending packets 6305 - * with the snoop interface. Dividing it down the middle for VL15 6306 - * and VL0 should suffice. 6307 - */ 6308 - if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) { 6309 - write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1) 6310 - << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6311 - write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1) 6312 - << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT); 6313 - } else { 6314 - write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6315 - << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6316 - } 6304 + write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6305 + << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6317 6306 } 6318 6307 6319 6308 /* ··· 9904 9915 u32 mask = ~((1U << ppd->lmc) - 1); 9905 9916 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); 9906 9917 9907 - if (dd->hfi1_snoop.mode_flag) 9908 - dd_dev_info(dd, "Set lid/lmc while snooping"); 9909 - 9910 9918 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK 9911 9919 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); 9912 9920 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) ··· 12098 12112 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); 12099 12113 } 12100 12114 12101 - #define C_MAX_NAME 13 /* 12 chars + one for /0 */ 12115 + #define C_MAX_NAME 16 /* 15 chars + one for /0 */ 12102 12116 static int init_cntrs(struct hfi1_devdata *dd) 12103 12117 { 12104 12118 int i, rcv_ctxts, j; ··· 14449 14463 * Any error printing is already done by the init code. 14450 14464 * On return, we have the chip mapped. 
14451 14465 */ 14452 - ret = hfi1_pcie_ddinit(dd, pdev, ent); 14466 + ret = hfi1_pcie_ddinit(dd, pdev); 14453 14467 if (ret < 0) 14454 14468 goto bail_free; 14455 14469 ··· 14676 14690 ret = init_rcverr(dd); 14677 14691 if (ret) 14678 14692 goto bail_free_cntrs; 14693 + 14694 + init_completion(&dd->user_comp); 14695 + 14696 + /* The user refcount starts with one to inidicate an active device */ 14697 + atomic_set(&dd->user_refcount, 1); 14679 14698 14680 14699 goto bail; 14681 14700
+3
drivers/infiniband/hw/hfi1/chip.h
··· 320 320 /* DC_DC8051_CFG_MODE.GENERAL bits */ 321 321 #define DISABLE_SELF_GUID_CHECK 0x2 322 322 323 + /* Bad L2 frame error code */ 324 + #define BAD_L2_ERR 0x6 325 + 323 326 /* 324 327 * Eager buffer minimum and maximum sizes supported by the hardware. 325 328 * All power-of-two sizes in between are supported as well.
+26 -11
drivers/infiniband/hw/hfi1/driver.c
··· 599 599 dd->rhf_offset; 600 600 struct rvt_qp *qp; 601 601 struct ib_header *hdr; 602 - struct ib_other_headers *ohdr; 603 602 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; 604 603 u64 rhf = rhf_to_cpu(rhf_addr); 605 604 u32 etype = rhf_rcv_type(rhf), qpn, bth1; ··· 614 615 if (etype != RHF_RCV_TYPE_IB) 615 616 goto next; 616 617 617 - hdr = hfi1_get_msgheader(dd, rhf_addr); 618 + packet->hdr = hfi1_get_msgheader(dd, rhf_addr); 619 + hdr = packet->hdr; 618 620 619 621 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 620 622 621 - if (lnh == HFI1_LRH_BTH) 622 - ohdr = &hdr->u.oth; 623 - else if (lnh == HFI1_LRH_GRH) 624 - ohdr = &hdr->u.l.oth; 625 - else 623 + if (lnh == HFI1_LRH_BTH) { 624 + packet->ohdr = &hdr->u.oth; 625 + } else if (lnh == HFI1_LRH_GRH) { 626 + packet->ohdr = &hdr->u.l.oth; 627 + packet->rcv_flags |= HFI1_HAS_GRH; 628 + } else { 626 629 goto next; /* just in case */ 630 + } 627 631 628 - bth1 = be32_to_cpu(ohdr->bth[1]); 632 + bth1 = be32_to_cpu(packet->ohdr->bth[1]); 629 633 is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); 630 634 631 635 if (!is_ecn) ··· 648 646 649 647 /* turn off BECN, FECN */ 650 648 bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK); 651 - ohdr->bth[1] = cpu_to_be32(bth1); 649 + packet->ohdr->bth[1] = cpu_to_be32(bth1); 652 650 next: 653 651 update_ps_mdata(&mdata, rcd); 654 652 } ··· 1362 1360 1363 1361 int process_receive_bypass(struct hfi1_packet *packet) 1364 1362 { 1363 + struct hfi1_devdata *dd = packet->rcd->dd; 1364 + 1365 1365 if (unlikely(rhf_err_flags(packet->rhf))) 1366 1366 handle_eflags(packet); 1367 1367 1368 - dd_dev_err(packet->rcd->dd, 1368 + dd_dev_err(dd, 1369 1369 "Bypass packets are not supported in normal operation. 
Dropping\n"); 1370 - incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors); 1370 + incr_cntr64(&dd->sw_rcv_bypass_packet_errors); 1371 + if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) { 1372 + u64 *flits = packet->ebuf; 1373 + 1374 + if (flits && !(packet->rhf & RHF_LEN_ERR)) { 1375 + dd->err_info_rcvport.packet_flit1 = flits[0]; 1376 + dd->err_info_rcvport.packet_flit2 = 1377 + packet->tlen > sizeof(flits[0]) ? flits[1] : 0; 1378 + } 1379 + dd->err_info_rcvport.status_and_code |= 1380 + (OPA_EI_STATUS_SMASK | BAD_L2_ERR); 1381 + } 1371 1382 return RHF_RCV_CONTINUE; 1372 1383 } 1373 1384
+16 -3
drivers/infiniband/hw/hfi1/file_ops.c
··· 172 172 struct hfi1_devdata, 173 173 user_cdev); 174 174 175 + if (!atomic_inc_not_zero(&dd->user_refcount)) 176 + return -ENXIO; 177 + 175 178 /* Just take a ref now. Not all opens result in a context assign */ 176 179 kobject_get(&dd->kobj); 177 180 ··· 186 183 fd->rec_cpu_num = -1; /* no cpu affinity by default */ 187 184 fd->mm = current->mm; 188 185 atomic_inc(&fd->mm->mm_count); 186 + fp->private_data = fd; 187 + } else { 188 + fp->private_data = NULL; 189 + 190 + if (atomic_dec_and_test(&dd->user_refcount)) 191 + complete(&dd->user_comp); 192 + 193 + return -ENOMEM; 189 194 } 190 195 191 - fp->private_data = fd; 192 - 193 - return fd ? 0 : -ENOMEM; 196 + return 0; 194 197 } 195 198 196 199 static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, ··· 807 798 done: 808 799 mmdrop(fdata->mm); 809 800 kobject_put(&dd->kobj); 801 + 802 + if (atomic_dec_and_test(&dd->user_refcount)) 803 + complete(&dd->user_comp); 804 + 810 805 kfree(fdata); 811 806 return 0; 812 807 }
+34 -55
drivers/infiniband/hw/hfi1/hfi.h
··· 367 367 u8 etype; 368 368 }; 369 369 370 - /* 371 - * Private data for snoop/capture support. 372 - */ 373 - struct hfi1_snoop_data { 374 - int mode_flag; 375 - struct cdev cdev; 376 - struct device *class_dev; 377 - /* protect snoop data */ 378 - spinlock_t snoop_lock; 379 - struct list_head queue; 380 - wait_queue_head_t waitq; 381 - void *filter_value; 382 - int (*filter_callback)(void *hdr, void *data, void *value); 383 - u64 dcc_cfg; /* saved value of DCC Cfg register */ 384 - }; 385 - 386 - /* snoop mode_flag values */ 387 - #define HFI1_PORT_SNOOP_MODE 1U 388 - #define HFI1_PORT_CAPTURE_MODE 2U 389 - 390 370 struct rvt_sge_state; 391 371 392 372 /* ··· 592 612 /* host link state variables */ 593 613 struct mutex hls_lock; 594 614 u32 host_link_state; 595 - 596 - spinlock_t sdma_alllock ____cacheline_aligned_in_smp; 597 615 598 616 u32 lstate; /* logical link state */ 599 617 ··· 1082 1104 char *portcntrnames; 1083 1105 size_t portcntrnameslen; 1084 1106 1085 - struct hfi1_snoop_data hfi1_snoop; 1086 - 1087 1107 struct err_info_rcvport err_info_rcvport; 1088 1108 struct err_info_constraint err_info_rcv_constraint; 1089 1109 struct err_info_constraint err_info_xmit_constraint; ··· 1117 1141 rhf_rcv_function_ptr normal_rhf_rcv_functions[8]; 1118 1142 1119 1143 /* 1120 - * Handlers for outgoing data so that snoop/capture does not 1121 - * have to have its hooks in the send path 1144 + * Capability to have different send engines simply by changing a 1145 + * pointer value. 
1122 1146 */ 1123 1147 send_routine process_pio_send; 1124 1148 send_routine process_dma_send; ··· 1150 1174 spinlock_t aspm_lock; 1151 1175 /* Number of verbs contexts which have disabled ASPM */ 1152 1176 atomic_t aspm_disabled_cnt; 1177 + /* Keeps track of user space clients */ 1178 + atomic_t user_refcount; 1179 + /* Used to wait for outstanding user space clients before dev removal */ 1180 + struct completion user_comp; 1153 1181 1154 1182 struct hfi1_affinity *affinity; 1155 1183 struct rhashtable sdma_rht; ··· 1201 1221 extern u32 hfi1_cpulist_count; 1202 1222 extern unsigned long *hfi1_cpulist; 1203 1223 1204 - extern unsigned int snoop_drop_send; 1205 - extern unsigned int snoop_force_capture; 1206 1224 int hfi1_init(struct hfi1_devdata *, int); 1207 1225 int hfi1_count_units(int *npresentp, int *nupp); 1208 1226 int hfi1_count_active_units(void); ··· 1535 1557 void reset_link_credits(struct hfi1_devdata *dd); 1536 1558 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); 1537 1559 1538 - int snoop_recv_handler(struct hfi1_packet *packet); 1539 - int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 1540 - u64 pbc); 1541 - int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 1542 - u64 pbc); 1543 - void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, 1544 - u64 pbc, const void *from, size_t count); 1545 1560 int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); 1546 1561 1547 1562 static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) ··· 1734 1763 1735 1764 int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); 1736 1765 void hfi1_pcie_cleanup(struct pci_dev *); 1737 - int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *, 1738 - const struct pci_device_id *); 1766 + int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *); 1739 1767 void hfi1_pcie_ddcleanup(struct hfi1_devdata *); 1740 1768 void 
hfi1_pcie_flr(struct hfi1_devdata *); 1741 1769 int pcie_speeds(struct hfi1_devdata *); ··· 1769 1799 int kdeth_process_eager(struct hfi1_packet *packet); 1770 1800 int process_receive_invalid(struct hfi1_packet *packet); 1771 1801 1772 - extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8]; 1773 - 1774 1802 void update_sge(struct rvt_sge_state *ss, u32 length); 1775 1803 1776 1804 /* global module parameter variables */ ··· 1795 1827 #define DRIVER_NAME "hfi1" 1796 1828 #define HFI1_USER_MINOR_BASE 0 1797 1829 #define HFI1_TRACE_MINOR 127 1798 - #define HFI1_DIAGPKT_MINOR 128 1799 - #define HFI1_DIAG_MINOR_BASE 129 1800 - #define HFI1_SNOOP_CAPTURE_BASE 200 1801 1830 #define HFI1_NMINORS 255 1802 1831 1803 1832 #define PCI_VENDOR_ID_INTEL 0x8086 ··· 1813 1848 static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, 1814 1849 u16 ctxt_type) 1815 1850 { 1816 - u64 base_sc_integrity = 1851 + u64 base_sc_integrity; 1852 + 1853 + /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ 1854 + if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) 1855 + return 0; 1856 + 1857 + base_sc_integrity = 1817 1858 SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 1818 1859 | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK 1819 1860 | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK ··· 1834 1863 | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 1835 1864 | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK 1836 1865 | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK 1837 - | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 1838 1866 | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK 1839 1867 | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK; 1840 1868 ··· 1842 1872 else 1843 1873 base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; 1844 1874 1845 - if (is_ax(dd)) 1846 - /* turn off send-side job key checks - A0 */ 1847 - return base_sc_integrity & 1848 - ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1875 + /* turn on send-side job key checks if !A0 */ 1876 + if (!is_ax(dd)) 1877 + 
base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1878 + 1849 1879 return base_sc_integrity; 1850 1880 } 1851 1881 1852 1882 static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) 1853 1883 { 1854 - u64 base_sdma_integrity = 1884 + u64 base_sdma_integrity; 1885 + 1886 + /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ 1887 + if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) 1888 + return 0; 1889 + 1890 + base_sdma_integrity = 1855 1891 SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 1856 - | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK 1857 1892 | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK 1858 1893 | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK 1859 1894 | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK ··· 1870 1895 | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 1871 1896 | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK 1872 1897 | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK 1873 - | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 1874 1898 | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK 1875 1899 | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; 1876 1900 1877 - if (is_ax(dd)) 1878 - /* turn off send-side job key checks - A0 */ 1879 - return base_sdma_integrity & 1880 - ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1901 + if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) 1902 + base_sdma_integrity |= 1903 + SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK; 1904 + 1905 + /* turn on send-side job key checks if !A0 */ 1906 + if (!is_ax(dd)) 1907 + base_sdma_integrity |= 1908 + SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1909 + 1881 1910 return base_sdma_integrity; 1882 1911 } 1883 1912
+64 -40
drivers/infiniband/hw/hfi1/init.c
··· 144 144 struct hfi1_ctxtdata *rcd; 145 145 146 146 ppd = dd->pport + (i % dd->num_pports); 147 + 148 + /* dd->rcd[i] gets assigned inside the callee */ 147 149 rcd = hfi1_create_ctxtdata(ppd, i, dd->node); 148 150 if (!rcd) { 149 151 dd_dev_err(dd, ··· 171 169 if (!rcd->sc) { 172 170 dd_dev_err(dd, 173 171 "Unable to allocate kernel send context, failing\n"); 174 - dd->rcd[rcd->ctxt] = NULL; 175 - hfi1_free_ctxtdata(dd, rcd); 176 172 goto nomem; 177 173 } 178 174 ··· 178 178 if (ret < 0) { 179 179 dd_dev_err(dd, 180 180 "Failed to setup kernel receive context, failing\n"); 181 - sc_free(rcd->sc); 182 - dd->rcd[rcd->ctxt] = NULL; 183 - hfi1_free_ctxtdata(dd, rcd); 184 181 ret = -EFAULT; 185 182 goto bail; 186 183 } ··· 193 196 nomem: 194 197 ret = -ENOMEM; 195 198 bail: 199 + if (dd->rcd) { 200 + for (i = 0; i < dd->num_rcv_contexts; ++i) 201 + hfi1_free_ctxtdata(dd, dd->rcd[i]); 202 + } 196 203 kfree(dd->rcd); 197 204 dd->rcd = NULL; 198 205 return ret; ··· 217 216 dd->num_rcv_contexts - dd->first_user_ctxt) 218 217 kctxt_ngroups = (dd->rcv_entries.nctxt_extra - 219 218 (dd->num_rcv_contexts - dd->first_user_ctxt)); 220 - rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); 219 + rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa); 221 220 if (rcd) { 222 221 u32 rcvtids, max_entries; 223 222 ··· 262 261 } 263 262 rcd->eager_base = base * dd->rcv_entries.group_size; 264 263 265 - /* Validate and initialize Rcv Hdr Q variables */ 266 - if (rcvhdrcnt % HDRQ_INCREMENT) { 267 - dd_dev_err(dd, 268 - "ctxt%u: header queue count %d must be divisible by %lu\n", 269 - rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT); 270 - goto bail; 271 - } 272 264 rcd->rcvhdrq_cnt = rcvhdrcnt; 273 265 rcd->rcvhdrqentsize = hfi1_hdrq_entsize; 274 266 /* ··· 500 506 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); 501 507 502 508 mutex_init(&ppd->hls_lock); 503 - spin_lock_init(&ppd->sdma_alllock); 504 509 spin_lock_init(&ppd->qsfp_info.qsfp_lock); 505 510 506 511 ppd->qsfp_info.ppd = ppd; ··· 1392 1399 
hfi1_free_devdata(dd); 1393 1400 } 1394 1401 1402 + static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt) 1403 + { 1404 + if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 1405 + hfi1_early_err(dev, "Receive header queue count too small\n"); 1406 + return -EINVAL; 1407 + } 1408 + 1409 + if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 1410 + hfi1_early_err(dev, 1411 + "Receive header queue count cannot be greater than %u\n", 1412 + HFI1_MAX_HDRQ_EGRBUF_CNT); 1413 + return -EINVAL; 1414 + } 1415 + 1416 + if (thecnt % HDRQ_INCREMENT) { 1417 + hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n", 1418 + thecnt, HDRQ_INCREMENT); 1419 + return -EINVAL; 1420 + } 1421 + 1422 + return 0; 1423 + } 1424 + 1395 1425 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1396 1426 { 1397 1427 int ret = 0, j, pidx, initfail; 1398 - struct hfi1_devdata *dd = ERR_PTR(-EINVAL); 1428 + struct hfi1_devdata *dd; 1399 1429 struct hfi1_pportdata *ppd; 1400 1430 1401 1431 /* First, lock the non-writable module parameters */ 1402 1432 HFI1_CAP_LOCK(); 1403 1433 1404 1434 /* Validate some global module parameters */ 1405 - if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 1406 - hfi1_early_err(&pdev->dev, "Header queue count too small\n"); 1407 - ret = -EINVAL; 1435 + ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt); 1436 + if (ret) 1408 1437 goto bail; 1409 - } 1410 - if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 1411 - hfi1_early_err(&pdev->dev, 1412 - "Receive header queue count cannot be greater than %u\n", 1413 - HFI1_MAX_HDRQ_EGRBUF_CNT); 1414 - ret = -EINVAL; 1415 - goto bail; 1416 - } 1438 + 1417 1439 /* use the encoding function as a sanitization check */ 1418 1440 if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { 1419 1441 hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", ··· 1469 1461 if (ret) 1470 1462 goto bail; 1471 1463 1472 - /* 1473 - * Do device-specific initialization, function table setup, dd 1474 - * allocation, 
etc. 1475 - */ 1476 - switch (ent->device) { 1477 - case PCI_DEVICE_ID_INTEL0: 1478 - case PCI_DEVICE_ID_INTEL1: 1479 - dd = hfi1_init_dd(pdev, ent); 1480 - break; 1481 - default: 1464 + if (!(ent->device == PCI_DEVICE_ID_INTEL0 || 1465 + ent->device == PCI_DEVICE_ID_INTEL1)) { 1482 1466 hfi1_early_err(&pdev->dev, 1483 1467 "Failing on unknown Intel deviceid 0x%x\n", 1484 1468 ent->device); 1485 1469 ret = -ENODEV; 1470 + goto clean_bail; 1486 1471 } 1487 1472 1488 - if (IS_ERR(dd)) 1473 + /* 1474 + * Do device-specific initialization, function table setup, dd 1475 + * allocation, etc. 1476 + */ 1477 + dd = hfi1_init_dd(pdev, ent); 1478 + 1479 + if (IS_ERR(dd)) { 1489 1480 ret = PTR_ERR(dd); 1490 - if (ret) 1491 1481 goto clean_bail; /* error already printed */ 1482 + } 1492 1483 1493 1484 ret = create_workqueues(dd); 1494 1485 if (ret) ··· 1545 1538 return ret; 1546 1539 } 1547 1540 1541 + static void wait_for_clients(struct hfi1_devdata *dd) 1542 + { 1543 + /* 1544 + * Remove the device init value and complete the device if there is 1545 + * no clients or wait for active clients to finish. 1546 + */ 1547 + if (atomic_dec_and_test(&dd->user_refcount)) 1548 + complete(&dd->user_comp); 1549 + 1550 + wait_for_completion(&dd->user_comp); 1551 + } 1552 + 1548 1553 static void remove_one(struct pci_dev *pdev) 1549 1554 { 1550 1555 struct hfi1_devdata *dd = pci_get_drvdata(pdev); 1551 1556 1552 1557 /* close debugfs files before ib unregister */ 1553 1558 hfi1_dbg_ibdev_exit(&dd->verbs_dev); 1559 + 1560 + /* remove the /dev hfi1 interface */ 1561 + hfi1_device_remove(dd); 1562 + 1563 + /* wait for existing user space clients to finish */ 1564 + wait_for_clients(dd); 1565 + 1554 1566 /* unregister from IB core */ 1555 1567 hfi1_unregister_ib_device(dd); 1556 1568 ··· 1583 1557 1584 1558 /* wait until all of our (qsfp) queue_work() calls complete */ 1585 1559 flush_workqueue(ib_wq); 1586 - 1587 - hfi1_device_remove(dd); 1588 1560 1589 1561 postinit_cleanup(dd); 1590 1562 }
+1 -2
drivers/infiniband/hw/hfi1/pcie.c
··· 157 157 * fields required to re-initialize after a chip reset, or for 158 158 * various other purposes 159 159 */ 160 - int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, 161 - const struct pci_device_id *ent) 160 + int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) 162 161 { 163 162 unsigned long len; 164 163 resource_size_t addr;
+3 -10
drivers/infiniband/hw/hfi1/pio.c
··· 668 668 void set_pio_integrity(struct send_context *sc) 669 669 { 670 670 struct hfi1_devdata *dd = sc->dd; 671 - u64 reg = 0; 672 671 u32 hw_context = sc->hw_context; 673 672 int type = sc->type; 674 673 675 - /* 676 - * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if 677 - * we're snooping. 678 - */ 679 - if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) && 680 - dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE) 681 - reg = hfi1_pkt_default_send_ctxt_mask(dd, type); 682 - 683 - write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg); 674 + write_kctxt_csr(dd, hw_context, 675 + SC(CHECK_ENABLE), 676 + hfi1_pkt_default_send_ctxt_mask(dd, type)); 684 677 } 685 678 686 679 static u32 get_buffers_allocated(struct send_context *sc)
+1 -1
drivers/infiniband/hw/hfi1/rc.c
··· 89 89 90 90 lockdep_assert_held(&qp->s_lock); 91 91 qp->s_flags |= RVT_S_WAIT_RNR; 92 - qp->s_timer.expires = jiffies + usecs_to_jiffies(to); 92 + priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to); 93 93 add_timer(&priv->s_rnr_timer); 94 94 } 95 95
+2 -17
drivers/infiniband/hw/hfi1/sdma.c
··· 2009 2009 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); 2010 2010 } 2011 2011 2012 - #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ 2013 - (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 2014 - 2015 - #define SET_STATIC_RATE_CONTROL_SMASK(r) \ 2016 - (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 2017 2012 /* 2018 2013 * set_sdma_integrity 2019 2014 * ··· 2017 2022 static void set_sdma_integrity(struct sdma_engine *sde) 2018 2023 { 2019 2024 struct hfi1_devdata *dd = sde->dd; 2020 - u64 reg; 2021 2025 2022 - if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY))) 2023 - return; 2024 - 2025 - reg = hfi1_pkt_base_sdma_integrity(dd); 2026 - 2027 - if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) 2028 - CLEAR_STATIC_RATE_CONTROL_SMASK(reg); 2029 - else 2030 - SET_STATIC_RATE_CONTROL_SMASK(reg); 2031 - 2032 - write_sde_csr(sde, SD(CHECK_ENABLE), reg); 2026 + write_sde_csr(sde, SD(CHECK_ENABLE), 2027 + hfi1_pkt_base_sdma_integrity(dd)); 2033 2028 } 2034 2029 2035 2030 static void init_sdma_regs(
-25
drivers/infiniband/hw/hfi1/sysfs.c
··· 49 49 #include "hfi.h" 50 50 #include "mad.h" 51 51 #include "trace.h" 52 - #include "affinity.h" 53 52 54 53 /* 55 54 * Start of per-port congestion control structures and support code ··· 622 623 return ret; 623 624 } 624 625 625 - static ssize_t show_sdma_affinity(struct device *device, 626 - struct device_attribute *attr, char *buf) 627 - { 628 - struct hfi1_ibdev *dev = 629 - container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); 630 - struct hfi1_devdata *dd = dd_from_dev(dev); 631 - 632 - return hfi1_get_sdma_affinity(dd, buf); 633 - } 634 - 635 - static ssize_t store_sdma_affinity(struct device *device, 636 - struct device_attribute *attr, 637 - const char *buf, size_t count) 638 - { 639 - struct hfi1_ibdev *dev = 640 - container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); 641 - struct hfi1_devdata *dd = dd_from_dev(dev); 642 - 643 - return hfi1_set_sdma_affinity(dd, buf, count); 644 - } 645 - 646 626 /* 647 627 * end of per-unit (or driver, in some cases, but replicated 648 628 * per unit) functions ··· 636 658 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); 637 659 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); 638 660 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); 639 - static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity, 640 - store_sdma_affinity); 641 661 642 662 static struct device_attribute *hfi1_attributes[] = { 643 663 &dev_attr_hw_rev, ··· 646 670 &dev_attr_boardversion, 647 671 &dev_attr_tempsense, 648 672 &dev_attr_chip_reset, 649 - &dev_attr_sdma_affinity, 650 673 }; 651 674 652 675 int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
-60
drivers/infiniband/hw/hfi1/trace_rx.h
··· 253 253 ) 254 254 ); 255 255 256 - #define SNOOP_PRN \ 257 - "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \ 258 - "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]" 259 - 260 - TRACE_EVENT(snoop_capture, 261 - TP_PROTO(struct hfi1_devdata *dd, 262 - int hdr_len, 263 - struct ib_header *hdr, 264 - int data_len, 265 - void *data), 266 - TP_ARGS(dd, hdr_len, hdr, data_len, data), 267 - TP_STRUCT__entry( 268 - DD_DEV_ENTRY(dd) 269 - __field(u16, slid) 270 - __field(u16, dlid) 271 - __field(u32, qpn) 272 - __field(u8, opcode) 273 - __field(u8, sl) 274 - __field(u16, pkey) 275 - __field(u32, hdr_len) 276 - __field(u32, data_len) 277 - __field(u8, lnh) 278 - __dynamic_array(u8, raw_hdr, hdr_len) 279 - __dynamic_array(u8, raw_pkt, data_len) 280 - ), 281 - TP_fast_assign( 282 - struct ib_other_headers *ohdr; 283 - 284 - __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3); 285 - if (__entry->lnh == HFI1_LRH_BTH) 286 - ohdr = &hdr->u.oth; 287 - else 288 - ohdr = &hdr->u.l.oth; 289 - DD_DEV_ASSIGN(dd); 290 - __entry->slid = be16_to_cpu(hdr->lrh[3]); 291 - __entry->dlid = be16_to_cpu(hdr->lrh[1]); 292 - __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; 293 - __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; 294 - __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; 295 - __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff; 296 - __entry->hdr_len = hdr_len; 297 - __entry->data_len = data_len; 298 - memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len); 299 - memcpy(__get_dynamic_array(raw_pkt), data, data_len); 300 - ), 301 - TP_printk( 302 - "[%s] " SNOOP_PRN, 303 - __get_str(dev), 304 - __entry->slid, 305 - __entry->dlid, 306 - __entry->qpn, 307 - __entry->opcode, 308 - show_ib_opcode(__entry->opcode), 309 - __entry->sl, 310 - __entry->pkey, 311 - __entry->hdr_len, 312 - __entry->data_len 313 - ) 314 - ); 315 - 316 256 #endif /* __HFI1_TRACE_RX_H */ 317 257 318 258 #undef TRACE_INCLUDE_PATH
+1 -1
drivers/infiniband/hw/hfi1/user_sdma.c
··· 1144 1144 rb_node = hfi1_mmu_rb_extract(pq->handler, 1145 1145 (unsigned long)iovec->iov.iov_base, 1146 1146 iovec->iov.iov_len); 1147 - if (rb_node && !IS_ERR(rb_node)) 1147 + if (rb_node) 1148 1148 node = container_of(rb_node, struct sdma_mmu_node, rb); 1149 1149 else 1150 1150 rb_node = NULL;
+4 -1
drivers/infiniband/hw/mlx4/ah.c
··· 102 102 if (vlan_tag < 0x1000) 103 103 vlan_tag |= (ah_attr->sl & 7) << 13; 104 104 ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); 105 - ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); 105 + ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); 106 + if (ret < 0) 107 + return ERR_PTR(ret); 108 + ah->av.eth.gid_index = ret; 106 109 ah->av.eth.vlan = cpu_to_be16(vlan_tag); 107 110 ah->av.eth.hop_limit = ah_attr->grh.hop_limit; 108 111 if (ah_attr->static_rate) {
+4 -1
drivers/infiniband/hw/mlx4/cq.c
··· 253 253 if (context) 254 254 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { 255 255 err = -EFAULT; 256 - goto err_dbmap; 256 + goto err_cq_free; 257 257 } 258 258 259 259 return &cq->ibcq; 260 + 261 + err_cq_free: 262 + mlx4_cq_free(dev->dev, &cq->mcq); 260 263 261 264 err_dbmap: 262 265 if (context)
+1 -2
drivers/infiniband/hw/mlx5/cq.c
··· 932 932 if (err) 933 933 goto err_create; 934 934 } else { 935 - /* for now choose 64 bytes till we have a proper interface */ 936 - cqe_size = 64; 935 + cqe_size = cache_line_size() == 128 ? 128 : 64; 937 936 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, 938 937 &index, &inlen); 939 938 if (err)
+7 -4
drivers/infiniband/hw/mlx5/main.c
··· 2311 2311 { 2312 2312 struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; 2313 2313 struct ib_event ibev; 2314 - 2314 + bool fatal = false; 2315 2315 u8 port = 0; 2316 2316 2317 2317 switch (event) { 2318 2318 case MLX5_DEV_EVENT_SYS_ERROR: 2319 - ibdev->ib_active = false; 2320 2319 ibev.event = IB_EVENT_DEVICE_FATAL; 2321 2320 mlx5_ib_handle_internal_error(ibdev); 2321 + fatal = true; 2322 2322 break; 2323 2323 2324 2324 case MLX5_DEV_EVENT_PORT_UP: ··· 2370 2370 2371 2371 if (ibdev->ib_active) 2372 2372 ib_dispatch_event(&ibev); 2373 + 2374 + if (fatal) 2375 + ibdev->ib_active = false; 2373 2376 } 2374 2377 2375 2378 static void get_ext_port_caps(struct mlx5_ib_dev *dev) ··· 3118 3115 } 3119 3116 err = init_node_data(dev); 3120 3117 if (err) 3121 - goto err_dealloc; 3118 + goto err_free_port; 3122 3119 3123 3120 mutex_init(&dev->flow_db.lock); 3124 3121 mutex_init(&dev->cap_mask_mutex); ··· 3128 3125 if (ll == IB_LINK_LAYER_ETHERNET) { 3129 3126 err = mlx5_enable_roce(dev); 3130 3127 if (err) 3131 - goto err_dealloc; 3128 + goto err_free_port; 3132 3129 } 3133 3130 3134 3131 err = create_dev_resources(&dev->devr);
+2
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 626 626 struct mlx5_ib_resources devr; 627 627 struct mlx5_mr_cache cache; 628 628 struct timer_list delay_timer; 629 + /* Prevents soft lock on massive reg MRs */ 630 + struct mutex slow_path_mutex; 629 631 int fill_delay; 630 632 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 631 633 struct ib_odp_caps odp_caps;
+5 -1
drivers/infiniband/hw/mlx5/mr.c
··· 610 610 int err; 611 611 int i; 612 612 613 + mutex_init(&dev->slow_path_mutex); 613 614 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); 614 615 if (!cache->wq) { 615 616 mlx5_ib_warn(dev, "failed to create work queue\n"); ··· 1183 1182 goto error; 1184 1183 } 1185 1184 1186 - if (!mr) 1185 + if (!mr) { 1186 + mutex_lock(&dev->slow_path_mutex); 1187 1187 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, 1188 1188 page_shift, access_flags); 1189 + mutex_unlock(&dev->slow_path_mutex); 1190 + } 1189 1191 1190 1192 if (IS_ERR(mr)) { 1191 1193 err = PTR_ERR(mr);
+10 -2
drivers/infiniband/hw/mlx5/qp.c
··· 2051 2051 2052 2052 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", 2053 2053 qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, 2054 - to_mcq(init_attr->recv_cq)->mcq.cqn, 2055 - to_mcq(init_attr->send_cq)->mcq.cqn); 2054 + init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1, 2055 + init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1); 2056 2056 2057 2057 qp->trans_qp.xrcdn = xrcdn; 2058 2058 ··· 4813 4813 !ib_is_udata_cleared(udata, 0, 4814 4814 udata->inlen)) 4815 4815 return ERR_PTR(-EOPNOTSUPP); 4816 + 4817 + if (init_attr->log_ind_tbl_size > 4818 + MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) { 4819 + mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n", 4820 + init_attr->log_ind_tbl_size, 4821 + MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)); 4822 + return ERR_PTR(-EINVAL); 4823 + } 4816 4824 4817 4825 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 4818 4826 if (udata->outlen && udata->outlen < min_resp_len)
-3
drivers/infiniband/sw/rdmavt/dma.c
··· 90 90 if (WARN_ON(!valid_dma_direction(direction))) 91 91 return BAD_DMA_ADDRESS; 92 92 93 - if (offset + size > PAGE_SIZE) 94 - return BAD_DMA_ADDRESS; 95 - 96 93 addr = (u64)page_address(page); 97 94 if (addr) 98 95 addr += offset;
+2 -6
drivers/infiniband/sw/rxe/rxe_net.c
··· 243 243 { 244 244 int err; 245 245 struct socket *sock; 246 - struct udp_port_cfg udp_cfg; 247 - struct udp_tunnel_sock_cfg tnl_cfg; 248 - 249 - memset(&udp_cfg, 0, sizeof(udp_cfg)); 246 + struct udp_port_cfg udp_cfg = {0}; 247 + struct udp_tunnel_sock_cfg tnl_cfg = {0}; 250 248 251 249 if (ipv6) { 252 250 udp_cfg.family = AF_INET6; ··· 262 264 return ERR_PTR(err); 263 265 } 264 266 265 - tnl_cfg.sk_user_data = NULL; 266 267 tnl_cfg.encap_type = 1; 267 268 tnl_cfg.encap_rcv = rxe_udp_encap_recv; 268 - tnl_cfg.encap_destroy = NULL; 269 269 270 270 /* Setup UDP tunnel */ 271 271 setup_udp_tunnel_sock(net, sock, &tnl_cfg);
+2
drivers/infiniband/sw/rxe/rxe_qp.c
··· 522 522 if (qp->sq.queue) { 523 523 __rxe_do_task(&qp->comp.task); 524 524 __rxe_do_task(&qp->req.task); 525 + rxe_queue_reset(qp->sq.queue); 525 526 } 526 527 527 528 /* cleanup attributes */ ··· 574 573 { 575 574 qp->req.state = QP_STATE_ERROR; 576 575 qp->resp.state = QP_STATE_ERROR; 576 + qp->attr.qp_state = IB_QPS_ERR; 577 577 578 578 /* drain work and packet queues */ 579 579 rxe_run_task(&qp->resp.task, 1);
+9
drivers/infiniband/sw/rxe/rxe_queue.c
··· 84 84 return -EINVAL; 85 85 } 86 86 87 + inline void rxe_queue_reset(struct rxe_queue *q) 88 + { 89 + /* queue is comprised from header and the memory 90 + * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h 91 + * reset only the queue itself and not the management header 92 + */ 93 + memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf)); 94 + } 95 + 87 96 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, 88 97 int *num_elem, 89 98 unsigned int elem_size)
+2
drivers/infiniband/sw/rxe/rxe_queue.h
··· 84 84 size_t buf_size, 85 85 struct rxe_mmap_info **ip_p); 86 86 87 + void rxe_queue_reset(struct rxe_queue *q); 88 + 87 89 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, 88 90 int *num_elem, 89 91 unsigned int elem_size);
+13 -8
drivers/infiniband/sw/rxe/rxe_req.c
··· 696 696 qp->req.wqe_index); 697 697 wqe->state = wqe_state_done; 698 698 wqe->status = IB_WC_SUCCESS; 699 - goto complete; 699 + __rxe_do_task(&qp->comp.task); 700 + return 0; 700 701 } 701 702 payload = mtu; 702 703 } ··· 746 745 wqe->status = IB_WC_LOC_PROT_ERR; 747 746 wqe->state = wqe_state_error; 748 747 749 - complete: 750 - if (qp_type(qp) != IB_QPT_RC) { 751 - while (rxe_completer(qp) == 0) 752 - ; 753 - } 754 - 755 - return 0; 748 + /* 749 + * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS 750 + * ---------8<---------8<------------- 751 + * ...Note that if a completion error occurs, a Work Completion 752 + * will always be generated, even if the signaling 753 + * indicator requests an Unsignaled Completion. 754 + * ---------8<---------8<------------- 755 + */ 756 + wqe->wr.send_flags |= IB_SEND_SIGNALED; 757 + __rxe_do_task(&qp->comp.task); 758 + return -EAGAIN; 756 759 757 760 exit: 758 761 return -EAGAIN;
+7 -6
drivers/mailbox/pcc.c
··· 65 65 #include <linux/mailbox_controller.h> 66 66 #include <linux/mailbox_client.h> 67 67 #include <linux/io-64-nonatomic-lo-hi.h> 68 + #include <acpi/pcc.h> 68 69 69 70 #include "mailbox.h" 70 71 ··· 268 267 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) 269 268 chan->txdone_method |= TXDONE_BY_ACK; 270 269 270 + spin_unlock_irqrestore(&chan->lock, flags); 271 + 271 272 if (pcc_doorbell_irq[subspace_id] > 0) { 272 273 int rc; 273 274 ··· 278 275 if (unlikely(rc)) { 279 276 dev_err(dev, "failed to register PCC interrupt %d\n", 280 277 pcc_doorbell_irq[subspace_id]); 278 + pcc_mbox_free_channel(chan); 281 279 chan = ERR_PTR(rc); 282 280 } 283 281 } 284 - 285 - spin_unlock_irqrestore(&chan->lock, flags); 286 282 287 283 return chan; 288 284 } ··· 306 304 return; 307 305 } 308 306 307 + if (pcc_doorbell_irq[id] > 0) 308 + devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); 309 + 309 310 spin_lock_irqsave(&chan->lock, flags); 310 311 chan->cl = NULL; 311 312 chan->active_req = NULL; 312 313 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) 313 314 chan->txdone_method = TXDONE_BY_POLL; 314 315 315 - if (pcc_doorbell_irq[id] > 0) 316 - devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); 317 - 318 316 spin_unlock_irqrestore(&chan->lock, flags); 319 317 } 320 318 EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); 321 - 322 319 323 320 /** 324 321 * pcc_send_data - Called from Mailbox Controller code. Used
+5
drivers/media/dvb-frontends/Kconfig
··· 513 513 depends on DVB_CORE 514 514 default DVB_AS102 515 515 516 + config DVB_GP8PSK_FE 517 + tristate 518 + depends on DVB_CORE 519 + default DVB_USB_GP8PSK 520 + 516 521 comment "DVB-C (cable) frontends" 517 522 depends on DVB_CORE 518 523
+1
drivers/media/dvb-frontends/Makefile
··· 121 121 obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o 122 122 obj-$(CONFIG_DVB_AF9033) += af9033.o 123 123 obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o 124 + obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o 124 125 obj-$(CONFIG_DVB_TC90522) += tc90522.o 125 126 obj-$(CONFIG_DVB_HORUS3A) += horus3a.o 126 127 obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
+82
drivers/media/dvb-frontends/gp8psk-fe.h
··· 1 + /* 2 + * gp8psk_fe driver 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2, or (at your option) 7 + * any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + */ 14 + 15 + #ifndef GP8PSK_FE_H 16 + #define GP8PSK_FE_H 17 + 18 + #include <linux/types.h> 19 + 20 + /* gp8psk commands */ 21 + 22 + #define GET_8PSK_CONFIG 0x80 /* in */ 23 + #define SET_8PSK_CONFIG 0x81 24 + #define I2C_WRITE 0x83 25 + #define I2C_READ 0x84 26 + #define ARM_TRANSFER 0x85 27 + #define TUNE_8PSK 0x86 28 + #define GET_SIGNAL_STRENGTH 0x87 /* in */ 29 + #define LOAD_BCM4500 0x88 30 + #define BOOT_8PSK 0x89 /* in */ 31 + #define START_INTERSIL 0x8A /* in */ 32 + #define SET_LNB_VOLTAGE 0x8B 33 + #define SET_22KHZ_TONE 0x8C 34 + #define SEND_DISEQC_COMMAND 0x8D 35 + #define SET_DVB_MODE 0x8E 36 + #define SET_DN_SWITCH 0x8F 37 + #define GET_SIGNAL_LOCK 0x90 /* in */ 38 + #define GET_FW_VERS 0x92 39 + #define GET_SERIAL_NUMBER 0x93 /* in */ 40 + #define USE_EXTRA_VOLT 0x94 41 + #define GET_FPGA_VERS 0x95 42 + #define CW3K_INIT 0x9d 43 + 44 + /* PSK_configuration bits */ 45 + #define bm8pskStarted 0x01 46 + #define bm8pskFW_Loaded 0x02 47 + #define bmIntersilOn 0x04 48 + #define bmDVBmode 0x08 49 + #define bm22kHz 0x10 50 + #define bmSEL18V 0x20 51 + #define bmDCtuned 0x40 52 + #define bmArmed 0x80 53 + 54 + /* Satellite modulation modes */ 55 + #define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */ 56 + #define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */ 57 + #define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */ 58 + #define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */ 59 + 
60 + #define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */ 61 + #define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */ 62 + #define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */ 63 + #define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */ 64 + #define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */ 65 + #define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */ 66 + 67 + /* firmware revision id's */ 68 + #define GP8PSK_FW_REV1 0x020604 69 + #define GP8PSK_FW_REV2 0x020704 70 + #define GP8PSK_FW_VERS(_fw_vers) \ 71 + ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0]) 72 + 73 + struct gp8psk_fe_ops { 74 + int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen); 75 + int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen); 76 + int (*reload)(void *priv); 77 + }; 78 + 79 + struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops, 80 + void *priv, bool is_rev1); 81 + 82 + #endif
+1 -1
drivers/media/i2c/ir-kbd-i2c.c
··· 118 118 *protocol = RC_TYPE_RC6_MCE; 119 119 dev &= 0x7f; 120 120 dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n", 121 - toggle, vendor, dev, code); 121 + *ptoggle, vendor, dev, code); 122 122 } else { 123 123 *ptoggle = 0; 124 124 *protocol = RC_TYPE_RC6_6A_32;
+1 -1
drivers/media/usb/dvb-usb/Makefile
··· 8 8 dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o 9 9 obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o 10 10 11 - dvb-usb-gp8psk-objs := gp8psk.o gp8psk-fe.o 11 + dvb-usb-gp8psk-objs := gp8psk.o 12 12 obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o 13 13 14 14 dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o
+10 -23
drivers/media/usb/dvb-usb/af9005.c
··· 53 53 u8 sequence; 54 54 int led_state; 55 55 unsigned char data[256]; 56 - struct mutex data_mutex; 57 56 }; 58 57 59 58 static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, ··· 71 72 return -EINVAL; 72 73 } 73 74 74 - mutex_lock(&st->data_mutex); 75 + mutex_lock(&d->data_mutex); 75 76 st->data[0] = 14; /* rest of buffer length low */ 76 77 st->data[1] = 0; /* rest of buffer length high */ 77 78 ··· 139 140 values[i] = st->data[8 + i]; 140 141 141 142 ret: 142 - mutex_unlock(&st->data_mutex); 143 + mutex_unlock(&d->data_mutex); 143 144 return ret; 144 145 145 146 } ··· 480 481 } 481 482 packet_len = wlen + 5; 482 483 483 - mutex_lock(&st->data_mutex); 484 + mutex_lock(&d->data_mutex); 484 485 485 486 st->data[0] = (u8) (packet_len & 0xff); 486 487 st->data[1] = (u8) ((packet_len & 0xff00) >> 8); ··· 511 512 rbuf[i] = st->data[i + 7]; 512 513 } 513 514 514 - mutex_unlock(&st->data_mutex); 515 + mutex_unlock(&d->data_mutex); 515 516 return ret; 516 517 } 517 518 ··· 522 523 u8 seq; 523 524 int ret, i; 524 525 525 - mutex_lock(&st->data_mutex); 526 + mutex_lock(&d->data_mutex); 526 527 527 528 memset(st->data, 0, sizeof(st->data)); 528 529 ··· 558 559 for (i = 0; i < len; i++) 559 560 values[i] = st->data[6 + i]; 560 561 } 561 - mutex_unlock(&st->data_mutex); 562 + mutex_unlock(&d->data_mutex); 562 563 563 564 return ret; 564 565 } ··· 846 847 return 0; 847 848 } 848 849 849 - mutex_lock(&st->data_mutex); 850 + mutex_lock(&d->data_mutex); 850 851 851 852 /* deb_info("rc_query\n"); */ 852 853 st->data[0] = 3; /* rest of packet length low */ ··· 889 890 } 890 891 891 892 ret: 892 - mutex_unlock(&st->data_mutex); 893 + mutex_unlock(&d->data_mutex); 893 894 return ret; 894 895 } 895 896 ··· 1003 1004 static int af9005_usb_probe(struct usb_interface *intf, 1004 1005 const struct usb_device_id *id) 1005 1006 { 1006 - struct dvb_usb_device *d; 1007 - struct af9005_device_state *st; 1008 - int ret; 1009 - 1010 - ret = dvb_usb_device_init(intf, 
&af9005_properties, 1011 - THIS_MODULE, &d, adapter_nr); 1012 - 1013 - if (ret < 0) 1014 - return ret; 1015 - 1016 - st = d->priv; 1017 - mutex_init(&st->data_mutex); 1018 - 1019 - return 0; 1007 + return dvb_usb_device_init(intf, &af9005_properties, 1008 + THIS_MODULE, NULL, adapter_nr); 1020 1009 } 1021 1010 1022 1011 enum af9005_usb_table_entry {
+10 -23
drivers/media/usb/dvb-usb/cinergyT2-core.c
··· 42 42 struct cinergyt2_state { 43 43 u8 rc_counter; 44 44 unsigned char data[64]; 45 - struct mutex data_mutex; 46 45 }; 47 46 48 47 /* We are missing a release hook with usb_device data */ ··· 55 56 struct cinergyt2_state *st = d->priv; 56 57 int ret; 57 58 58 - mutex_lock(&st->data_mutex); 59 + mutex_lock(&d->data_mutex); 59 60 st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER; 60 61 st->data[1] = enable ? 1 : 0; 61 62 62 63 ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0); 63 - mutex_unlock(&st->data_mutex); 64 + mutex_unlock(&d->data_mutex); 64 65 65 66 return ret; 66 67 } ··· 70 71 struct cinergyt2_state *st = d->priv; 71 72 int ret; 72 73 73 - mutex_lock(&st->data_mutex); 74 + mutex_lock(&d->data_mutex); 74 75 st->data[0] = CINERGYT2_EP1_SLEEP_MODE; 75 76 st->data[1] = enable ? 0 : 1; 76 77 77 78 ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0); 78 - mutex_unlock(&st->data_mutex); 79 + mutex_unlock(&d->data_mutex); 79 80 80 81 return ret; 81 82 } ··· 88 89 89 90 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); 90 91 91 - mutex_lock(&st->data_mutex); 92 + mutex_lock(&d->data_mutex); 92 93 st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION; 93 94 94 95 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0); ··· 96 97 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " 97 98 "state info\n"); 98 99 } 99 - mutex_unlock(&st->data_mutex); 100 + mutex_unlock(&d->data_mutex); 100 101 101 102 /* Copy this pointer as we are gonna need it in the release phase */ 102 103 cinergyt2_usb_device = adap->dev; ··· 165 166 166 167 *state = REMOTE_NO_KEY_PRESSED; 167 168 168 - mutex_lock(&st->data_mutex); 169 + mutex_lock(&d->data_mutex); 169 170 st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS; 170 171 171 172 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); ··· 201 202 } 202 203 203 204 ret: 204 - mutex_unlock(&st->data_mutex); 205 + mutex_unlock(&d->data_mutex); 205 206 return ret; 206 207 } 207 208 208 209 static int 
cinergyt2_usb_probe(struct usb_interface *intf, 209 210 const struct usb_device_id *id) 210 211 { 211 - struct dvb_usb_device *d; 212 - struct cinergyt2_state *st; 213 - int ret; 214 - 215 - ret = dvb_usb_device_init(intf, &cinergyt2_properties, 216 - THIS_MODULE, &d, adapter_nr); 217 - if (ret < 0) 218 - return ret; 219 - 220 - st = d->priv; 221 - mutex_init(&st->data_mutex); 222 - 223 - return 0; 212 + return dvb_usb_device_init(intf, &cinergyt2_properties, 213 + THIS_MODULE, NULL, adapter_nr); 224 214 } 225 - 226 215 227 216 static struct usb_device_id cinergyt2_usb_table[] = { 228 217 { USB_DEVICE(USB_VID_TERRATEC, 0x0038) },
+16 -23
drivers/media/usb/dvb-usb/cxusb.c
··· 68 68 69 69 wo = (rbuf == NULL || rlen == 0); /* write-only */ 70 70 71 - mutex_lock(&st->data_mutex); 71 + mutex_lock(&d->data_mutex); 72 72 st->data[0] = cmd; 73 73 memcpy(&st->data[1], wbuf, wlen); 74 74 if (wo) ··· 77 77 ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, 78 78 rbuf, rlen, 0); 79 79 80 - mutex_unlock(&st->data_mutex); 80 + mutex_unlock(&d->data_mutex); 81 81 return ret; 82 82 } 83 83 ··· 1461 1461 static int cxusb_probe(struct usb_interface *intf, 1462 1462 const struct usb_device_id *id) 1463 1463 { 1464 - struct dvb_usb_device *d; 1465 - struct cxusb_state *st; 1466 - 1467 1464 if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties, 1468 - THIS_MODULE, &d, adapter_nr) || 1465 + THIS_MODULE, NULL, adapter_nr) || 1469 1466 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties, 1470 - THIS_MODULE, &d, adapter_nr) || 1467 + THIS_MODULE, NULL, adapter_nr) || 1471 1468 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties, 1472 - THIS_MODULE, &d, adapter_nr) || 1469 + THIS_MODULE, NULL, adapter_nr) || 1473 1470 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties, 1474 - THIS_MODULE, &d, adapter_nr) || 1471 + THIS_MODULE, NULL, adapter_nr) || 1475 1472 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties, 1476 - THIS_MODULE, &d, adapter_nr) || 1473 + THIS_MODULE, NULL, adapter_nr) || 1477 1474 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties, 1478 - THIS_MODULE, &d, adapter_nr) || 1475 + THIS_MODULE, NULL, adapter_nr) || 1479 1476 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties, 1480 - THIS_MODULE, &d, adapter_nr) || 1477 + THIS_MODULE, NULL, adapter_nr) || 1481 1478 0 == dvb_usb_device_init(intf, 1482 1479 &cxusb_bluebird_nano2_needsfirmware_properties, 1483 - THIS_MODULE, &d, adapter_nr) || 1480 + THIS_MODULE, NULL, adapter_nr) || 1484 1481 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties, 1485 - THIS_MODULE, &d, adapter_nr) || 1482 + THIS_MODULE, 
NULL, adapter_nr) || 1486 1483 0 == dvb_usb_device_init(intf, 1487 1484 &cxusb_bluebird_dualdig4_rev2_properties, 1488 - THIS_MODULE, &d, adapter_nr) || 1485 + THIS_MODULE, NULL, adapter_nr) || 1489 1486 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties, 1490 - THIS_MODULE, &d, adapter_nr) || 1487 + THIS_MODULE, NULL, adapter_nr) || 1491 1488 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, 1492 - THIS_MODULE, &d, adapter_nr) || 1489 + THIS_MODULE, NULL, adapter_nr) || 1493 1490 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties, 1494 - THIS_MODULE, &d, adapter_nr) || 1495 - 0) { 1496 - st = d->priv; 1497 - mutex_init(&st->data_mutex); 1498 - 1491 + THIS_MODULE, NULL, adapter_nr) || 1492 + 0) 1499 1493 return 0; 1500 - } 1501 1494 1502 1495 return -EINVAL; 1503 1496 }
-1
drivers/media/usb/dvb-usb/cxusb.h
··· 37 37 struct i2c_client *i2c_client_tuner; 38 38 39 39 unsigned char data[MAX_XFER_SIZE]; 40 - struct mutex data_mutex; 41 40 }; 42 41 43 42 #endif
+3 -2
drivers/media/usb/dvb-usb/dib0700_core.c
··· 704 704 struct dvb_usb_device *d = purb->context; 705 705 struct dib0700_rc_response *poll_reply; 706 706 enum rc_type protocol; 707 - u32 uninitialized_var(keycode); 707 + u32 keycode; 708 708 u8 toggle; 709 709 710 710 deb_info("%s()\n", __func__); ··· 745 745 poll_reply->nec.data == 0x00 && 746 746 poll_reply->nec.not_data == 0xff) { 747 747 poll_reply->data_state = 2; 748 - break; 748 + rc_repeat(d->rc_dev); 749 + goto resubmit; 749 750 } 750 751 751 752 if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
+17 -23
drivers/media/usb/dvb-usb/dtt200u.c
··· 22 22 23 23 struct dtt200u_state { 24 24 unsigned char data[80]; 25 - struct mutex data_mutex; 26 25 }; 27 26 28 27 static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) ··· 29 30 struct dtt200u_state *st = d->priv; 30 31 int ret = 0; 31 32 32 - mutex_lock(&st->data_mutex); 33 + mutex_lock(&d->data_mutex); 33 34 34 35 st->data[0] = SET_INIT; 35 36 36 37 if (onoff) 37 38 ret = dvb_usb_generic_write(d, st->data, 2); 38 39 39 - mutex_unlock(&st->data_mutex); 40 + mutex_unlock(&d->data_mutex); 40 41 return ret; 41 42 } 42 43 43 44 static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) 44 45 { 45 - struct dtt200u_state *st = adap->dev->priv; 46 + struct dvb_usb_device *d = adap->dev; 47 + struct dtt200u_state *st = d->priv; 46 48 int ret; 47 49 48 - mutex_lock(&st->data_mutex); 50 + mutex_lock(&d->data_mutex); 49 51 st->data[0] = SET_STREAMING; 50 52 st->data[1] = onoff; 51 53 ··· 61 61 ret = dvb_usb_generic_write(adap->dev, st->data, 1); 62 62 63 63 ret: 64 - mutex_unlock(&st->data_mutex); 64 + mutex_unlock(&d->data_mutex); 65 65 66 66 return ret; 67 67 } 68 68 69 69 static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) 70 70 { 71 - struct dtt200u_state *st = adap->dev->priv; 71 + struct dvb_usb_device *d = adap->dev; 72 + struct dtt200u_state *st = d->priv; 72 73 int ret; 73 74 74 75 pid = onoff ? 
pid : 0; 75 76 76 - mutex_lock(&st->data_mutex); 77 + mutex_lock(&d->data_mutex); 77 78 st->data[0] = SET_PID_FILTER; 78 79 st->data[1] = index; 79 80 st->data[2] = pid & 0xff; 80 81 st->data[3] = (pid >> 8) & 0x1f; 81 82 82 83 ret = dvb_usb_generic_write(adap->dev, st->data, 4); 83 - mutex_unlock(&st->data_mutex); 84 + mutex_unlock(&d->data_mutex); 84 85 85 86 return ret; 86 87 } ··· 92 91 u32 scancode; 93 92 int ret; 94 93 95 - mutex_lock(&st->data_mutex); 94 + mutex_lock(&d->data_mutex); 96 95 st->data[0] = GET_RC_CODE; 97 96 98 97 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); ··· 127 126 deb_info("st->data: %*ph\n", 5, st->data); 128 127 129 128 ret: 130 - mutex_unlock(&st->data_mutex); 129 + mutex_unlock(&d->data_mutex); 131 130 return ret; 132 131 } 133 132 ··· 146 145 static int dtt200u_usb_probe(struct usb_interface *intf, 147 146 const struct usb_device_id *id) 148 147 { 149 - struct dvb_usb_device *d; 150 - struct dtt200u_state *st; 151 - 152 148 if (0 == dvb_usb_device_init(intf, &dtt200u_properties, 153 - THIS_MODULE, &d, adapter_nr) || 149 + THIS_MODULE, NULL, adapter_nr) || 154 150 0 == dvb_usb_device_init(intf, &wt220u_properties, 155 - THIS_MODULE, &d, adapter_nr) || 151 + THIS_MODULE, NULL, adapter_nr) || 156 152 0 == dvb_usb_device_init(intf, &wt220u_fc_properties, 157 - THIS_MODULE, &d, adapter_nr) || 153 + THIS_MODULE, NULL, adapter_nr) || 158 154 0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties, 159 - THIS_MODULE, &d, adapter_nr) || 155 + THIS_MODULE, NULL, adapter_nr) || 160 156 0 == dvb_usb_device_init(intf, &wt220u_miglia_properties, 161 - THIS_MODULE, &d, adapter_nr)) { 162 - st = d->priv; 163 - mutex_init(&st->data_mutex); 164 - 157 + THIS_MODULE, NULL, adapter_nr)) 165 158 return 0; 166 - } 167 159 168 160 return -ENODEV; 169 161 }
+1
drivers/media/usb/dvb-usb/dvb-usb-init.c
··· 142 142 { 143 143 int ret = 0; 144 144 145 + mutex_init(&d->data_mutex); 145 146 mutex_init(&d->usb_mutex); 146 147 mutex_init(&d->i2c_mutex); 147 148
+7 -2
drivers/media/usb/dvb-usb/dvb-usb.h
··· 404 404 * Powered is in/decremented for each call to modify the state. 405 405 * @udev: pointer to the device's struct usb_device. 406 406 * 407 - * @usb_mutex: semaphore of USB control messages (reading needs two messages) 408 - * @i2c_mutex: semaphore for i2c-transfers 407 + * @data_mutex: mutex to protect the data structure used to store URB data 408 + * @usb_mutex: mutex of USB control messages (reading needs two messages). 409 + * Please notice that this mutex is used internally at the generic 410 + * URB control functions. So, drivers using dvb_usb_generic_rw() and 411 + * derivated functions should not lock it internally. 412 + * @i2c_mutex: mutex for i2c-transfers 409 413 * 410 414 * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB 411 415 * ··· 437 433 int powered; 438 434 439 435 /* locking */ 436 + struct mutex data_mutex; 440 437 struct mutex usb_mutex; 441 438 442 439 /* i2c */
+90 -62
drivers/media/usb/dvb-usb/gp8psk-fe.c drivers/media/dvb-frontends/gp8psk-fe.c
··· 1 - /* DVB USB compliant Linux driver for the 2 - * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module 1 + /* 2 + * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module 3 3 * 4 4 * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) 5 5 * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) ··· 8 8 * 9 9 * This module is based off the vp7045 and vp702x modules 10 10 * 11 - * This program is free software; you can redistribute it and/or modify it 12 - * under the terms of the GNU General Public License as published by the Free 13 - * Software Foundation, version 2. 14 - * 15 - * see Documentation/dvb/README.dvb-usb for more information 11 + * This program is free software; you can redistribute it and/or modify it 12 + * under the terms of the GNU General Public License as published by the Free 13 + * Software Foundation, version 2. 16 14 */ 17 - #include "gp8psk.h" 15 + 16 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 + 18 + #include "gp8psk-fe.h" 19 + #include "dvb_frontend.h" 20 + 21 + static int debug; 22 + module_param(debug, int, 0644); 23 + MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); 24 + 25 + #define dprintk(fmt, arg...) 
do { \ 26 + if (debug) \ 27 + printk(KERN_DEBUG pr_fmt("%s: " fmt), \ 28 + __func__, ##arg); \ 29 + } while (0) 18 30 19 31 struct gp8psk_fe_state { 20 32 struct dvb_frontend fe; 21 - struct dvb_usb_device *d; 33 + void *priv; 34 + const struct gp8psk_fe_ops *ops; 35 + bool is_rev1; 22 36 u8 lock; 23 37 u16 snr; 24 38 unsigned long next_status_check; ··· 43 29 { 44 30 struct gp8psk_fe_state *st = fe->demodulator_priv; 45 31 u8 status; 46 - gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1); 32 + 33 + st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1); 47 34 return status & bmDCtuned; 48 35 } 49 36 50 37 static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode) 51 38 { 52 - struct gp8psk_fe_state *state = fe->demodulator_priv; 53 - return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0); 39 + struct gp8psk_fe_state *st = fe->demodulator_priv; 40 + 41 + return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0); 54 42 } 55 43 56 44 static int gp8psk_fe_update_status(struct gp8psk_fe_state *st) 57 45 { 58 46 u8 buf[6]; 59 47 if (time_after(jiffies,st->next_status_check)) { 60 - gp8psk_usb_in_op(st->d, GET_SIGNAL_LOCK, 0,0,&st->lock,1); 61 - gp8psk_usb_in_op(st->d, GET_SIGNAL_STRENGTH, 0,0,buf,6); 48 + st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1); 49 + st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6); 62 50 st->snr = (buf[1]) << 8 | buf[0]; 63 51 st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000; 64 52 } ··· 132 116 133 117 static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) 134 118 { 135 - struct gp8psk_fe_state *state = fe->demodulator_priv; 119 + struct gp8psk_fe_state *st = fe->demodulator_priv; 136 120 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 137 121 u8 cmd[10]; 138 122 u32 freq = c->frequency * 1000; 139 - int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct); 140 123 141 - deb_fe("%s()\n", __func__); 124 + dprintk("%s()\n", __func__); 
142 125 143 126 cmd[4] = freq & 0xff; 144 127 cmd[5] = (freq >> 8) & 0xff; ··· 151 136 switch (c->delivery_system) { 152 137 case SYS_DVBS: 153 138 if (c->modulation != QPSK) { 154 - deb_fe("%s: unsupported modulation selected (%d)\n", 139 + dprintk("%s: unsupported modulation selected (%d)\n", 155 140 __func__, c->modulation); 156 141 return -EOPNOTSUPP; 157 142 } 158 143 c->fec_inner = FEC_AUTO; 159 144 break; 160 145 case SYS_DVBS2: /* kept for backwards compatibility */ 161 - deb_fe("%s: DVB-S2 delivery system selected\n", __func__); 146 + dprintk("%s: DVB-S2 delivery system selected\n", __func__); 162 147 break; 163 148 case SYS_TURBO: 164 - deb_fe("%s: Turbo-FEC delivery system selected\n", __func__); 149 + dprintk("%s: Turbo-FEC delivery system selected\n", __func__); 165 150 break; 166 151 167 152 default: 168 - deb_fe("%s: unsupported delivery system selected (%d)\n", 153 + dprintk("%s: unsupported delivery system selected (%d)\n", 169 154 __func__, c->delivery_system); 170 155 return -EOPNOTSUPP; 171 156 } ··· 176 161 cmd[3] = (c->symbol_rate >> 24) & 0xff; 177 162 switch (c->modulation) { 178 163 case QPSK: 179 - if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) 164 + if (st->is_rev1) 180 165 if (gp8psk_tuned_to_DCII(fe)) 181 - gp8psk_bcm4500_reload(state->d); 166 + st->ops->reload(st->priv); 182 167 switch (c->fec_inner) { 183 168 case FEC_1_2: 184 169 cmd[9] = 0; break; ··· 222 207 cmd[9] = 0; 223 208 break; 224 209 default: /* Unknown modulation */ 225 - deb_fe("%s: unsupported modulation selected (%d)\n", 210 + dprintk("%s: unsupported modulation selected (%d)\n", 226 211 __func__, c->modulation); 227 212 return -EOPNOTSUPP; 228 213 } 229 214 230 - if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) 215 + if (st->is_rev1) 231 216 gp8psk_set_tuner_mode(fe, 0); 232 - gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10); 217 + st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10); 233 218 234 - state->lock = 0; 235 - state->next_status_check = jiffies; 
236 - state->status_check_interval = 200; 219 + st->lock = 0; 220 + st->next_status_check = jiffies; 221 + st->status_check_interval = 200; 237 222 238 223 return 0; 239 224 } ··· 243 228 { 244 229 struct gp8psk_fe_state *st = fe->demodulator_priv; 245 230 246 - deb_fe("%s\n",__func__); 231 + dprintk("%s\n", __func__); 247 232 248 - if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, m->msg[0], 0, 233 + if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0, 249 234 m->msg, m->msg_len)) { 250 235 return -EINVAL; 251 236 } ··· 258 243 struct gp8psk_fe_state *st = fe->demodulator_priv; 259 244 u8 cmd; 260 245 261 - deb_fe("%s\n",__func__); 246 + dprintk("%s\n", __func__); 262 247 263 248 /* These commands are certainly wrong */ 264 249 cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01; 265 250 266 - if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, cmd, 0, 251 + if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0, 267 252 &cmd, 0)) { 268 253 return -EINVAL; 269 254 } ··· 273 258 static int gp8psk_fe_set_tone(struct dvb_frontend *fe, 274 259 enum fe_sec_tone_mode tone) 275 260 { 276 - struct gp8psk_fe_state* state = fe->demodulator_priv; 261 + struct gp8psk_fe_state *st = fe->demodulator_priv; 277 262 278 - if (gp8psk_usb_out_op(state->d,SET_22KHZ_TONE, 279 - (tone == SEC_TONE_ON), 0, NULL, 0)) { 263 + if (st->ops->out(st->priv, SET_22KHZ_TONE, 264 + (tone == SEC_TONE_ON), 0, NULL, 0)) { 280 265 return -EINVAL; 281 266 } 282 267 return 0; ··· 285 270 static int gp8psk_fe_set_voltage(struct dvb_frontend *fe, 286 271 enum fe_sec_voltage voltage) 287 272 { 288 - struct gp8psk_fe_state* state = fe->demodulator_priv; 273 + struct gp8psk_fe_state *st = fe->demodulator_priv; 289 274 290 - if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, 275 + if (st->ops->out(st->priv, SET_LNB_VOLTAGE, 291 276 voltage == SEC_VOLTAGE_18, 0, NULL, 0)) { 292 277 return -EINVAL; 293 278 } ··· 296 281 297 282 static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff) 298 283 { 
299 - struct gp8psk_fe_state* state = fe->demodulator_priv; 300 - return gp8psk_usb_out_op(state->d, USE_EXTRA_VOLT, onoff, 0,NULL,0); 284 + struct gp8psk_fe_state *st = fe->demodulator_priv; 285 + 286 + return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0); 301 287 } 302 288 303 289 static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd) 304 290 { 305 - struct gp8psk_fe_state* state = fe->demodulator_priv; 291 + struct gp8psk_fe_state *st = fe->demodulator_priv; 306 292 u8 cmd = sw_cmd & 0x7f; 307 293 308 - if (gp8psk_usb_out_op(state->d,SET_DN_SWITCH, cmd, 0, 309 - NULL, 0)) { 294 + if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0)) 310 295 return -EINVAL; 311 - } 312 - if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, !!(sw_cmd & 0x80), 313 - 0, NULL, 0)) { 296 + 297 + if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80), 298 + 0, NULL, 0)) 314 299 return -EINVAL; 315 - } 316 300 317 301 return 0; 318 302 } 319 303 320 304 static void gp8psk_fe_release(struct dvb_frontend* fe) 321 305 { 322 - struct gp8psk_fe_state *state = fe->demodulator_priv; 323 - kfree(state); 306 + struct gp8psk_fe_state *st = fe->demodulator_priv; 307 + 308 + kfree(st); 324 309 } 325 310 326 311 static struct dvb_frontend_ops gp8psk_fe_ops; 327 312 328 - struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d) 313 + struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops, 314 + void *priv, bool is_rev1) 329 315 { 330 - struct gp8psk_fe_state *s = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL); 331 - if (s == NULL) 332 - goto error; 316 + struct gp8psk_fe_state *st; 333 317 334 - s->d = d; 335 - memcpy(&s->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops)); 336 - s->fe.demodulator_priv = s; 318 + if (!ops || !ops->in || !ops->out || !ops->reload) { 319 + pr_err("Error! 
gp8psk-fe ops not defined.\n"); 320 + return NULL; 321 + } 337 322 338 - goto success; 339 - error: 340 - return NULL; 341 - success: 342 - return &s->fe; 323 + st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL); 324 + if (!st) 325 + return NULL; 326 + 327 + memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops)); 328 + st->fe.demodulator_priv = st; 329 + st->ops = ops; 330 + st->priv = priv; 331 + st->is_rev1 = is_rev1; 332 + 333 + pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : ""); 334 + 335 + return &st->fe; 343 336 } 344 - 337 + EXPORT_SYMBOL_GPL(gp8psk_fe_attach); 345 338 346 339 static struct dvb_frontend_ops gp8psk_fe_ops = { 347 340 .delsys = { SYS_DVBS }, ··· 393 370 .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, 394 371 .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage 395 372 }; 373 + 374 + MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>"); 375 + MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S"); 376 + MODULE_VERSION("1.1"); 377 + MODULE_LICENSE("GPL");
+78 -33
drivers/media/usb/dvb-usb/gp8psk.c
··· 15 15 * see Documentation/dvb/README.dvb-usb for more information 16 16 */ 17 17 #include "gp8psk.h" 18 + #include "gp8psk-fe.h" 18 19 19 20 /* debug */ 20 21 static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw"; ··· 29 28 unsigned char data[80]; 30 29 }; 31 30 32 - static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) 33 - { 34 - return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6)); 35 - } 36 - 37 - static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers) 38 - { 39 - return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1)); 40 - } 41 - 42 - static void gp8psk_info(struct dvb_usb_device *d) 43 - { 44 - u8 fpga_vers, fw_vers[6]; 45 - 46 - if (!gp8psk_get_fw_version(d, fw_vers)) 47 - info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i", 48 - fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers), 49 - 2000 + fw_vers[5], fw_vers[4], fw_vers[3]); 50 - else 51 - info("failed to get FW version"); 52 - 53 - if (!gp8psk_get_fpga_version(d, &fpga_vers)) 54 - info("FPGA Version = %i", fpga_vers); 55 - else 56 - info("failed to get FPGA version"); 57 - } 58 - 59 - int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) 31 + static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, 32 + u16 index, u8 *b, int blen) 60 33 { 61 34 struct gp8psk_state *st = d->priv; 62 35 int ret = 0,try = 0; ··· 42 67 return ret; 43 68 44 69 while (ret >= 0 && ret != blen && try < 3) { 45 - memcpy(st->data, b, blen); 46 70 ret = usb_control_msg(d->udev, 47 71 usb_rcvctrlpipe(d->udev,0), 48 72 req, ··· 55 81 if (ret < 0 || ret != blen) { 56 82 warn("usb in %d operation failed.", req); 57 83 ret = -EIO; 58 - } else 84 + } else { 59 85 ret = 0; 86 + memcpy(b, st->data, blen); 87 + } 60 88 61 89 deb_xfer("in: req. 
%x, val: %x, ind: %x, buffer: ",req,value,index); 62 90 debug_dump(b,blen,deb_xfer); ··· 68 92 return ret; 69 93 } 70 94 71 - int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, 95 + static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, 72 96 u16 index, u8 *b, int blen) 73 97 { 74 98 struct gp8psk_state *st = d->priv; ··· 97 121 mutex_unlock(&d->usb_mutex); 98 122 99 123 return ret; 124 + } 125 + 126 + 127 + static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) 128 + { 129 + return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6); 130 + } 131 + 132 + static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers) 133 + { 134 + return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1); 135 + } 136 + 137 + static void gp8psk_info(struct dvb_usb_device *d) 138 + { 139 + u8 fpga_vers, fw_vers[6]; 140 + 141 + if (!gp8psk_get_fw_version(d, fw_vers)) 142 + info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i", 143 + fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers), 144 + 2000 + fw_vers[5], fw_vers[4], fw_vers[3]); 145 + else 146 + info("failed to get FW version"); 147 + 148 + if (!gp8psk_get_fpga_version(d, &fpga_vers)) 149 + info("FPGA Version = %i", fpga_vers); 150 + else 151 + info("failed to get FPGA version"); 100 152 } 101 153 102 154 static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) ··· 229 225 return 0; 230 226 } 231 227 232 - int gp8psk_bcm4500_reload(struct dvb_usb_device *d) 228 + static int gp8psk_bcm4500_reload(struct dvb_usb_device *d) 233 229 { 234 230 u8 buf; 235 231 int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); 232 + 233 + deb_xfer("reloading firmware\n"); 234 + 236 235 /* Turn off 8psk power */ 237 236 if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) 238 237 return -EINVAL; ··· 254 247 return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0); 255 248 } 256 249 250 + /* Callbacks for gp8psk-fe.c */ 251 + 252 + static int 
gp8psk_fe_in(void *priv, u8 req, u16 value, 253 + u16 index, u8 *b, int blen) 254 + { 255 + struct dvb_usb_device *d = priv; 256 + 257 + return gp8psk_usb_in_op(d, req, value, index, b, blen); 258 + } 259 + 260 + static int gp8psk_fe_out(void *priv, u8 req, u16 value, 261 + u16 index, u8 *b, int blen) 262 + { 263 + struct dvb_usb_device *d = priv; 264 + 265 + return gp8psk_usb_out_op(d, req, value, index, b, blen); 266 + } 267 + 268 + static int gp8psk_fe_reload(void *priv) 269 + { 270 + struct dvb_usb_device *d = priv; 271 + 272 + return gp8psk_bcm4500_reload(d); 273 + } 274 + 275 + const struct gp8psk_fe_ops gp8psk_fe_ops = { 276 + .in = gp8psk_fe_in, 277 + .out = gp8psk_fe_out, 278 + .reload = gp8psk_fe_reload, 279 + }; 280 + 257 281 static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap) 258 282 { 259 - adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev); 283 + struct dvb_usb_device *d = adap->dev; 284 + int id = le16_to_cpu(d->udev->descriptor.idProduct); 285 + int is_rev1; 286 + 287 + is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM) ? true : false; 288 + 289 + adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach, 290 + &gp8psk_fe_ops, d, is_rev1); 260 291 return 0; 261 292 } 262 293
-63
drivers/media/usb/dvb-usb/gp8psk.h
··· 24 24 #define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args) 25 25 #define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args) 26 26 #define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args) 27 - #define deb_fe(args...) dprintk(dvb_usb_gp8psk_debug,0x08,args) 28 - 29 - /* Twinhan Vendor requests */ 30 - #define TH_COMMAND_IN 0xC0 31 - #define TH_COMMAND_OUT 0xC1 32 - 33 - /* gp8psk commands */ 34 - 35 - #define GET_8PSK_CONFIG 0x80 /* in */ 36 - #define SET_8PSK_CONFIG 0x81 37 - #define I2C_WRITE 0x83 38 - #define I2C_READ 0x84 39 - #define ARM_TRANSFER 0x85 40 - #define TUNE_8PSK 0x86 41 - #define GET_SIGNAL_STRENGTH 0x87 /* in */ 42 - #define LOAD_BCM4500 0x88 43 - #define BOOT_8PSK 0x89 /* in */ 44 - #define START_INTERSIL 0x8A /* in */ 45 - #define SET_LNB_VOLTAGE 0x8B 46 - #define SET_22KHZ_TONE 0x8C 47 - #define SEND_DISEQC_COMMAND 0x8D 48 - #define SET_DVB_MODE 0x8E 49 - #define SET_DN_SWITCH 0x8F 50 - #define GET_SIGNAL_LOCK 0x90 /* in */ 51 - #define GET_FW_VERS 0x92 52 - #define GET_SERIAL_NUMBER 0x93 /* in */ 53 - #define USE_EXTRA_VOLT 0x94 54 - #define GET_FPGA_VERS 0x95 55 - #define CW3K_INIT 0x9d 56 - 57 - /* PSK_configuration bits */ 58 - #define bm8pskStarted 0x01 59 - #define bm8pskFW_Loaded 0x02 60 - #define bmIntersilOn 0x04 61 - #define bmDVBmode 0x08 62 - #define bm22kHz 0x10 63 - #define bmSEL18V 0x20 64 - #define bmDCtuned 0x40 65 - #define bmArmed 0x80 66 - 67 - /* Satellite modulation modes */ 68 - #define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */ 69 - #define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */ 70 - #define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */ 71 - #define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */ 72 - 73 - #define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */ 74 - #define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */ 75 - #define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */ 76 - #define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */ 77 - 
#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */ 78 - #define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */ 79 27 80 28 #define GET_USB_SPEED 0x07 81 29 ··· 33 85 #define VENDOR_STRING_READ 0x0C 34 86 #define PRODUCT_STRING_READ 0x0D 35 87 #define FW_BCD_VERSION_READ 0x14 36 - 37 - /* firmware revision id's */ 38 - #define GP8PSK_FW_REV1 0x020604 39 - #define GP8PSK_FW_REV2 0x020704 40 - #define GP8PSK_FW_VERS(_fw_vers) ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0]) 41 - 42 - extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d); 43 - extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen); 44 - extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, 45 - u16 index, u8 *b, int blen); 46 - extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d); 47 88 48 89 #endif
+9 -22
drivers/mfd/intel-lpss-pci.c
··· 123 123 .properties = apl_i2c_properties, 124 124 }; 125 125 126 - static const struct intel_lpss_platform_info kbl_info = { 127 - .clk_rate = 120000000, 128 - }; 129 - 130 - static const struct intel_lpss_platform_info kbl_uart_info = { 131 - .clk_rate = 120000000, 132 - .clk_con_id = "baudclk", 133 - }; 134 - 135 - static const struct intel_lpss_platform_info kbl_i2c_info = { 136 - .clk_rate = 133000000, 137 - }; 138 - 139 126 static const struct pci_device_id intel_lpss_pci_ids[] = { 140 127 /* BXT A-Step */ 141 128 { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, ··· 194 207 { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, 195 208 { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, 196 209 /* KBL-H */ 197 - { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info }, 198 - { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info }, 199 - { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info }, 200 - { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info }, 201 - { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info }, 202 - { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info }, 203 - { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info }, 204 - { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info }, 205 - { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info }, 210 + { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, 211 + { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, 212 + { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info }, 213 + { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info }, 214 + { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info }, 215 + { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info }, 216 + { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info }, 217 + { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info }, 218 + { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, 206 219 
{ } 207 220 }; 208 221 MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
-3
drivers/mfd/intel-lpss.c
··· 502 502 for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) 503 503 lpss->priv_ctx[i] = readl(lpss->priv + i * 4); 504 504 505 - /* Put the device into reset state */ 506 - writel(0, lpss->priv + LPSS_PRIV_RESETS); 507 - 508 505 return 0; 509 506 } 510 507 EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+4 -2
drivers/mfd/intel_soc_pmic_bxtwc.c
··· 86 86 BXTWC_THRM2_IRQ, 87 87 BXTWC_BCU_IRQ, 88 88 BXTWC_ADC_IRQ, 89 + BXTWC_USBC_IRQ, 89 90 BXTWC_CHGR0_IRQ, 90 91 BXTWC_CHGR1_IRQ, 91 92 BXTWC_GPIO0_IRQ, ··· 112 111 REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), 113 112 REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), 114 113 REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), 115 - REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f), 114 + REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)), 115 + REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f), 116 116 REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), 117 117 REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), 118 118 REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), ··· 148 146 }; 149 147 150 148 static struct resource usbc_resources[] = { 151 - DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"), 149 + DEFINE_RES_IRQ(BXTWC_USBC_IRQ), 152 150 }; 153 151 154 152 static struct resource charger_resources[] = {
+2
drivers/mfd/mfd-core.c
··· 399 399 clones[i]); 400 400 } 401 401 402 + put_device(dev); 403 + 402 404 return 0; 403 405 } 404 406 EXPORT_SYMBOL(mfd_clone_cell);
+2
drivers/mfd/stmpe.c
··· 851 851 if (ret < 0) 852 852 return ret; 853 853 854 + msleep(10); 855 + 854 856 timeout = jiffies + msecs_to_jiffies(100); 855 857 while (time_before(jiffies, timeout)) { 856 858 ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]);
+1 -1
drivers/misc/mei/bus-fixup.c
··· 178 178 179 179 ret = 0; 180 180 bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); 181 - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { 181 + if (bytes_recv < if_version_length) { 182 182 dev_err(bus->dev, "Could not read IF version\n"); 183 183 ret = -EIO; 184 184 goto err;
+4 -4
drivers/mmc/card/mmc_test.c
··· 2347 2347 struct mmc_test_req *rq = mmc_test_req_alloc(); 2348 2348 struct mmc_host *host = test->card->host; 2349 2349 struct mmc_test_area *t = &test->area; 2350 - struct mmc_async_req areq; 2350 + struct mmc_test_async_req test_areq = { .test = test }; 2351 2351 struct mmc_request *mrq; 2352 2352 unsigned long timeout; 2353 2353 bool expired = false; ··· 2363 2363 mrq->sbc = &rq->sbc; 2364 2364 mrq->cap_cmd_during_tfr = true; 2365 2365 2366 - areq.mrq = mrq; 2367 - areq.err_check = mmc_test_check_result_async; 2366 + test_areq.areq.mrq = mrq; 2367 + test_areq.areq.err_check = mmc_test_check_result_async; 2368 2368 2369 2369 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 2370 2370 512, write); ··· 2378 2378 2379 2379 /* Start ongoing data request */ 2380 2380 if (use_areq) { 2381 - mmc_start_req(host, &areq, &ret); 2381 + mmc_start_req(host, &test_areq.areq, &ret); 2382 2382 if (ret) 2383 2383 goto out_free; 2384 2384 } else {
+3
drivers/mmc/core/mmc.c
··· 26 26 #include "mmc_ops.h" 27 27 #include "sd_ops.h" 28 28 29 + #define DEFAULT_CMD6_TIMEOUT_MS 500 30 + 29 31 static const unsigned int tran_exp[] = { 30 32 10000, 100000, 1000000, 10000000, 31 33 0, 0, 0, 0 ··· 573 571 card->erased_byte = 0x0; 574 572 575 573 /* eMMC v4.5 or later */ 574 + card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS; 576 575 if (card->ext_csd.rev >= 6) { 577 576 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; 578 577
+1 -1
drivers/mmc/host/dw_mmc.c
··· 2940 2940 return ERR_PTR(-ENOMEM); 2941 2941 2942 2942 /* find reset controller when exist */ 2943 - pdata->rstc = devm_reset_control_get_optional(dev, NULL); 2943 + pdata->rstc = devm_reset_control_get_optional(dev, "reset"); 2944 2944 if (IS_ERR(pdata->rstc)) { 2945 2945 if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) 2946 2946 return ERR_PTR(-EPROBE_DEFER);
+2 -2
drivers/mmc/host/mxs-mmc.c
··· 661 661 662 662 platform_set_drvdata(pdev, mmc); 663 663 664 + spin_lock_init(&host->lock); 665 + 664 666 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, 665 667 dev_name(&pdev->dev), host); 666 668 if (ret) 667 669 goto out_free_dma; 668 - 669 - spin_lock_init(&host->lock); 670 670 671 671 ret = mmc_add_host(mmc); 672 672 if (ret)
+26 -10
drivers/mmc/host/sdhci.c
··· 2086 2086 2087 2087 if (!host->tuning_done) { 2088 2088 pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n"); 2089 + 2090 + sdhci_do_reset(host, SDHCI_RESET_CMD); 2091 + sdhci_do_reset(host, SDHCI_RESET_DATA); 2092 + 2089 2093 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2090 2094 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2091 2095 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; ··· 2290 2286 2291 2287 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 2292 2288 mrq = host->mrqs_done[i]; 2293 - if (mrq) { 2294 - host->mrqs_done[i] = NULL; 2289 + if (mrq) 2295 2290 break; 2296 - } 2297 2291 } 2298 2292 2299 2293 if (!mrq) { ··· 2322 2320 * upon error conditions. 2323 2321 */ 2324 2322 if (sdhci_needs_reset(host, mrq)) { 2323 + /* 2324 + * Do not finish until command and data lines are available for 2325 + * reset. Note there can only be one other mrq, so it cannot 2326 + * also be in mrqs_done, otherwise host->cmd and host->data_cmd 2327 + * would both be null. 2328 + */ 2329 + if (host->cmd || host->data_cmd) { 2330 + spin_unlock_irqrestore(&host->lock, flags); 2331 + return true; 2332 + } 2333 + 2325 2334 /* Some controllers need this kick or reset won't work here */ 2326 2335 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2327 2336 /* This is to force an update */ ··· 2340 2327 2341 2328 /* Spec says we should do both at the same time, but Ricoh 2342 2329 controllers do not like that. 
*/ 2343 - if (!host->cmd) 2344 - sdhci_do_reset(host, SDHCI_RESET_CMD); 2345 - if (!host->data_cmd) 2346 - sdhci_do_reset(host, SDHCI_RESET_DATA); 2330 + sdhci_do_reset(host, SDHCI_RESET_CMD); 2331 + sdhci_do_reset(host, SDHCI_RESET_DATA); 2347 2332 2348 2333 host->pending_reset = false; 2349 2334 } 2350 2335 2351 2336 if (!sdhci_has_requests(host)) 2352 2337 sdhci_led_deactivate(host); 2338 + 2339 + host->mrqs_done[i] = NULL; 2353 2340 2354 2341 mmiowb(); 2355 2342 spin_unlock_irqrestore(&host->lock, flags); ··· 2525 2512 if (!host->data) { 2526 2513 struct mmc_command *data_cmd = host->data_cmd; 2527 2514 2528 - if (data_cmd) 2529 - host->data_cmd = NULL; 2530 - 2531 2515 /* 2532 2516 * The "data complete" interrupt is also used to 2533 2517 * indicate that a busy state has ended. See comment ··· 2532 2522 */ 2533 2523 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 2534 2524 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 2525 + host->data_cmd = NULL; 2535 2526 data_cmd->error = -ETIMEDOUT; 2536 2527 sdhci_finish_mrq(host, data_cmd->mrq); 2537 2528 return; 2538 2529 } 2539 2530 if (intmask & SDHCI_INT_DATA_END) { 2531 + host->data_cmd = NULL; 2540 2532 /* 2541 2533 * Some cards handle busy-end interrupt 2542 2534 * before the command completed, so make ··· 2923 2911 sdhci_enable_preset_value(host, true); 2924 2912 spin_unlock_irqrestore(&host->lock, flags); 2925 2913 } 2914 + 2915 + if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 2916 + mmc->ops->hs400_enhanced_strobe) 2917 + mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 2926 2918 2927 2919 spin_lock_irqsave(&host->lock, flags); 2928 2920
+18
drivers/net/can/sja1000/plx_pci.c
··· 142 142 #define CTI_PCI_VENDOR_ID 0x12c4 143 143 #define CTI_PCI_DEVICE_ID_CRG001 0x0900 144 144 145 + #define MOXA_PCI_VENDOR_ID 0x1393 146 + #define MOXA_PCI_DEVICE_ID 0x0100 147 + 145 148 static void plx_pci_reset_common(struct pci_dev *pdev); 146 149 static void plx9056_pci_reset_common(struct pci_dev *pdev); 147 150 static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); ··· 261 258 /* based on PLX9030 */ 262 259 }; 263 260 261 + static struct plx_pci_card_info plx_pci_card_info_moxa = { 262 + "MOXA", 2, 263 + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 264 + {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} }, 265 + &plx_pci_reset_common 266 + /* based on PLX9052 */ 267 + }; 268 + 264 269 static const struct pci_device_id plx_pci_tbl[] = { 265 270 { 266 271 /* Adlink PCI-7841/cPCI-7841 */ ··· 367 356 CAN200PCI_SUB_VENDOR_ID, CAN200PCI_SUB_DEVICE_ID, 368 357 0, 0, 369 358 (kernel_ulong_t)&plx_pci_card_info_elcus 359 + }, 360 + { 361 + /* moxa */ 362 + MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID, 363 + PCI_ANY_ID, PCI_ANY_ID, 364 + 0, 0, 365 + (kernel_ulong_t)&plx_pci_card_info_moxa 370 366 }, 371 367 { 0,} 372 368 };
+4 -12
drivers/net/dsa/b53/b53_common.c
··· 962 962 963 963 vl->members |= BIT(port) | BIT(cpu_port); 964 964 if (untagged) 965 - vl->untag |= BIT(port) | BIT(cpu_port); 965 + vl->untag |= BIT(port); 966 966 else 967 - vl->untag &= ~(BIT(port) | BIT(cpu_port)); 967 + vl->untag &= ~BIT(port); 968 + vl->untag &= ~BIT(cpu_port); 968 969 969 970 b53_set_vlan_entry(dev, vid, vl); 970 971 b53_fast_age_vlan(dev, vid); ··· 973 972 974 973 if (pvid) { 975 974 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 976 - vlan->vid_end); 977 - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), 978 975 vlan->vid_end); 979 976 b53_fast_age_vlan(dev, vid); 980 977 } ··· 983 984 { 984 985 struct b53_device *dev = ds->priv; 985 986 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 986 - unsigned int cpu_port = dev->cpu_port; 987 987 struct b53_vlan *vl; 988 988 u16 vid; 989 989 u16 pvid; ··· 995 997 b53_get_vlan_entry(dev, vid, vl); 996 998 997 999 vl->members &= ~BIT(port); 998 - if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) 999 - vl->members = 0; 1000 1000 1001 1001 if (pvid == vid) { 1002 1002 if (is5325(dev) || is5365(dev)) ··· 1003 1007 pvid = 0; 1004 1008 } 1005 1009 1006 - if (untagged) { 1010 + if (untagged) 1007 1011 vl->untag &= ~(BIT(port)); 1008 - if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) 1009 - vl->untag = 0; 1010 - } 1011 1012 1012 1013 b53_set_vlan_entry(dev, vid, vl); 1013 1014 b53_fast_age_vlan(dev, vid); 1014 1015 } 1015 1016 1016 1017 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1017 - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid); 1018 1018 b53_fast_age_vlan(dev, pvid); 1019 1019 1020 1020 return 0;
-12
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
··· 204 204 return num_msgs; 205 205 } 206 206 207 - static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) 208 - { 209 - u32 data = 0x7777; 210 - 211 - xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); 212 - xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); 213 - xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); 214 - xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); 215 - xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); 216 - } 217 - 218 207 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, 219 208 struct xgene_enet_pdata *pdata, 220 209 enum xgene_enet_err_code status) ··· 918 929 .clear = xgene_enet_clear_ring, 919 930 .wr_cmd = xgene_enet_wr_cmd, 920 931 .len = xgene_enet_ring_len, 921 - .coalesce = xgene_enet_setup_coalescing, 922 932 };
+2
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
··· 55 55 #define PREFETCH_BUF_EN BIT(21) 56 56 #define CSR_RING_ID_BUF 0x000c 57 57 #define CSR_PBM_COAL 0x0014 58 + #define CSR_PBM_CTICK0 0x0018 58 59 #define CSR_PBM_CTICK1 0x001c 59 60 #define CSR_PBM_CTICK2 0x0020 61 + #define CSR_PBM_CTICK3 0x0024 60 62 #define CSR_THRESHOLD0_SET1 0x0030 61 63 #define CSR_THRESHOLD1_SET1 0x0034 62 64 #define CSR_RING_NE_INT_MODE 0x017c
+2 -1
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 1188 1188 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); 1189 1189 } 1190 1190 1191 - pdata->ring_ops->coalesce(pdata->tx_ring[0]); 1191 + if (pdata->ring_ops->coalesce) 1192 + pdata->ring_ops->coalesce(pdata->tx_ring[0]); 1192 1193 pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; 1193 1194 1194 1195 return 0;
+7 -5
drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
··· 30 30 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); 31 31 ring_cfg[3] |= SET_BIT(X2_DEQINTEN); 32 32 } 33 - ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1); 33 + ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2); 34 34 35 35 addr >>= 8; 36 36 ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); ··· 192 192 193 193 static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) 194 194 { 195 - u32 data = 0x7777; 195 + u32 data = 0x77777777; 196 196 197 197 xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); 198 + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data); 198 199 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); 199 - xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); 200 - xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); 201 - xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); 200 + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data); 201 + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data); 202 + xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08); 203 + xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10); 202 204 } 203 205 204 206 struct xgene_ring_ops xgene_ring2_ops = {
+4 -3
drivers/net/ethernet/arc/emac_main.c
··· 460 460 if (ndev->flags & IFF_ALLMULTI) { 461 461 arc_reg_set(priv, R_LAFL, ~0); 462 462 arc_reg_set(priv, R_LAFH, ~0); 463 - } else { 463 + } else if (ndev->flags & IFF_MULTICAST) { 464 464 struct netdev_hw_addr *ha; 465 465 unsigned int filter[2] = { 0, 0 }; 466 466 int bit; ··· 472 472 473 473 arc_reg_set(priv, R_LAFL, filter[0]); 474 474 arc_reg_set(priv, R_LAFH, filter[1]); 475 + } else { 476 + arc_reg_set(priv, R_LAFL, 0); 477 + arc_reg_set(priv, R_LAFH, 0); 475 478 } 476 479 } 477 480 } ··· 767 764 ndev->netdev_ops = &arc_emac_netdev_ops; 768 765 ndev->ethtool_ops = &arc_emac_ethtool_ops; 769 766 ndev->watchdog_timeo = TX_TIMEOUT; 770 - /* FIXME :: no multicast support yet */ 771 - ndev->flags &= ~IFF_MULTICAST; 772 767 773 768 priv = netdev_priv(ndev); 774 769 priv->dev = dev;
+6 -3
drivers/net/ethernet/broadcom/bgmac.c
··· 307 307 u32 ctl; 308 308 309 309 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); 310 + 311 + /* preserve ONLY bits 16-17 from current hardware value */ 312 + ctl &= BGMAC_DMA_RX_ADDREXT_MASK; 313 + 310 314 if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { 311 315 ctl &= ~BGMAC_DMA_RX_BL_MASK; 312 316 ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; ··· 321 317 ctl &= ~BGMAC_DMA_RX_PT_MASK; 322 318 ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; 323 319 } 324 - ctl &= BGMAC_DMA_RX_ADDREXT_MASK; 325 320 ctl |= BGMAC_DMA_RX_ENABLE; 326 321 ctl |= BGMAC_DMA_RX_PARITY_DISABLE; 327 322 ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; ··· 1049 1046 1050 1047 mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> 1051 1048 BGMAC_DS_MM_SHIFT; 1052 - if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0) 1049 + if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) 1053 1050 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); 1054 - if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) 1051 + if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2) 1055 1052 bgmac_cco_ctl_maskset(bgmac, 1, ~0, 1056 1053 BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); 1057 1054
+36 -12
drivers/net/ethernet/broadcom/bnx2.c
··· 49 49 #include <linux/firmware.h> 50 50 #include <linux/log2.h> 51 51 #include <linux/aer.h> 52 + #include <linux/crash_dump.h> 52 53 53 54 #if IS_ENABLED(CONFIG_CNIC) 54 55 #define BCM_CNIC 1 ··· 4765 4764 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); 4766 4765 } 4767 4766 4768 - static int 4769 - bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) 4767 + static void 4768 + bnx2_wait_dma_complete(struct bnx2 *bp) 4770 4769 { 4771 4770 u32 val; 4772 - int i, rc = 0; 4773 - u8 old_port; 4771 + int i; 4774 4772 4775 - /* Wait for the current PCI transaction to complete before 4776 - * issuing a reset. */ 4773 + /* 4774 + * Wait for the current PCI transaction to complete before 4775 + * issuing a reset. 4776 + */ 4777 4777 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || 4778 4778 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { 4779 4779 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, ··· 4797 4795 break; 4798 4796 } 4799 4797 } 4798 + 4799 + return; 4800 + } 4801 + 4802 + 4803 + static int 4804 + bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) 4805 + { 4806 + u32 val; 4807 + int i, rc = 0; 4808 + u8 old_port; 4809 + 4810 + /* Wait for the current PCI transaction to complete before 4811 + * issuing a reset. */ 4812 + bnx2_wait_dma_complete(bp); 4800 4813 4801 4814 /* Wait for the firmware to tell us it is ok to issue a reset. 
*/ 4802 4815 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); ··· 6378 6361 struct bnx2 *bp = netdev_priv(dev); 6379 6362 int rc; 6380 6363 6364 + rc = bnx2_request_firmware(bp); 6365 + if (rc < 0) 6366 + goto out; 6367 + 6381 6368 netif_carrier_off(dev); 6382 6369 6383 6370 bnx2_disable_int(bp); ··· 6450 6429 bnx2_free_irq(bp); 6451 6430 bnx2_free_mem(bp); 6452 6431 bnx2_del_napi(bp); 6432 + bnx2_release_firmware(bp); 6453 6433 goto out; 6454 6434 } 6455 6435 ··· 8597 8575 8598 8576 pci_set_drvdata(pdev, dev); 8599 8577 8600 - rc = bnx2_request_firmware(bp); 8601 - if (rc < 0) 8602 - goto error; 8578 + /* 8579 + * In-flight DMA from 1st kernel could continue going in kdump kernel. 8580 + * New io-page table has been created before bnx2 does reset at open stage. 8581 + * We have to wait for the in-flight DMA to complete to avoid it look up 8582 + * into the newly created io-page table. 8583 + */ 8584 + if (is_kdump_kernel()) 8585 + bnx2_wait_dma_complete(bp); 8603 8586 8604 - 8605 - bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 8606 8587 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); 8607 8588 8608 8589 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | ··· 8638 8613 return 0; 8639 8614 8640 8615 error: 8641 - bnx2_release_firmware(bp); 8642 8616 pci_iounmap(pdev, bp->regview); 8643 8617 pci_release_regions(pdev); 8644 8618 pci_disable_device(pdev);
+10 -5
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4934 4934 napi_hash_del(&bnapi->napi); 4935 4935 netif_napi_del(&bnapi->napi); 4936 4936 } 4937 + /* We called napi_hash_del() before netif_napi_del(), we need 4938 + * to respect an RCU grace period before freeing napi structures. 4939 + */ 4940 + synchronize_net(); 4937 4941 } 4938 4942 4939 4943 static void bnxt_init_napi(struct bnxt *bp) ··· 6313 6309 struct tc_to_netdev *ntc) 6314 6310 { 6315 6311 struct bnxt *bp = netdev_priv(dev); 6312 + bool sh = false; 6316 6313 u8 tc; 6317 6314 6318 6315 if (ntc->type != TC_SETUP_MQPRIO) ··· 6330 6325 if (netdev_get_num_tc(dev) == tc) 6331 6326 return 0; 6332 6327 6328 + if (bp->flags & BNXT_FLAG_SHARED_RINGS) 6329 + sh = true; 6330 + 6333 6331 if (tc) { 6334 6332 int max_rx_rings, max_tx_rings, rc; 6335 - bool sh = false; 6336 - 6337 - if (bp->flags & BNXT_FLAG_SHARED_RINGS) 6338 - sh = true; 6339 6333 6340 6334 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); 6341 6335 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) ··· 6352 6348 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 6353 6349 netdev_reset_tc(dev); 6354 6350 } 6355 - bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); 6351 + bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 6352 + bp->tx_nr_rings + bp->rx_nr_rings; 6356 6353 bp->num_stat_ctxs = bp->cp_nr_rings; 6357 6354 6358 6355 if (netif_running(bp->dev))
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
··· 774 774 775 775 if (vf->flags & BNXT_VF_LINK_UP) { 776 776 /* if physical link is down, force link up on VF */ 777 - if (phy_qcfg_resp.link == 778 - PORT_PHY_QCFG_RESP_LINK_NO_LINK) { 777 + if (phy_qcfg_resp.link != 778 + PORT_PHY_QCFG_RESP_LINK_LINK) { 779 779 phy_qcfg_resp.link = 780 780 PORT_PHY_QCFG_RESP_LINK_LINK; 781 781 phy_qcfg_resp.link_speed = cpu_to_le16(
+2 -2
drivers/net/ethernet/brocade/bna/bnad.c
··· 177 177 return 0; 178 178 179 179 hw_cons = *(tcb->hw_consumer_index); 180 + rmb(); 180 181 cons = tcb->consumer_index; 181 182 q_depth = tcb->q_depth; 182 183 ··· 3095 3094 BNA_QE_INDX_INC(prod, q_depth); 3096 3095 tcb->producer_index = prod; 3097 3096 3098 - smp_mb(); 3097 + wmb(); 3099 3098 3100 3099 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 3101 3100 return NETDEV_TX_OK; ··· 3103 3102 skb_tx_timestamp(skb); 3104 3103 3105 3104 bna_txq_prod_indx_doorbell(tcb); 3106 - smp_mb(); 3107 3105 3108 3106 return NETDEV_TX_OK; 3109 3107 }
+6
drivers/net/ethernet/cadence/macb.c
··· 2673 2673 lp->skb_length = skb->len; 2674 2674 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, 2675 2675 DMA_TO_DEVICE); 2676 + if (dma_mapping_error(NULL, lp->skb_physaddr)) { 2677 + dev_kfree_skb_any(skb); 2678 + dev->stats.tx_dropped++; 2679 + netdev_err(dev, "%s: DMA mapping error\n", __func__); 2680 + return NETDEV_TX_OK; 2681 + } 2676 2682 2677 2683 /* Set address of the data in the Transmit Address register */ 2678 2684 macb_writel(lp, TAR, lp->skb_physaddr);
+35 -27
drivers/net/ethernet/cavium/thunder/nic.h
··· 47 47 48 48 /* Min/Max packet size */ 49 49 #define NIC_HW_MIN_FRS 64 50 - #define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ 50 + #define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */ 51 51 52 52 /* Max pkinds */ 53 53 #define NIC_MAX_PKIND 16 ··· 178 178 179 179 struct nicvf_hw_stats { 180 180 u64 rx_bytes; 181 + u64 rx_frames; 181 182 u64 rx_ucast_frames; 182 183 u64 rx_bcast_frames; 183 184 u64 rx_mcast_frames; 184 - u64 rx_fcs_errors; 185 - u64 rx_l2_errors; 185 + u64 rx_drops; 186 186 u64 rx_drop_red; 187 187 u64 rx_drop_red_bytes; 188 188 u64 rx_drop_overrun; ··· 191 191 u64 rx_drop_mcast; 192 192 u64 rx_drop_l3_bcast; 193 193 u64 rx_drop_l3_mcast; 194 + u64 rx_fcs_errors; 195 + u64 rx_l2_errors; 196 + 197 + u64 tx_bytes; 198 + u64 tx_frames; 199 + u64 tx_ucast_frames; 200 + u64 tx_bcast_frames; 201 + u64 tx_mcast_frames; 202 + u64 tx_drops; 203 + }; 204 + 205 + struct nicvf_drv_stats { 206 + /* CQE Rx errs */ 194 207 u64 rx_bgx_truncated_pkts; 195 208 u64 rx_jabber_errs; 196 209 u64 rx_fcs_errs; ··· 229 216 u64 rx_l4_pclp; 230 217 u64 rx_truncated_pkts; 231 218 232 - u64 tx_bytes_ok; 233 - u64 tx_ucast_frames_ok; 234 - u64 tx_bcast_frames_ok; 235 - u64 tx_mcast_frames_ok; 236 - u64 tx_drops; 237 - }; 219 + /* CQE Tx errs */ 220 + u64 tx_desc_fault; 221 + u64 tx_hdr_cons_err; 222 + u64 tx_subdesc_err; 223 + u64 tx_max_size_exceeded; 224 + u64 tx_imm_size_oflow; 225 + u64 tx_data_seq_err; 226 + u64 tx_mem_seq_err; 227 + u64 tx_lock_viol; 228 + u64 tx_data_fault; 229 + u64 tx_tstmp_conflict; 230 + u64 tx_tstmp_timeout; 231 + u64 tx_mem_fault; 232 + u64 tx_csum_overlap; 233 + u64 tx_csum_overflow; 238 234 239 - struct nicvf_drv_stats { 240 - /* Rx */ 241 - u64 rx_frames_ok; 242 - u64 rx_frames_64; 243 - u64 rx_frames_127; 244 - u64 rx_frames_255; 245 - u64 rx_frames_511; 246 - u64 rx_frames_1023; 247 - u64 rx_frames_1518; 248 - u64 rx_frames_jumbo; 249 - u64 rx_drops; 250 - 235 + /* driver debug stats */ 251 236 u64 
rcv_buffer_alloc_failures; 252 - 253 - /* Tx */ 254 - u64 tx_frames_ok; 255 - u64 tx_drops; 256 237 u64 tx_tso; 257 238 u64 tx_timeout; 258 239 u64 txq_stop; 259 240 u64 txq_wake; 241 + 242 + struct u64_stats_sync syncp; 260 243 }; 261 244 262 245 struct nicvf { ··· 291 282 292 283 u8 node; 293 284 u8 cpi_alg; 294 - u16 mtu; 295 285 bool link_up; 296 286 u8 duplex; 297 287 u32 speed; ··· 306 298 307 299 /* Stats */ 308 300 struct nicvf_hw_stats hw_stats; 309 - struct nicvf_drv_stats drv_stats; 301 + struct nicvf_drv_stats __percpu *drv_stats; 310 302 struct bgx_stats bgx_stats; 311 303 312 304 /* MSI-X */
+27 -12
drivers/net/ethernet/cavium/thunder/nic_main.c
··· 11 11 #include <linux/pci.h> 12 12 #include <linux/etherdevice.h> 13 13 #include <linux/of.h> 14 + #include <linux/if_vlan.h> 14 15 15 16 #include "nic_reg.h" 16 17 #include "nic.h" ··· 261 260 /* Update hardware min/max frame size */ 262 261 static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) 263 262 { 264 - if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { 265 - dev_err(&nic->pdev->dev, 266 - "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", 267 - vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); 268 - return 1; 269 - } 270 - new_frs += ETH_HLEN; 271 - if (new_frs <= nic->pkind.maxlen) 272 - return 0; 263 + int bgx, lmac, lmac_cnt; 264 + u64 lmac_credits; 273 265 274 - nic->pkind.maxlen = new_frs; 275 - nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); 266 + if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) 267 + return 1; 268 + 269 + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 270 + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 271 + lmac += bgx * MAX_LMAC_PER_BGX; 272 + 273 + new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4; 274 + 275 + /* Update corresponding LMAC credits */ 276 + lmac_cnt = bgx_get_lmac_count(nic->node, bgx); 277 + lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8)); 278 + lmac_credits &= ~(0xFFFFFULL << 12); 279 + lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12); 280 + nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits); 281 + 282 + /* Enforce MTU in HW 283 + * This config is supported only from 88xx pass 2.0 onwards. 
284 + */ 285 + if (!pass1_silicon(nic->pdev)) 286 + nic_reg_write(nic, 287 + NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs); 276 288 return 0; 277 289 } 278 290 ··· 478 464 479 465 /* PKIND configuration */ 480 466 nic->pkind.minlen = 0; 481 - nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; 467 + nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4; 482 468 nic->pkind.lenerr_en = 1; 483 469 nic->pkind.rx_hdr = 0; 484 470 nic->pkind.hdr_sl = 0; ··· 851 837 nic_reg_write(nic, reg_addr, 0); 852 838 } 853 839 } 840 + 854 841 return 0; 855 842 } 856 843
+1
drivers/net/ethernet/cavium/thunder/nic_reg.h
··· 106 106 #define NIC_PF_MPI_0_2047_CFG (0x210000) 107 107 #define NIC_PF_RSSI_0_4097_RQ (0x220000) 108 108 #define NIC_PF_LMAC_0_7_CFG (0x240000) 109 + #define NIC_PF_LMAC_0_7_CFG2 (0x240100) 109 110 #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) 110 111 #define NIC_PF_LMAC_0_7_CREDIT (0x244000) 111 112 #define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
+59 -46
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
··· 36 36 37 37 static const struct nicvf_stat nicvf_hw_stats[] = { 38 38 NICVF_HW_STAT(rx_bytes), 39 + NICVF_HW_STAT(rx_frames), 39 40 NICVF_HW_STAT(rx_ucast_frames), 40 41 NICVF_HW_STAT(rx_bcast_frames), 41 42 NICVF_HW_STAT(rx_mcast_frames), 42 - NICVF_HW_STAT(rx_fcs_errors), 43 - NICVF_HW_STAT(rx_l2_errors), 43 + NICVF_HW_STAT(rx_drops), 44 44 NICVF_HW_STAT(rx_drop_red), 45 45 NICVF_HW_STAT(rx_drop_red_bytes), 46 46 NICVF_HW_STAT(rx_drop_overrun), ··· 49 49 NICVF_HW_STAT(rx_drop_mcast), 50 50 NICVF_HW_STAT(rx_drop_l3_bcast), 51 51 NICVF_HW_STAT(rx_drop_l3_mcast), 52 - NICVF_HW_STAT(rx_bgx_truncated_pkts), 53 - NICVF_HW_STAT(rx_jabber_errs), 54 - NICVF_HW_STAT(rx_fcs_errs), 55 - NICVF_HW_STAT(rx_bgx_errs), 56 - NICVF_HW_STAT(rx_prel2_errs), 57 - NICVF_HW_STAT(rx_l2_hdr_malformed), 58 - NICVF_HW_STAT(rx_oversize), 59 - NICVF_HW_STAT(rx_undersize), 60 - NICVF_HW_STAT(rx_l2_len_mismatch), 61 - NICVF_HW_STAT(rx_l2_pclp), 62 - NICVF_HW_STAT(rx_ip_ver_errs), 63 - NICVF_HW_STAT(rx_ip_csum_errs), 64 - NICVF_HW_STAT(rx_ip_hdr_malformed), 65 - NICVF_HW_STAT(rx_ip_payload_malformed), 66 - NICVF_HW_STAT(rx_ip_ttl_errs), 67 - NICVF_HW_STAT(rx_l3_pclp), 68 - NICVF_HW_STAT(rx_l4_malformed), 69 - NICVF_HW_STAT(rx_l4_csum_errs), 70 - NICVF_HW_STAT(rx_udp_len_errs), 71 - NICVF_HW_STAT(rx_l4_port_errs), 72 - NICVF_HW_STAT(rx_tcp_flag_errs), 73 - NICVF_HW_STAT(rx_tcp_offset_errs), 74 - NICVF_HW_STAT(rx_l4_pclp), 75 - NICVF_HW_STAT(rx_truncated_pkts), 76 - NICVF_HW_STAT(tx_bytes_ok), 77 - NICVF_HW_STAT(tx_ucast_frames_ok), 78 - NICVF_HW_STAT(tx_bcast_frames_ok), 79 - NICVF_HW_STAT(tx_mcast_frames_ok), 52 + NICVF_HW_STAT(rx_fcs_errors), 53 + NICVF_HW_STAT(rx_l2_errors), 54 + NICVF_HW_STAT(tx_bytes), 55 + NICVF_HW_STAT(tx_frames), 56 + NICVF_HW_STAT(tx_ucast_frames), 57 + NICVF_HW_STAT(tx_bcast_frames), 58 + NICVF_HW_STAT(tx_mcast_frames), 59 + NICVF_HW_STAT(tx_drops), 80 60 }; 81 61 82 62 static const struct nicvf_stat nicvf_drv_stats[] = { 83 - NICVF_DRV_STAT(rx_frames_ok), 84 - 
NICVF_DRV_STAT(rx_frames_64), 85 - NICVF_DRV_STAT(rx_frames_127), 86 - NICVF_DRV_STAT(rx_frames_255), 87 - NICVF_DRV_STAT(rx_frames_511), 88 - NICVF_DRV_STAT(rx_frames_1023), 89 - NICVF_DRV_STAT(rx_frames_1518), 90 - NICVF_DRV_STAT(rx_frames_jumbo), 91 - NICVF_DRV_STAT(rx_drops), 63 + NICVF_DRV_STAT(rx_bgx_truncated_pkts), 64 + NICVF_DRV_STAT(rx_jabber_errs), 65 + NICVF_DRV_STAT(rx_fcs_errs), 66 + NICVF_DRV_STAT(rx_bgx_errs), 67 + NICVF_DRV_STAT(rx_prel2_errs), 68 + NICVF_DRV_STAT(rx_l2_hdr_malformed), 69 + NICVF_DRV_STAT(rx_oversize), 70 + NICVF_DRV_STAT(rx_undersize), 71 + NICVF_DRV_STAT(rx_l2_len_mismatch), 72 + NICVF_DRV_STAT(rx_l2_pclp), 73 + NICVF_DRV_STAT(rx_ip_ver_errs), 74 + NICVF_DRV_STAT(rx_ip_csum_errs), 75 + NICVF_DRV_STAT(rx_ip_hdr_malformed), 76 + NICVF_DRV_STAT(rx_ip_payload_malformed), 77 + NICVF_DRV_STAT(rx_ip_ttl_errs), 78 + NICVF_DRV_STAT(rx_l3_pclp), 79 + NICVF_DRV_STAT(rx_l4_malformed), 80 + NICVF_DRV_STAT(rx_l4_csum_errs), 81 + NICVF_DRV_STAT(rx_udp_len_errs), 82 + NICVF_DRV_STAT(rx_l4_port_errs), 83 + NICVF_DRV_STAT(rx_tcp_flag_errs), 84 + NICVF_DRV_STAT(rx_tcp_offset_errs), 85 + NICVF_DRV_STAT(rx_l4_pclp), 86 + NICVF_DRV_STAT(rx_truncated_pkts), 87 + 88 + NICVF_DRV_STAT(tx_desc_fault), 89 + NICVF_DRV_STAT(tx_hdr_cons_err), 90 + NICVF_DRV_STAT(tx_subdesc_err), 91 + NICVF_DRV_STAT(tx_max_size_exceeded), 92 + NICVF_DRV_STAT(tx_imm_size_oflow), 93 + NICVF_DRV_STAT(tx_data_seq_err), 94 + NICVF_DRV_STAT(tx_mem_seq_err), 95 + NICVF_DRV_STAT(tx_lock_viol), 96 + NICVF_DRV_STAT(tx_data_fault), 97 + NICVF_DRV_STAT(tx_tstmp_conflict), 98 + NICVF_DRV_STAT(tx_tstmp_timeout), 99 + NICVF_DRV_STAT(tx_mem_fault), 100 + NICVF_DRV_STAT(tx_csum_overlap), 101 + NICVF_DRV_STAT(tx_csum_overflow), 102 + 92 103 NICVF_DRV_STAT(rcv_buffer_alloc_failures), 93 - NICVF_DRV_STAT(tx_frames_ok), 94 104 NICVF_DRV_STAT(tx_tso), 95 - NICVF_DRV_STAT(tx_drops), 96 105 NICVF_DRV_STAT(tx_timeout), 97 106 NICVF_DRV_STAT(txq_stop), 98 107 NICVF_DRV_STAT(txq_wake), ··· 287 278 struct 
ethtool_stats *stats, u64 *data) 288 279 { 289 280 struct nicvf *nic = netdev_priv(netdev); 290 - int stat; 291 - int sqs; 281 + int stat, tmp_stats; 282 + int sqs, cpu; 292 283 293 284 nicvf_update_stats(nic); 294 285 ··· 298 289 for (stat = 0; stat < nicvf_n_hw_stats; stat++) 299 290 *(data++) = ((u64 *)&nic->hw_stats) 300 291 [nicvf_hw_stats[stat].index]; 301 - for (stat = 0; stat < nicvf_n_drv_stats; stat++) 302 - *(data++) = ((u64 *)&nic->drv_stats) 303 - [nicvf_drv_stats[stat].index]; 292 + for (stat = 0; stat < nicvf_n_drv_stats; stat++) { 293 + tmp_stats = 0; 294 + for_each_possible_cpu(cpu) 295 + tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu)) 296 + [nicvf_drv_stats[stat].index]; 297 + *(data++) = tmp_stats; 298 + } 304 299 305 300 nicvf_get_qset_stats(nic, stats, &data); 306 301
+79 -74
drivers/net/ethernet/cavium/thunder/nicvf_main.c
··· 69 69 return qidx; 70 70 } 71 71 72 - static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, 73 - struct sk_buff *skb) 74 - { 75 - if (skb->len <= 64) 76 - nic->drv_stats.rx_frames_64++; 77 - else if (skb->len <= 127) 78 - nic->drv_stats.rx_frames_127++; 79 - else if (skb->len <= 255) 80 - nic->drv_stats.rx_frames_255++; 81 - else if (skb->len <= 511) 82 - nic->drv_stats.rx_frames_511++; 83 - else if (skb->len <= 1023) 84 - nic->drv_stats.rx_frames_1023++; 85 - else if (skb->len <= 1518) 86 - nic->drv_stats.rx_frames_1518++; 87 - else 88 - nic->drv_stats.rx_frames_jumbo++; 89 - } 90 - 91 72 /* The Cavium ThunderX network controller can *only* be found in SoCs 92 73 * containing the ThunderX ARM64 CPU implementation. All accesses to the device 93 74 * registers on this platform are implicitly strongly ordered with respect ··· 473 492 static int nicvf_init_resources(struct nicvf *nic) 474 493 { 475 494 int err; 476 - union nic_mbx mbx = {}; 477 - 478 - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; 479 495 480 496 /* Enable Qset */ 481 497 nicvf_qset_config(nic, true); ··· 485 507 return err; 486 508 } 487 509 488 - /* Send VF config done msg to PF */ 489 - nicvf_write_to_mbx(nic, &mbx); 490 - 491 510 return 0; 492 511 } 493 512 494 513 static void nicvf_snd_pkt_handler(struct net_device *netdev, 495 - struct cmp_queue *cq, 496 514 struct cqe_send_t *cqe_tx, 497 515 int cqe_type, int budget, 498 516 unsigned int *tx_pkts, unsigned int *tx_bytes) ··· 510 536 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, 511 537 cqe_tx->sqe_ptr, hdr->subdesc_cnt); 512 538 513 - nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); 539 + nicvf_check_cqe_tx_errs(nic, cqe_tx); 514 540 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; 515 541 if (skb) { 516 542 /* Check for dummy descriptor used for HW TSO offload on 88xx */ ··· 604 630 return; 605 631 } 606 632 607 - nicvf_set_rx_frame_cnt(nic, skb); 608 - 609 633 nicvf_set_rxhash(netdev, cqe_rx, skb); 610 634 611 635 skb_record_rx_queue(skb, rq_idx); 
··· 675 703 work_done++; 676 704 break; 677 705 case CQE_TYPE_SEND: 678 - nicvf_snd_pkt_handler(netdev, cq, 706 + nicvf_snd_pkt_handler(netdev, 679 707 (void *)cq_desc, CQE_TYPE_SEND, 680 708 budget, &tx_pkts, &tx_bytes); 681 709 tx_done++; ··· 712 740 nic = nic->pnicvf; 713 741 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { 714 742 netif_tx_start_queue(txq); 715 - nic->drv_stats.txq_wake++; 743 + this_cpu_inc(nic->drv_stats->txq_wake); 716 744 if (netif_msg_tx_err(nic)) 717 745 netdev_warn(netdev, 718 746 "%s: Transmit queue wakeup SQ%d\n", ··· 1056 1084 1057 1085 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { 1058 1086 netif_tx_stop_queue(txq); 1059 - nic->drv_stats.txq_stop++; 1087 + this_cpu_inc(nic->drv_stats->txq_stop); 1060 1088 if (netif_msg_tx_err(nic)) 1061 1089 netdev_warn(netdev, 1062 1090 "%s: Transmit ring full, stopping SQ%d\n", ··· 1161 1189 return 0; 1162 1190 } 1163 1191 1192 + static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) 1193 + { 1194 + union nic_mbx mbx = {}; 1195 + 1196 + mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; 1197 + mbx.frs.max_frs = mtu; 1198 + mbx.frs.vf_id = nic->vf_id; 1199 + 1200 + return nicvf_send_msg_to_pf(nic, &mbx); 1201 + } 1202 + 1164 1203 int nicvf_open(struct net_device *netdev) 1165 1204 { 1166 - int err, qidx; 1205 + int cpu, err, qidx; 1167 1206 struct nicvf *nic = netdev_priv(netdev); 1168 1207 struct queue_set *qs = nic->qs; 1169 1208 struct nicvf_cq_poll *cq_poll = NULL; 1170 - 1171 - nic->mtu = netdev->mtu; 1209 + union nic_mbx mbx = {}; 1172 1210 1173 1211 netif_carrier_off(netdev); 1174 1212 ··· 1230 1248 if (nic->sqs_mode) 1231 1249 nicvf_get_primary_vf_struct(nic); 1232 1250 1233 - /* Configure receive side scaling */ 1234 - if (!nic->sqs_mode) 1251 + /* Configure receive side scaling and MTU */ 1252 + if (!nic->sqs_mode) { 1235 1253 nicvf_rss_init(nic); 1254 + if (nicvf_update_hw_max_frs(nic, netdev->mtu)) 1255 + goto cleanup; 1256 + 1257 + /* Clear percpu 
stats */ 1258 + for_each_possible_cpu(cpu) 1259 + memset(per_cpu_ptr(nic->drv_stats, cpu), 0, 1260 + sizeof(struct nicvf_drv_stats)); 1261 + } 1236 1262 1237 1263 err = nicvf_register_interrupts(nic); 1238 1264 if (err) ··· 1266 1276 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1267 1277 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1268 1278 1269 - nic->drv_stats.txq_stop = 0; 1270 - nic->drv_stats.txq_wake = 0; 1279 + /* Send VF config done msg to PF */ 1280 + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; 1281 + nicvf_write_to_mbx(nic, &mbx); 1271 1282 1272 1283 return 0; 1273 1284 cleanup: ··· 1288 1297 return err; 1289 1298 } 1290 1299 1291 - static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) 1292 - { 1293 - union nic_mbx mbx = {}; 1294 - 1295 - mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; 1296 - mbx.frs.max_frs = mtu; 1297 - mbx.frs.vf_id = nic->vf_id; 1298 - 1299 - return nicvf_send_msg_to_pf(nic, &mbx); 1300 - } 1301 - 1302 1300 static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) 1303 1301 { 1304 1302 struct nicvf *nic = netdev_priv(netdev); ··· 1298 1318 if (new_mtu < NIC_HW_MIN_FRS) 1299 1319 return -EINVAL; 1300 1320 1321 + netdev->mtu = new_mtu; 1322 + 1323 + if (!netif_running(netdev)) 1324 + return 0; 1325 + 1301 1326 if (nicvf_update_hw_max_frs(nic, new_mtu)) 1302 1327 return -EINVAL; 1303 - netdev->mtu = new_mtu; 1304 - nic->mtu = new_mtu; 1305 1328 1306 1329 return 0; 1307 1330 } ··· 1362 1379 1363 1380 void nicvf_update_stats(struct nicvf *nic) 1364 1381 { 1365 - int qidx; 1382 + int qidx, cpu; 1383 + u64 tmp_stats = 0; 1366 1384 struct nicvf_hw_stats *stats = &nic->hw_stats; 1367 - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 1385 + struct nicvf_drv_stats *drv_stats; 1368 1386 struct queue_set *qs = nic->qs; 1369 1387 1370 1388 #define GET_RX_STATS(reg) \ ··· 1388 1404 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); 1389 1405 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); 1390 1406 1391 - stats->tx_bytes_ok = 
GET_TX_STATS(TX_OCTS); 1392 - stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); 1393 - stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); 1394 - stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); 1407 + stats->tx_bytes = GET_TX_STATS(TX_OCTS); 1408 + stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST); 1409 + stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST); 1410 + stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST); 1395 1411 stats->tx_drops = GET_TX_STATS(TX_DROP); 1396 1412 1397 - drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1398 - stats->tx_bcast_frames_ok + 1399 - stats->tx_mcast_frames_ok; 1400 - drv_stats->rx_frames_ok = stats->rx_ucast_frames + 1401 - stats->rx_bcast_frames + 1402 - stats->rx_mcast_frames; 1403 - drv_stats->rx_drops = stats->rx_drop_red + 1404 - stats->rx_drop_overrun; 1405 - drv_stats->tx_drops = stats->tx_drops; 1413 + /* On T88 pass 2.0, the dummy SQE added for TSO notification 1414 + * via CQE has 'dont_send' set. Hence HW drops the pkt pointed 1415 + * pointed by dummy SQE and results in tx_drops counter being 1416 + * incremented. Subtracting it from tx_tso counter will give 1417 + * exact tx_drops counter. 
1418 + */ 1419 + if (nic->t88 && nic->hw_tso) { 1420 + for_each_possible_cpu(cpu) { 1421 + drv_stats = per_cpu_ptr(nic->drv_stats, cpu); 1422 + tmp_stats += drv_stats->tx_tso; 1423 + } 1424 + stats->tx_drops = tmp_stats - stats->tx_drops; 1425 + } 1426 + stats->tx_frames = stats->tx_ucast_frames + 1427 + stats->tx_bcast_frames + 1428 + stats->tx_mcast_frames; 1429 + stats->rx_frames = stats->rx_ucast_frames + 1430 + stats->rx_bcast_frames + 1431 + stats->rx_mcast_frames; 1432 + stats->rx_drops = stats->rx_drop_red + 1433 + stats->rx_drop_overrun; 1406 1434 1407 1435 /* Update RQ and SQ stats */ 1408 1436 for (qidx = 0; qidx < qs->rq_cnt; qidx++) ··· 1428 1432 { 1429 1433 struct nicvf *nic = netdev_priv(netdev); 1430 1434 struct nicvf_hw_stats *hw_stats = &nic->hw_stats; 1431 - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 1432 1435 1433 1436 nicvf_update_stats(nic); 1434 1437 1435 1438 stats->rx_bytes = hw_stats->rx_bytes; 1436 - stats->rx_packets = drv_stats->rx_frames_ok; 1437 - stats->rx_dropped = drv_stats->rx_drops; 1439 + stats->rx_packets = hw_stats->rx_frames; 1440 + stats->rx_dropped = hw_stats->rx_drops; 1438 1441 stats->multicast = hw_stats->rx_mcast_frames; 1439 1442 1440 - stats->tx_bytes = hw_stats->tx_bytes_ok; 1441 - stats->tx_packets = drv_stats->tx_frames_ok; 1442 - stats->tx_dropped = drv_stats->tx_drops; 1443 + stats->tx_bytes = hw_stats->tx_bytes; 1444 + stats->tx_packets = hw_stats->tx_frames; 1445 + stats->tx_dropped = hw_stats->tx_drops; 1443 1446 1444 1447 return stats; 1445 1448 } ··· 1451 1456 netdev_warn(dev, "%s: Transmit timed out, resetting\n", 1452 1457 dev->name); 1453 1458 1454 - nic->drv_stats.tx_timeout++; 1459 + this_cpu_inc(nic->drv_stats->tx_timeout); 1455 1460 schedule_work(&nic->reset_task); 1456 1461 } 1457 1462 ··· 1585 1590 goto err_free_netdev; 1586 1591 } 1587 1592 1593 + nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats); 1594 + if (!nic->drv_stats) { 1595 + err = -ENOMEM; 1596 + goto 
err_free_netdev; 1597 + } 1598 + 1588 1599 err = nicvf_set_qset_resources(nic); 1589 1600 if (err) 1590 1601 goto err_free_netdev; ··· 1649 1648 nicvf_unregister_interrupts(nic); 1650 1649 err_free_netdev: 1651 1650 pci_set_drvdata(pdev, NULL); 1651 + if (nic->drv_stats) 1652 + free_percpu(nic->drv_stats); 1652 1653 free_netdev(netdev); 1653 1654 err_release_regions: 1654 1655 pci_release_regions(pdev); ··· 1678 1675 unregister_netdev(pnetdev); 1679 1676 nicvf_unregister_interrupts(nic); 1680 1677 pci_set_drvdata(pdev, NULL); 1678 + if (nic->drv_stats) 1679 + free_percpu(nic->drv_stats); 1681 1680 free_netdev(netdev); 1682 1681 pci_release_regions(pdev); 1683 1682 pci_disable_device(pdev);
+67 -51
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
··· 104 104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 105 105 order); 106 106 if (!nic->rb_page) { 107 - nic->drv_stats.rcv_buffer_alloc_failures++; 107 + this_cpu_inc(nic->pnicvf->drv_stats-> 108 + rcv_buffer_alloc_failures); 108 109 return -ENOMEM; 109 110 } 110 111 nic->rb_page_offset = 0; ··· 271 270 rbdr_idx, new_rb); 272 271 next_rbdr: 273 272 /* Re-enable RBDR interrupts only if buffer allocation is success */ 274 - if (!nic->rb_alloc_fail && rbdr->enable) 273 + if (!nic->rb_alloc_fail && rbdr->enable && 274 + netif_running(nic->pnicvf->netdev)) 275 275 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); 276 276 277 277 if (rbdr_idx) ··· 363 361 364 362 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) 365 363 { 364 + struct sk_buff *skb; 365 + 366 366 if (!sq) 367 367 return; 368 368 if (!sq->dmem.base) ··· 375 371 sq->dmem.q_len * TSO_HEADER_SIZE, 376 372 sq->tso_hdrs, sq->tso_hdrs_phys); 377 373 374 + /* Free pending skbs in the queue */ 375 + smp_rmb(); 376 + while (sq->head != sq->tail) { 377 + skb = (struct sk_buff *)sq->skbuff[sq->head]; 378 + if (skb) 379 + dev_kfree_skb_any(skb); 380 + sq->head++; 381 + sq->head &= (sq->dmem.q_len - 1); 382 + } 378 383 kfree(sq->skbuff); 379 384 nicvf_free_q_desc_mem(nic, &sq->dmem); 380 385 } ··· 496 483 { 497 484 union nic_mbx mbx = {}; 498 485 499 - /* Reset all RXQ's stats */ 486 + /* Reset all RQ/SQ and VF stats */ 500 487 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; 488 + mbx.reset_stat.rx_stat_mask = 0x3FFF; 489 + mbx.reset_stat.tx_stat_mask = 0x1F; 501 490 mbx.reset_stat.rq_stat_mask = 0xFFFF; 491 + mbx.reset_stat.sq_stat_mask = 0xFFFF; 502 492 nicvf_send_msg_to_pf(nic, &mbx); 503 493 } 504 494 ··· 554 538 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); 555 539 nicvf_send_msg_to_pf(nic, &mbx); 556 540 557 - nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); 558 - if (!nic->sqs_mode) 541 + if (!nic->sqs_mode && (qidx == 0)) { 542 + /* Enable checking 
L3/L4 length and TCP/UDP checksums */ 543 + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 544 + (BIT(24) | BIT(23) | BIT(21))); 559 545 nicvf_config_vlan_stripping(nic, nic->netdev->features); 546 + } 560 547 561 548 /* Enable Receive queue */ 562 549 memset(&rq_cfg, 0, sizeof(struct rq_cfg)); ··· 1048 1029 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; 1049 1030 /* For non-tunneled pkts, point this to L2 ethertype */ 1050 1031 hdr->inner_l3_offset = skb_network_offset(skb) - 2; 1051 - nic->drv_stats.tx_tso++; 1032 + this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); 1052 1033 } 1053 1034 } 1054 1035 ··· 1180 1161 1181 1162 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); 1182 1163 1183 - nic->drv_stats.tx_tso++; 1164 + this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); 1184 1165 return 1; 1185 1166 } 1186 1167 ··· 1441 1422 /* Check for errors in the receive cmp.queue entry */ 1442 1423 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) 1443 1424 { 1444 - struct nicvf_hw_stats *stats = &nic->hw_stats; 1445 - 1446 1425 if (!cqe_rx->err_level && !cqe_rx->err_opcode) 1447 1426 return 0; 1448 1427 ··· 1452 1435 1453 1436 switch (cqe_rx->err_opcode) { 1454 1437 case CQ_RX_ERROP_RE_PARTIAL: 1455 - stats->rx_bgx_truncated_pkts++; 1438 + this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts); 1456 1439 break; 1457 1440 case CQ_RX_ERROP_RE_JABBER: 1458 - stats->rx_jabber_errs++; 1441 + this_cpu_inc(nic->drv_stats->rx_jabber_errs); 1459 1442 break; 1460 1443 case CQ_RX_ERROP_RE_FCS: 1461 - stats->rx_fcs_errs++; 1444 + this_cpu_inc(nic->drv_stats->rx_fcs_errs); 1462 1445 break; 1463 1446 case CQ_RX_ERROP_RE_RX_CTL: 1464 - stats->rx_bgx_errs++; 1447 + this_cpu_inc(nic->drv_stats->rx_bgx_errs); 1465 1448 break; 1466 1449 case CQ_RX_ERROP_PREL2_ERR: 1467 - stats->rx_prel2_errs++; 1450 + this_cpu_inc(nic->drv_stats->rx_prel2_errs); 1468 1451 break; 1469 1452 case CQ_RX_ERROP_L2_MAL: 1470 - stats->rx_l2_hdr_malformed++; 1453 + 
this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed); 1471 1454 break; 1472 1455 case CQ_RX_ERROP_L2_OVERSIZE: 1473 - stats->rx_oversize++; 1456 + this_cpu_inc(nic->drv_stats->rx_oversize); 1474 1457 break; 1475 1458 case CQ_RX_ERROP_L2_UNDERSIZE: 1476 - stats->rx_undersize++; 1459 + this_cpu_inc(nic->drv_stats->rx_undersize); 1477 1460 break; 1478 1461 case CQ_RX_ERROP_L2_LENMISM: 1479 - stats->rx_l2_len_mismatch++; 1462 + this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch); 1480 1463 break; 1481 1464 case CQ_RX_ERROP_L2_PCLP: 1482 - stats->rx_l2_pclp++; 1465 + this_cpu_inc(nic->drv_stats->rx_l2_pclp); 1483 1466 break; 1484 1467 case CQ_RX_ERROP_IP_NOT: 1485 - stats->rx_ip_ver_errs++; 1468 + this_cpu_inc(nic->drv_stats->rx_ip_ver_errs); 1486 1469 break; 1487 1470 case CQ_RX_ERROP_IP_CSUM_ERR: 1488 - stats->rx_ip_csum_errs++; 1471 + this_cpu_inc(nic->drv_stats->rx_ip_csum_errs); 1489 1472 break; 1490 1473 case CQ_RX_ERROP_IP_MAL: 1491 - stats->rx_ip_hdr_malformed++; 1474 + this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed); 1492 1475 break; 1493 1476 case CQ_RX_ERROP_IP_MALD: 1494 - stats->rx_ip_payload_malformed++; 1477 + this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed); 1495 1478 break; 1496 1479 case CQ_RX_ERROP_IP_HOP: 1497 - stats->rx_ip_ttl_errs++; 1480 + this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs); 1498 1481 break; 1499 1482 case CQ_RX_ERROP_L3_PCLP: 1500 - stats->rx_l3_pclp++; 1483 + this_cpu_inc(nic->drv_stats->rx_l3_pclp); 1501 1484 break; 1502 1485 case CQ_RX_ERROP_L4_MAL: 1503 - stats->rx_l4_malformed++; 1486 + this_cpu_inc(nic->drv_stats->rx_l4_malformed); 1504 1487 break; 1505 1488 case CQ_RX_ERROP_L4_CHK: 1506 - stats->rx_l4_csum_errs++; 1489 + this_cpu_inc(nic->drv_stats->rx_l4_csum_errs); 1507 1490 break; 1508 1491 case CQ_RX_ERROP_UDP_LEN: 1509 - stats->rx_udp_len_errs++; 1492 + this_cpu_inc(nic->drv_stats->rx_udp_len_errs); 1510 1493 break; 1511 1494 case CQ_RX_ERROP_L4_PORT: 1512 - stats->rx_l4_port_errs++; 1495 + 
this_cpu_inc(nic->drv_stats->rx_l4_port_errs); 1513 1496 break; 1514 1497 case CQ_RX_ERROP_TCP_FLAG: 1515 - stats->rx_tcp_flag_errs++; 1498 + this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs); 1516 1499 break; 1517 1500 case CQ_RX_ERROP_TCP_OFFSET: 1518 - stats->rx_tcp_offset_errs++; 1501 + this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs); 1519 1502 break; 1520 1503 case CQ_RX_ERROP_L4_PCLP: 1521 - stats->rx_l4_pclp++; 1504 + this_cpu_inc(nic->drv_stats->rx_l4_pclp); 1522 1505 break; 1523 1506 case CQ_RX_ERROP_RBDR_TRUNC: 1524 - stats->rx_truncated_pkts++; 1507 + this_cpu_inc(nic->drv_stats->rx_truncated_pkts); 1525 1508 break; 1526 1509 } 1527 1510 ··· 1529 1512 } 1530 1513 1531 1514 /* Check for errors in the send cmp.queue entry */ 1532 - int nicvf_check_cqe_tx_errs(struct nicvf *nic, 1533 - struct cmp_queue *cq, struct cqe_send_t *cqe_tx) 1515 + int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx) 1534 1516 { 1535 - struct cmp_queue_stats *stats = &cq->stats; 1536 - 1537 1517 switch (cqe_tx->send_status) { 1538 1518 case CQ_TX_ERROP_GOOD: 1539 - stats->tx.good++; 1540 1519 return 0; 1541 1520 case CQ_TX_ERROP_DESC_FAULT: 1542 - stats->tx.desc_fault++; 1521 + this_cpu_inc(nic->drv_stats->tx_desc_fault); 1543 1522 break; 1544 1523 case CQ_TX_ERROP_HDR_CONS_ERR: 1545 - stats->tx.hdr_cons_err++; 1524 + this_cpu_inc(nic->drv_stats->tx_hdr_cons_err); 1546 1525 break; 1547 1526 case CQ_TX_ERROP_SUBDC_ERR: 1548 - stats->tx.subdesc_err++; 1527 + this_cpu_inc(nic->drv_stats->tx_subdesc_err); 1528 + break; 1529 + case CQ_TX_ERROP_MAX_SIZE_VIOL: 1530 + this_cpu_inc(nic->drv_stats->tx_max_size_exceeded); 1549 1531 break; 1550 1532 case CQ_TX_ERROP_IMM_SIZE_OFLOW: 1551 - stats->tx.imm_size_oflow++; 1533 + this_cpu_inc(nic->drv_stats->tx_imm_size_oflow); 1552 1534 break; 1553 1535 case CQ_TX_ERROP_DATA_SEQUENCE_ERR: 1554 - stats->tx.data_seq_err++; 1536 + this_cpu_inc(nic->drv_stats->tx_data_seq_err); 1555 1537 break; 1556 1538 case 
CQ_TX_ERROP_MEM_SEQUENCE_ERR: 1557 - stats->tx.mem_seq_err++; 1539 + this_cpu_inc(nic->drv_stats->tx_mem_seq_err); 1558 1540 break; 1559 1541 case CQ_TX_ERROP_LOCK_VIOL: 1560 - stats->tx.lock_viol++; 1542 + this_cpu_inc(nic->drv_stats->tx_lock_viol); 1561 1543 break; 1562 1544 case CQ_TX_ERROP_DATA_FAULT: 1563 - stats->tx.data_fault++; 1545 + this_cpu_inc(nic->drv_stats->tx_data_fault); 1564 1546 break; 1565 1547 case CQ_TX_ERROP_TSTMP_CONFLICT: 1566 - stats->tx.tstmp_conflict++; 1548 + this_cpu_inc(nic->drv_stats->tx_tstmp_conflict); 1567 1549 break; 1568 1550 case CQ_TX_ERROP_TSTMP_TIMEOUT: 1569 - stats->tx.tstmp_timeout++; 1551 + this_cpu_inc(nic->drv_stats->tx_tstmp_timeout); 1570 1552 break; 1571 1553 case CQ_TX_ERROP_MEM_FAULT: 1572 - stats->tx.mem_fault++; 1554 + this_cpu_inc(nic->drv_stats->tx_mem_fault); 1573 1555 break; 1574 1556 case CQ_TX_ERROP_CK_OVERLAP: 1575 - stats->tx.csum_overlap++; 1557 + this_cpu_inc(nic->drv_stats->tx_csum_overlap); 1576 1558 break; 1577 1559 case CQ_TX_ERROP_CK_OFLOW: 1578 - stats->tx.csum_overflow++; 1560 + this_cpu_inc(nic->drv_stats->tx_csum_overflow); 1579 1561 break; 1580 1562 } 1581 1563
+2 -22
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
··· 158 158 CQ_TX_ERROP_DESC_FAULT = 0x10, 159 159 CQ_TX_ERROP_HDR_CONS_ERR = 0x11, 160 160 CQ_TX_ERROP_SUBDC_ERR = 0x12, 161 + CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13, 161 162 CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, 162 163 CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, 163 164 CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, ··· 171 170 CQ_TX_ERROP_CK_OFLOW = 0x89, 172 171 CQ_TX_ERROP_ENUM_LAST = 0x8a, 173 172 }; 174 - 175 - struct cmp_queue_stats { 176 - struct tx_stats { 177 - u64 good; 178 - u64 desc_fault; 179 - u64 hdr_cons_err; 180 - u64 subdesc_err; 181 - u64 imm_size_oflow; 182 - u64 data_seq_err; 183 - u64 mem_seq_err; 184 - u64 lock_viol; 185 - u64 data_fault; 186 - u64 tstmp_conflict; 187 - u64 tstmp_timeout; 188 - u64 mem_fault; 189 - u64 csum_overlap; 190 - u64 csum_overflow; 191 - } tx; 192 - } ____cacheline_aligned_in_smp; 193 173 194 174 enum RQ_SQ_STATS { 195 175 RQ_SQ_STATS_OCTS, ··· 223 241 spinlock_t lock; /* lock to serialize processing CQEs */ 224 242 void *desc; 225 243 struct q_desc_mem dmem; 226 - struct cmp_queue_stats stats; 227 244 int irq; 228 245 } ____cacheline_aligned_in_smp; 229 246 ··· 317 336 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); 318 337 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); 319 338 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); 320 - int nicvf_check_cqe_tx_errs(struct nicvf *nic, 321 - struct cmp_queue *cq, struct cqe_send_t *cqe_tx); 339 + int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx); 322 340 #endif /* NICVF_QUEUES_H */
+2 -2
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 1242 1242 1243 1243 pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); 1244 1244 if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { 1245 - bgx->bgx_id = 1246 - (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; 1245 + bgx->bgx_id = (pci_resource_start(pdev, 1246 + PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; 1247 1247 bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; 1248 1248 bgx->max_lmac = MAX_LMAC_PER_BGX; 1249 1249 bgx_vnic[bgx->bgx_id] = bgx;
+2
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
··· 28 28 #define MAX_DMAC_PER_LMAC 8 29 29 #define MAX_FRAME_SIZE 9216 30 30 31 + #define BGX_ID_MASK 0x3 32 + 31 33 #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 32 34 33 35 /* Registers */
-1
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 2951 2951 rq->cntxt_id, fl_id, 0xffff); 2952 2952 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 2953 2953 rq->desc, rq->phys_addr); 2954 - napi_hash_del(&rq->napi); 2955 2954 netif_napi_del(&rq->napi); 2956 2955 rq->netdev = NULL; 2957 2956 rq->cntxt_id = rq->abs_id = 0;
+1 -1
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
··· 178 178 CH_PCI_ID_TABLE_FENTRY(0x6005), 179 179 CH_PCI_ID_TABLE_FENTRY(0x6006), 180 180 CH_PCI_ID_TABLE_FENTRY(0x6007), 181 + CH_PCI_ID_TABLE_FENTRY(0x6008), 181 182 CH_PCI_ID_TABLE_FENTRY(0x6009), 182 183 CH_PCI_ID_TABLE_FENTRY(0x600d), 183 - CH_PCI_ID_TABLE_FENTRY(0x6010), 184 184 CH_PCI_ID_TABLE_FENTRY(0x6011), 185 185 CH_PCI_ID_TABLE_FENTRY(0x6014), 186 186 CH_PCI_ID_TABLE_FENTRY(0x6015),
-1
drivers/net/ethernet/emulex/benet/be_main.c
··· 2813 2813 if (eqo->q.created) { 2814 2814 be_eq_clean(eqo); 2815 2815 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 2816 - napi_hash_del(&eqo->napi); 2817 2816 netif_napi_del(&eqo->napi); 2818 2817 free_cpumask_var(eqo->affinity_mask); 2819 2818 }
+7 -1
drivers/net/ethernet/hisilicon/hns/hnae.c
··· 332 332 return ERR_PTR(-ENODEV); 333 333 334 334 handle = dev->ops->get_handle(dev, port_id); 335 - if (IS_ERR(handle)) 335 + if (IS_ERR(handle)) { 336 + put_device(&dev->cls_dev); 336 337 return handle; 338 + } 337 339 338 340 handle->dev = dev; 339 341 handle->owner_dev = owner_dev; ··· 358 356 for (j = i - 1; j >= 0; j--) 359 357 hnae_fini_queue(handle->qs[j]); 360 358 359 + put_device(&dev->cls_dev); 360 + 361 361 return ERR_PTR(-ENOMEM); 362 362 } 363 363 EXPORT_SYMBOL(hnae_get_handle); ··· 381 377 dev->ops->put_handle(h); 382 378 383 379 module_put(dev->owner); 380 + 381 + put_device(&dev->cls_dev); 384 382 } 385 383 EXPORT_SYMBOL(hnae_put_handle); 386 384
+2
drivers/net/ethernet/ibm/ehea/ehea_main.c
··· 2446 2446 2447 2447 netif_info(port, ifup, dev, "enabling port\n"); 2448 2448 2449 + netif_carrier_off(dev); 2450 + 2449 2451 ret = ehea_up(dev); 2450 2452 if (!ret) { 2451 2453 port_napi_enable(port);
+6 -4
drivers/net/ethernet/ibm/ibmvnic.c
··· 1505 1505 adapter->max_rx_add_entries_per_subcrq > entries_page ? 1506 1506 entries_page : adapter->max_rx_add_entries_per_subcrq; 1507 1507 1508 - /* Choosing the maximum number of queues supported by firmware*/ 1509 - adapter->req_tx_queues = adapter->max_tx_queues; 1510 - adapter->req_rx_queues = adapter->max_rx_queues; 1508 + adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues; 1509 + adapter->req_rx_queues = adapter->opt_rx_comp_queues; 1511 1510 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 1512 1511 1513 1512 adapter->req_mtu = adapter->max_mtu; ··· 3705 3706 struct net_device *netdev; 3706 3707 unsigned char *mac_addr_p; 3707 3708 struct dentry *ent; 3708 - char buf[16]; /* debugfs name buf */ 3709 + char buf[17]; /* debugfs name buf */ 3709 3710 int rc; 3710 3711 3711 3712 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", ··· 3843 3844 3844 3845 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) 3845 3846 debugfs_remove_recursive(adapter->debugfs_dir); 3847 + 3848 + dma_unmap_single(&dev->dev, adapter->stats_token, 3849 + sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE); 3846 3850 3847 3851 if (adapter->ras_comps) 3848 3852 dma_free_coherent(&dev->dev,
+2
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 1381 1381 temp = (val & 0x003fff00) >> 8; 1382 1382 1383 1383 temp *= 64000000; 1384 + temp += mp->t_clk / 2; 1384 1385 do_div(temp, mp->t_clk); 1385 1386 1386 1387 return (unsigned int)temp; ··· 1418 1417 1419 1418 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1420 1419 temp *= 64000000; 1420 + temp += mp->t_clk / 2; 1421 1421 do_div(temp, mp->t_clk); 1422 1422 1423 1423 return (unsigned int)temp;
+13
drivers/net/ethernet/marvell/sky2.c
··· 5220 5220 5221 5221 static void sky2_shutdown(struct pci_dev *pdev) 5222 5222 { 5223 + struct sky2_hw *hw = pci_get_drvdata(pdev); 5224 + int port; 5225 + 5226 + for (port = 0; port < hw->ports; port++) { 5227 + struct net_device *ndev = hw->dev[port]; 5228 + 5229 + rtnl_lock(); 5230 + if (netif_running(ndev)) { 5231 + dev_close(ndev); 5232 + netif_device_detach(ndev); 5233 + } 5234 + rtnl_unlock(); 5235 + } 5223 5236 sky2_suspend(&pdev->dev); 5224 5237 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); 5225 5238 pci_set_power_state(pdev, PCI_D3hot);
-1
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 2202 2202 2203 2203 if (!shutdown) 2204 2204 free_netdev(dev); 2205 - dev->ethtool_ops = NULL; 2206 2205 } 2207 2206 2208 2207 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+16 -15
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1445 1445 c->netdev = priv->netdev; 1446 1446 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); 1447 1447 c->num_tc = priv->params.num_tc; 1448 + c->xdp = !!priv->xdp_prog; 1448 1449 1449 1450 if (priv->params.rx_am_enabled) 1450 1451 rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); ··· 1469 1468 if (err) 1470 1469 goto err_close_tx_cqs; 1471 1470 1471 + /* XDP SQ CQ params are same as normal TXQ sq CQ params */ 1472 + err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, 1473 + priv->params.tx_cq_moderation) : 0; 1474 + if (err) 1475 + goto err_close_rx_cq; 1476 + 1472 1477 napi_enable(&c->napi); 1473 1478 1474 1479 err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); ··· 1495 1488 } 1496 1489 } 1497 1490 1498 - if (priv->xdp_prog) { 1499 - /* XDP SQ CQ params are same as normal TXQ sq CQ params */ 1500 - err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, 1501 - priv->params.tx_cq_moderation); 1502 - if (err) 1503 - goto err_close_sqs; 1491 + err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0; 1492 + if (err) 1493 + goto err_close_sqs; 1504 1494 1505 - err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq); 1506 - if (err) { 1507 - mlx5e_close_cq(&c->xdp_sq.cq); 1508 - goto err_close_sqs; 1509 - } 1510 - } 1511 - 1512 - c->xdp = !!priv->xdp_prog; 1513 1495 err = mlx5e_open_rq(c, &cparam->rq, &c->rq); 1514 1496 if (err) 1515 1497 goto err_close_xdp_sq; ··· 1508 1512 1509 1513 return 0; 1510 1514 err_close_xdp_sq: 1511 - mlx5e_close_sq(&c->xdp_sq); 1515 + if (c->xdp) 1516 + mlx5e_close_sq(&c->xdp_sq); 1512 1517 1513 1518 err_close_sqs: 1514 1519 mlx5e_close_sqs(c); ··· 1519 1522 1520 1523 err_disable_napi: 1521 1524 napi_disable(&c->napi); 1525 + if (c->xdp) 1526 + mlx5e_close_cq(&c->xdp_sq.cq); 1527 + 1528 + err_close_rx_cq: 1522 1529 mlx5e_close_cq(&c->rq.cq); 1523 1530 1524 1531 err_close_tx_cqs:
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 308 308 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; 309 309 #endif 310 310 311 - netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; 311 + netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; 312 312 netdev->hw_features |= NETIF_F_HW_TC; 313 313 314 314 eth_hw_addr_random(netdev);
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 237 237 skb_flow_dissector_target(f->dissector, 238 238 FLOW_DISSECTOR_KEY_VLAN, 239 239 f->mask); 240 - if (mask->vlan_id) { 240 + if (mask->vlan_id || mask->vlan_priority) { 241 241 MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); 242 242 MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); 243 243 244 244 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); 245 245 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); 246 + 247 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); 248 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); 246 249 } 247 250 } 248 251
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 57 57 if (esw->mode != SRIOV_OFFLOADS) 58 58 return ERR_PTR(-EOPNOTSUPP); 59 59 60 - action = attr->action; 60 + /* per flow vlan pop/push is emulated, don't set that into the firmware */ 61 + action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); 61 62 62 63 if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 63 64 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1690 1690 { 1691 1691 1692 1692 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); 1693 - if (IS_ERR_OR_NULL(steering->root_ns)) 1693 + if (!steering->root_ns) 1694 1694 goto cleanup; 1695 1695 1696 1696 if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1226 1226 1227 1227 pci_set_drvdata(pdev, dev); 1228 1228 1229 + dev->pdev = pdev; 1230 + dev->event = mlx5_core_event; 1231 + 1229 1232 if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { 1230 1233 mlx5_core_warn(dev, 1231 1234 "selected profile out of range, selecting default (%d)\n", ··· 1236 1233 prof_sel = MLX5_DEFAULT_PROF; 1237 1234 } 1238 1235 dev->profile = &profile[prof_sel]; 1239 - dev->pdev = pdev; 1240 - dev->event = mlx5_core_event; 1241 1236 1242 1237 INIT_LIST_HEAD(&priv->ctx_list); 1243 1238 spin_lock_init(&priv->ctx_lock);
+3 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 231 231 232 232 span_entry->used = true; 233 233 span_entry->id = index; 234 - span_entry->ref_count = 0; 234 + span_entry->ref_count = 1; 235 235 span_entry->local_port = local_port; 236 236 return span_entry; 237 237 } ··· 270 270 271 271 span_entry = mlxsw_sp_span_entry_find(port); 272 272 if (span_entry) { 273 + /* Already exists, just take a reference */ 273 274 span_entry->ref_count++; 274 275 return span_entry; 275 276 } ··· 281 280 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, 282 281 struct mlxsw_sp_span_entry *span_entry) 283 282 { 283 + WARN_ON(!span_entry->ref_count); 284 284 if (--span_entry->ref_count == 0) 285 285 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); 286 286 return 0;
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
··· 115 115 struct mlxsw_sp_mid { 116 116 struct list_head list; 117 117 unsigned char addr[ETH_ALEN]; 118 - u16 vid; 118 + u16 fid; 119 119 u16 mid; 120 120 unsigned int ref_count; 121 121 };
+70 -64
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 594 594 return 0; 595 595 } 596 596 597 + static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp); 598 + 597 599 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) 598 600 { 601 + mlxsw_sp_router_fib_flush(mlxsw_sp); 599 602 kfree(mlxsw_sp->router.vrs); 600 603 } 601 604 602 605 struct mlxsw_sp_neigh_key { 603 - unsigned char addr[sizeof(struct in6_addr)]; 604 - struct net_device *dev; 606 + struct neighbour *n; 605 607 }; 606 608 607 609 struct mlxsw_sp_neigh_entry { 608 610 struct rhash_head ht_node; 609 611 struct mlxsw_sp_neigh_key key; 610 612 u16 rif; 611 - struct neighbour *n; 612 613 bool offloaded; 613 614 struct delayed_work dw; 614 615 struct mlxsw_sp_port *mlxsw_sp_port; ··· 647 646 static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); 648 647 649 648 static struct mlxsw_sp_neigh_entry * 650 - mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, 651 - struct net_device *dev, u16 rif, 652 - struct neighbour *n) 649 + mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif) 653 650 { 654 651 struct mlxsw_sp_neigh_entry *neigh_entry; 655 652 656 653 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); 657 654 if (!neigh_entry) 658 655 return NULL; 659 - memcpy(neigh_entry->key.addr, addr, addr_len); 660 - neigh_entry->key.dev = dev; 656 + neigh_entry->key.n = n; 661 657 neigh_entry->rif = rif; 662 - neigh_entry->n = n; 663 658 INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); 664 659 INIT_LIST_HEAD(&neigh_entry->nexthop_list); 665 660 return neigh_entry; ··· 668 671 } 669 672 670 673 static struct mlxsw_sp_neigh_entry * 671 - mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, 672 - size_t addr_len, struct net_device *dev) 674 + mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) 673 675 { 674 - struct mlxsw_sp_neigh_key key = {{ 0 } }; 676 + struct mlxsw_sp_neigh_key key; 675 677 676 - memcpy(key.addr, addr, addr_len); 677 - key.dev = dev; 
678 + key.n = n; 678 679 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, 679 680 &key, mlxsw_sp_neigh_ht_params); 680 681 } ··· 684 689 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 685 690 struct mlxsw_sp_neigh_entry *neigh_entry; 686 691 struct mlxsw_sp_rif *r; 687 - u32 dip; 688 692 int err; 689 693 690 694 if (n->tbl != &arp_tbl) 691 695 return 0; 692 696 693 - dip = ntohl(*((__be32 *) n->primary_key)); 694 - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), 695 - n->dev); 696 - if (neigh_entry) { 697 - WARN_ON(neigh_entry->n != n); 697 + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); 698 + if (neigh_entry) 698 699 return 0; 699 - } 700 700 701 701 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); 702 702 if (WARN_ON(!r)) 703 703 return -EINVAL; 704 704 705 - neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, 706 - r->rif, n); 705 + neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif); 707 706 if (!neigh_entry) 708 707 return -ENOMEM; 709 708 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); ··· 716 727 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 717 728 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 718 729 struct mlxsw_sp_neigh_entry *neigh_entry; 719 - u32 dip; 720 730 721 731 if (n->tbl != &arp_tbl) 722 732 return; 723 733 724 - dip = ntohl(*((__be32 *) n->primary_key)); 725 - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), 726 - n->dev); 734 + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); 727 735 if (!neigh_entry) 728 736 return; 729 737 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); ··· 803 817 } 804 818 } 805 819 820 + static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) 821 + { 822 + u8 num_rec, last_rec_index, num_entries; 823 + 824 + num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); 825 + last_rec_index = num_rec - 1; 826 + 827 + if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM) 828 + return false; 829 + if 
(mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) == 830 + MLXSW_REG_RAUHTD_TYPE_IPV6) 831 + return true; 832 + 833 + num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, 834 + last_rec_index); 835 + if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC) 836 + return true; 837 + return false; 838 + } 839 + 806 840 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) 807 841 { 808 842 char *rauhtd_pl; ··· 849 843 for (i = 0; i < num_rec; i++) 850 844 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, 851 845 i); 852 - } while (num_rec); 846 + } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); 853 847 rtnl_unlock(); 854 848 855 849 kfree(rauhtd_pl); ··· 868 862 * is active regardless of the traffic. 869 863 */ 870 864 if (!list_empty(&neigh_entry->nexthop_list)) 871 - neigh_event_send(neigh_entry->n, NULL); 865 + neigh_event_send(neigh_entry->key.n, NULL); 872 866 } 873 867 rtnl_unlock(); 874 868 } ··· 914 908 rtnl_lock(); 915 909 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, 916 910 nexthop_neighs_list_node) { 917 - if (!(neigh_entry->n->nud_state & NUD_VALID) && 911 + if (!(neigh_entry->key.n->nud_state & NUD_VALID) && 918 912 !list_empty(&neigh_entry->nexthop_list)) 919 - neigh_event_send(neigh_entry->n, NULL); 913 + neigh_event_send(neigh_entry->key.n, NULL); 920 914 } 921 915 rtnl_unlock(); 922 916 ··· 933 927 { 934 928 struct mlxsw_sp_neigh_entry *neigh_entry = 935 929 container_of(work, struct mlxsw_sp_neigh_entry, dw.work); 936 - struct neighbour *n = neigh_entry->n; 930 + struct neighbour *n = neigh_entry->key.n; 937 931 struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; 938 932 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 939 933 char rauht_pl[MLXSW_REG_RAUHT_LEN]; ··· 1036 1030 1037 1031 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1038 1032 dip = ntohl(*((__be32 *) n->primary_key)); 1039 - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, 1040 - &dip, 1041 - 
sizeof(__be32), 1042 - dev); 1043 - if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) { 1033 + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); 1034 + if (WARN_ON(!neigh_entry)) { 1044 1035 mlxsw_sp_port_dev_put(mlxsw_sp_port); 1045 1036 return NOTIFY_DONE; 1046 1037 } ··· 1346 1343 struct fib_nh *fib_nh) 1347 1344 { 1348 1345 struct mlxsw_sp_neigh_entry *neigh_entry; 1349 - u32 gwip = ntohl(fib_nh->nh_gw); 1350 1346 struct net_device *dev = fib_nh->nh_dev; 1351 1347 struct neighbour *n; 1352 1348 u8 nud_state; 1353 1349 1354 - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, 1355 - sizeof(gwip), dev); 1356 - if (!neigh_entry) { 1357 - __be32 gwipn = htonl(gwip); 1358 - 1359 - n = neigh_create(&arp_tbl, &gwipn, dev); 1350 + /* Take a reference of neigh here ensuring that neigh would 1351 + * not be detructed before the nexthop entry is finished. 1352 + * The reference is taken either in neigh_lookup() or 1353 + * in neith_create() in case n is not found. 1354 + */ 1355 + n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); 1356 + if (!n) { 1357 + n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev); 1360 1358 if (IS_ERR(n)) 1361 1359 return PTR_ERR(n); 1362 1360 neigh_event_send(n, NULL); 1363 - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, 1364 - sizeof(gwip), dev); 1365 - if (!neigh_entry) { 1366 - neigh_release(n); 1367 - return -EINVAL; 1368 - } 1369 - } else { 1370 - /* Take a reference of neigh here ensuring that neigh would 1371 - * not be detructed before the nexthop entry is finished. 
1372 - * The second branch takes the reference in neith_create() 1373 - */ 1374 - n = neigh_entry->n; 1375 - neigh_clone(n); 1361 + } 1362 + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); 1363 + if (!neigh_entry) { 1364 + neigh_release(n); 1365 + return -EINVAL; 1376 1366 } 1377 1367 1378 1368 /* If that is the first nexthop connected to that neigh, add to ··· 1399 1403 if (list_empty(&nh->neigh_entry->nexthop_list)) 1400 1404 list_del(&nh->neigh_entry->nexthop_neighs_list_node); 1401 1405 1402 - neigh_release(neigh_entry->n); 1406 + neigh_release(neigh_entry->key.n); 1403 1407 } 1404 1408 1405 1409 static struct mlxsw_sp_nexthop_group * ··· 1459 1463 1460 1464 for (i = 0; i < fi->fib_nhs; i++) { 1461 1465 struct fib_nh *fib_nh = &fi->fib_nh[i]; 1462 - u32 gwip = ntohl(fib_nh->nh_gw); 1466 + struct neighbour *n = nh->neigh_entry->key.n; 1463 1467 1464 - if (memcmp(nh->neigh_entry->key.addr, 1465 - &gwip, sizeof(u32)) == 0 && 1466 - nh->neigh_entry->key.dev == fib_nh->nh_dev) 1468 + if (memcmp(n->primary_key, &fib_nh->nh_gw, 1469 + sizeof(fib_nh->nh_gw)) == 0 && 1470 + n->dev == fib_nh->nh_dev) 1467 1471 return true; 1468 1472 } 1469 1473 return false; ··· 1870 1874 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 1871 1875 } 1872 1876 1873 - static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) 1877 + static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) 1874 1878 { 1875 1879 struct mlxsw_resources *resources; 1876 1880 struct mlxsw_sp_fib_entry *fib_entry; 1877 1881 struct mlxsw_sp_fib_entry *tmp; 1878 1882 struct mlxsw_sp_vr *vr; 1879 1883 int i; 1880 - int err; 1881 1884 1882 1885 resources = mlxsw_core_resources_get(mlxsw_sp->core); 1883 1886 for (i = 0; i < resources->max_virtual_routers; i++) { 1884 1887 vr = &mlxsw_sp->router.vrs[i]; 1888 + 1885 1889 if (!vr->used) 1886 1890 continue; 1887 1891 ··· 1897 1901 break; 1898 1902 } 1899 1903 } 1904 + } 1905 + 1906 + static void mlxsw_sp_router_fib4_abort(struct 
mlxsw_sp *mlxsw_sp) 1907 + { 1908 + int err; 1909 + 1910 + mlxsw_sp_router_fib_flush(mlxsw_sp); 1900 1911 mlxsw_sp->router.aborted = true; 1901 1912 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); 1902 1913 if (err) ··· 1960 1957 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb); 1961 1958 struct fib_entry_notifier_info *fen_info = ptr; 1962 1959 int err; 1960 + 1961 + if (!net_eq(fen_info->info.net, &init_net)) 1962 + return NOTIFY_DONE; 1963 1963 1964 1964 switch (event) { 1965 1965 case FIB_EVENT_ENTRY_ADD:
+7 -7
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 929 929 930 930 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, 931 931 const unsigned char *addr, 932 - u16 vid) 932 + u16 fid) 933 933 { 934 934 struct mlxsw_sp_mid *mid; 935 935 936 936 list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { 937 - if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) 937 + if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) 938 938 return mid; 939 939 } 940 940 return NULL; ··· 942 942 943 943 static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, 944 944 const unsigned char *addr, 945 - u16 vid) 945 + u16 fid) 946 946 { 947 947 struct mlxsw_sp_mid *mid; 948 948 u16 mid_idx; ··· 958 958 959 959 set_bit(mid_idx, mlxsw_sp->br_mids.mapped); 960 960 ether_addr_copy(mid->addr, addr); 961 - mid->vid = vid; 961 + mid->fid = fid; 962 962 mid->mid = mid_idx; 963 963 mid->ref_count = 0; 964 964 list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); ··· 991 991 if (switchdev_trans_ph_prepare(trans)) 992 992 return 0; 993 993 994 - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); 994 + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); 995 995 if (!mid) { 996 - mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); 996 + mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid); 997 997 if (!mid) { 998 998 netdev_err(dev, "Unable to allocate MC group\n"); 999 999 return -ENOMEM; ··· 1137 1137 u16 mid_idx; 1138 1138 int err = 0; 1139 1139 1140 - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); 1140 + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); 1141 1141 if (!mid) { 1142 1142 netdev_err(dev, "Unable to remove port from MC DB\n"); 1143 1143 return -EINVAL;
-3
drivers/net/ethernet/qlogic/qed/qed_hsi.h
··· 727 727 #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 728 728 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 729 729 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 730 - #define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1 731 - #define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12 732 - 733 730 }; 734 731 735 732 struct core_tx_bd {
+1
drivers/net/ethernet/qlogic/qed/qed_ll2.c
··· 1119 1119 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << 1120 1120 CORE_TX_BD_FLAGS_START_BD_SHIFT; 1121 1121 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); 1122 + SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type); 1122 1123 DMA_REGPAIR_LE(start_bd->addr, first_frag); 1123 1124 start_bd->nbytes = cpu_to_le16(first_frag_len); 1124 1125
+8 -9
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 839 839 { 840 840 int i; 841 841 842 + if (IS_ENABLED(CONFIG_QED_RDMA)) { 843 + params->rdma_pf_params.num_qps = QED_ROCE_QPS; 844 + params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 845 + /* divide by 3 the MRs to avoid MF ILT overflow */ 846 + params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; 847 + params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 848 + } 849 + 842 850 for (i = 0; i < cdev->num_hwfns; i++) { 843 851 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 844 852 845 853 p_hwfn->pf_params = *params; 846 854 } 847 - 848 - if (!IS_ENABLED(CONFIG_QED_RDMA)) 849 - return; 850 - 851 - params->rdma_pf_params.num_qps = QED_ROCE_QPS; 852 - params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 853 - /* divide by 3 the MRs to avoid MF ILT overflow */ 854 - params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; 855 - params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 856 855 } 857 856 858 857 static int qed_slowpath_start(struct qed_dev *cdev,
+16 -9
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
··· 175 175 for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { 176 176 int tc; 177 177 178 - for (j = 0; j < QEDE_NUM_RQSTATS; j++) 179 - sprintf(buf + (k + j) * ETH_GSTRING_LEN, 180 - "%d: %s", i, qede_rqstats_arr[j].string); 181 - k += QEDE_NUM_RQSTATS; 182 - for (tc = 0; tc < edev->num_tc; tc++) { 183 - for (j = 0; j < QEDE_NUM_TQSTATS; j++) 178 + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { 179 + for (j = 0; j < QEDE_NUM_RQSTATS; j++) 184 180 sprintf(buf + (k + j) * ETH_GSTRING_LEN, 185 - "%d.%d: %s", i, tc, 186 - qede_tqstats_arr[j].string); 187 - k += QEDE_NUM_TQSTATS; 181 + "%d: %s", i, 182 + qede_rqstats_arr[j].string); 183 + k += QEDE_NUM_RQSTATS; 184 + } 185 + 186 + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { 187 + for (tc = 0; tc < edev->num_tc; tc++) { 188 + for (j = 0; j < QEDE_NUM_TQSTATS; j++) 189 + sprintf(buf + (k + j) * 190 + ETH_GSTRING_LEN, 191 + "%d.%d: %s", i, tc, 192 + qede_tqstats_arr[j].string); 193 + k += QEDE_NUM_TQSTATS; 194 + } 188 195 } 189 196 } 190 197
+1 -1
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 2839 2839 } 2840 2840 2841 2841 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, 2842 - rxq->rx_buf_size, DMA_FROM_DEVICE); 2842 + PAGE_SIZE, DMA_FROM_DEVICE); 2843 2843 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { 2844 2844 DP_NOTICE(edev, 2845 2845 "Failed to map TPA replacement buffer\n");
+11 -4
drivers/net/ethernet/qualcomm/emac/emac-mac.c
··· 575 575 576 576 mac |= TXEN | RXEN; /* enable RX/TX */ 577 577 578 - /* We don't have ethtool support yet, so force flow-control mode 579 - * to 'full' always. 580 - */ 581 - mac |= TXFC | RXFC; 578 + /* Configure MAC flow control to match the PHY's settings. */ 579 + if (phydev->pause) 580 + mac |= RXFC; 581 + if (phydev->pause != phydev->asym_pause) 582 + mac |= TXFC; 582 583 583 584 /* setup link speed */ 584 585 mac &= ~SPEED_MASK; ··· 1003 1002 /* enable mac irq */ 1004 1003 writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); 1005 1004 writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); 1005 + 1006 + /* Enable pause frames. Without this feature, the EMAC has been shown 1007 + * to receive (and drop) frames with FCS errors at gigabit connections. 1008 + */ 1009 + adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 1010 + adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 1006 1011 1007 1012 adpt->phydev->irq = PHY_IGNORE_INTERRUPT; 1008 1013 phy_start(adpt->phydev);
+1 -1
drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
··· 421 421 /* CDR Settings */ 422 422 {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, 423 423 UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, 424 - {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)}, 424 + {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)}, 425 425 {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, 426 426 427 427 /* TX/RX Settings */
+3
drivers/net/ethernet/sfc/efx.c
··· 485 485 *channel = *old_channel; 486 486 487 487 channel->napi_dev = NULL; 488 + INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); 489 + channel->napi_str.napi_id = 0; 490 + channel->napi_str.state = 0; 488 491 memset(&channel->eventq, 0, sizeof(channel->eventq)); 489 492 490 493 for (j = 0; j < EFX_TXQ_TYPES; j++) {
+1 -1
drivers/net/ethernet/stmicro/stmmac/Kconfig
··· 107 107 config DWMAC_STM32 108 108 tristate "STM32 DWMAC support" 109 109 default ARCH_STM32 110 - depends on OF && HAS_IOMEM 110 + depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST) 111 111 select MFD_SYSCON 112 112 ---help--- 113 113 Support for ethernet controller on STM32 SOCs.
+2 -2
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
··· 63 63 #define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 64 64 #define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 65 65 #define TSE_PCS_SW_RESET_TIMEOUT 100 66 - #define TSE_PCS_USE_SGMII_AN_MASK BIT(2) 67 - #define TSE_PCS_USE_SGMII_ENA BIT(1) 66 + #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) 67 + #define TSE_PCS_USE_SGMII_ENA BIT(0) 68 68 69 69 #define SGMII_ADAPTER_CTRL_REG 0x00 70 70 #define SGMII_ADAPTER_DISABLE 0x0001
+14 -10
drivers/net/ethernet/stmicro/stmmac/common.h
··· 120 120 unsigned long ip_csum_bypassed; 121 121 unsigned long ipv4_pkt_rcvd; 122 122 unsigned long ipv6_pkt_rcvd; 123 - unsigned long rx_msg_type_ext_no_ptp; 124 - unsigned long rx_msg_type_sync; 125 - unsigned long rx_msg_type_follow_up; 126 - unsigned long rx_msg_type_delay_req; 127 - unsigned long rx_msg_type_delay_resp; 128 - unsigned long rx_msg_type_pdelay_req; 129 - unsigned long rx_msg_type_pdelay_resp; 130 - unsigned long rx_msg_type_pdelay_follow_up; 123 + unsigned long no_ptp_rx_msg_type_ext; 124 + unsigned long ptp_rx_msg_type_sync; 125 + unsigned long ptp_rx_msg_type_follow_up; 126 + unsigned long ptp_rx_msg_type_delay_req; 127 + unsigned long ptp_rx_msg_type_delay_resp; 128 + unsigned long ptp_rx_msg_type_pdelay_req; 129 + unsigned long ptp_rx_msg_type_pdelay_resp; 130 + unsigned long ptp_rx_msg_type_pdelay_follow_up; 131 + unsigned long ptp_rx_msg_type_announce; 132 + unsigned long ptp_rx_msg_type_management; 133 + unsigned long ptp_rx_msg_pkt_reserved_type; 131 134 unsigned long ptp_frame_type; 132 135 unsigned long ptp_ver; 133 136 unsigned long timestamp_dropped; ··· 485 482 /* PTP and HW Timer helpers */ 486 483 struct stmmac_hwtimestamp { 487 484 void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); 488 - u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate); 485 + u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, 486 + int gmac4); 489 487 int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); 490 488 int (*config_addend) (void __iomem *ioaddr, u32 addend); 491 489 int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, 492 - int add_sub); 490 + int add_sub, int gmac4); 493 491 u64(*get_systime) (void __iomem *ioaddr); 494 492 }; 495 493
+12 -8
drivers/net/ethernet/stmicro/stmmac/descs.h
··· 155 155 #define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) 156 156 157 157 /* Extended RDES4 message type definitions */ 158 - #define RDES_EXT_NO_PTP 0 159 - #define RDES_EXT_SYNC 1 160 - #define RDES_EXT_FOLLOW_UP 2 161 - #define RDES_EXT_DELAY_REQ 3 162 - #define RDES_EXT_DELAY_RESP 4 163 - #define RDES_EXT_PDELAY_REQ 5 164 - #define RDES_EXT_PDELAY_RESP 6 165 - #define RDES_EXT_PDELAY_FOLLOW_UP 7 158 + #define RDES_EXT_NO_PTP 0x0 159 + #define RDES_EXT_SYNC 0x1 160 + #define RDES_EXT_FOLLOW_UP 0x2 161 + #define RDES_EXT_DELAY_REQ 0x3 162 + #define RDES_EXT_DELAY_RESP 0x4 163 + #define RDES_EXT_PDELAY_REQ 0x5 164 + #define RDES_EXT_PDELAY_RESP 0x6 165 + #define RDES_EXT_PDELAY_FOLLOW_UP 0x7 166 + #define RDES_PTP_ANNOUNCE 0x8 167 + #define RDES_PTP_MANAGEMENT 0x9 168 + #define RDES_PTP_SIGNALING 0xa 169 + #define RDES_PTP_PKT_RESERVED_TYPE 0xf 166 170 167 171 /* Basic descriptor structure for normal and alternate descriptors */ 168 172 struct dma_desc {
+74 -21
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
··· 123 123 x->ipv4_pkt_rcvd++; 124 124 if (rdes1 & RDES1_IPV6_HEADER) 125 125 x->ipv6_pkt_rcvd++; 126 - if (message_type == RDES_EXT_SYNC) 127 - x->rx_msg_type_sync++; 126 + 127 + if (message_type == RDES_EXT_NO_PTP) 128 + x->no_ptp_rx_msg_type_ext++; 129 + else if (message_type == RDES_EXT_SYNC) 130 + x->ptp_rx_msg_type_sync++; 128 131 else if (message_type == RDES_EXT_FOLLOW_UP) 129 - x->rx_msg_type_follow_up++; 132 + x->ptp_rx_msg_type_follow_up++; 130 133 else if (message_type == RDES_EXT_DELAY_REQ) 131 - x->rx_msg_type_delay_req++; 134 + x->ptp_rx_msg_type_delay_req++; 132 135 else if (message_type == RDES_EXT_DELAY_RESP) 133 - x->rx_msg_type_delay_resp++; 136 + x->ptp_rx_msg_type_delay_resp++; 134 137 else if (message_type == RDES_EXT_PDELAY_REQ) 135 - x->rx_msg_type_pdelay_req++; 138 + x->ptp_rx_msg_type_pdelay_req++; 136 139 else if (message_type == RDES_EXT_PDELAY_RESP) 137 - x->rx_msg_type_pdelay_resp++; 140 + x->ptp_rx_msg_type_pdelay_resp++; 138 141 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) 139 - x->rx_msg_type_pdelay_follow_up++; 140 - else 141 - x->rx_msg_type_ext_no_ptp++; 142 + x->ptp_rx_msg_type_pdelay_follow_up++; 143 + else if (message_type == RDES_PTP_ANNOUNCE) 144 + x->ptp_rx_msg_type_announce++; 145 + else if (message_type == RDES_PTP_MANAGEMENT) 146 + x->ptp_rx_msg_type_management++; 147 + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) 148 + x->ptp_rx_msg_pkt_reserved_type++; 142 149 143 150 if (rdes1 & RDES1_PTP_PACKET_TYPE) 144 151 x->ptp_frame_type++; ··· 211 204 212 205 static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) 213 206 { 214 - return (p->des3 & TDES3_TIMESTAMP_STATUS) 215 - >> TDES3_TIMESTAMP_STATUS_SHIFT; 207 + /* Context type from W/B descriptor must be zero */ 208 + if (p->des3 & TDES3_CONTEXT_TYPE) 209 + return -EINVAL; 210 + 211 + /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ 212 + if (p->des3 & TDES3_TIMESTAMP_STATUS) 213 + return 0; 214 + 215 + return 1; 216 216 } 
217 217 218 - /* NOTE: For RX CTX bit has to be checked before 219 - * HAVE a specific function for TX and another one for RX 220 - */ 221 - static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) 218 + static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) 222 219 { 223 220 struct dma_desc *p = (struct dma_desc *)desc; 224 221 u64 ns; ··· 234 223 return ns; 235 224 } 236 225 237 - static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats) 226 + static int dwmac4_rx_check_timestamp(void *desc) 238 227 { 239 228 struct dma_desc *p = (struct dma_desc *)desc; 229 + u32 own, ctxt; 230 + int ret = 1; 240 231 241 - return (p->des1 & RDES1_TIMESTAMP_AVAILABLE) 242 - >> RDES1_TIMESTAMP_AVAILABLE_SHIFT; 232 + own = p->des3 & RDES3_OWN; 233 + ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) 234 + >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); 235 + 236 + if (likely(!own && ctxt)) { 237 + if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) 238 + /* Corrupted value */ 239 + ret = -EINVAL; 240 + else 241 + /* A valid Timestamp is ready to be read */ 242 + ret = 0; 243 + } 244 + 245 + /* Timestamp not ready */ 246 + return ret; 247 + } 248 + 249 + static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) 250 + { 251 + struct dma_desc *p = (struct dma_desc *)desc; 252 + int ret = -EINVAL; 253 + 254 + /* Get the status from normal w/b descriptor */ 255 + if (likely(p->des3 & TDES3_RS1V)) { 256 + if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) { 257 + int i = 0; 258 + 259 + /* Check if timestamp is OK from context descriptor */ 260 + do { 261 + ret = dwmac4_rx_check_timestamp(desc); 262 + if (ret < 0) 263 + goto exit; 264 + i++; 265 + 266 + } while ((ret == 1) || (i < 10)); 267 + 268 + if (i == 10) 269 + ret = -EBUSY; 270 + } 271 + } 272 + exit: 273 + return ret; 243 274 } 244 275 245 276 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, ··· 426 373 .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, 427 374 .enable_tx_timestamp = 
dwmac4_rd_enable_tx_timestamp, 428 375 .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, 429 - .get_timestamp = dwmac4_wrback_get_timestamp, 430 - .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status, 376 + .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status, 377 + .get_timestamp = dwmac4_get_timestamp, 431 378 .set_tx_ic = dwmac4_rd_set_tx_ic, 432 379 .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, 433 380 .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
+4
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
··· 59 59 #define TDES3_CTXT_TCMSSV BIT(26) 60 60 61 61 /* TDES3 Common */ 62 + #define TDES3_RS1V BIT(26) 63 + #define TDES3_RS1V_SHIFT 26 62 64 #define TDES3_LAST_DESCRIPTOR BIT(28) 63 65 #define TDES3_LAST_DESCRIPTOR_SHIFT 28 64 66 #define TDES3_FIRST_DESCRIPTOR BIT(29) 65 67 #define TDES3_CONTEXT_TYPE BIT(30) 68 + #define TDES3_CONTEXT_TYPE_SHIFT 30 66 69 67 70 /* TDS3 use for both format (read and write back) */ 68 71 #define TDES3_OWN BIT(31) ··· 120 117 #define RDES3_LAST_DESCRIPTOR BIT(28) 121 118 #define RDES3_FIRST_DESCRIPTOR BIT(29) 122 119 #define RDES3_CONTEXT_DESCRIPTOR BIT(30) 120 + #define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30 123 121 124 122 /* RDES3 (read format) */ 125 123 #define RDES3_BUFFER1_VALID_ADDR BIT(24)
+18 -10
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
··· 150 150 x->ipv4_pkt_rcvd++; 151 151 if (rdes4 & ERDES4_IPV6_PKT_RCVD) 152 152 x->ipv6_pkt_rcvd++; 153 - if (message_type == RDES_EXT_SYNC) 154 - x->rx_msg_type_sync++; 153 + 154 + if (message_type == RDES_EXT_NO_PTP) 155 + x->no_ptp_rx_msg_type_ext++; 156 + else if (message_type == RDES_EXT_SYNC) 157 + x->ptp_rx_msg_type_sync++; 155 158 else if (message_type == RDES_EXT_FOLLOW_UP) 156 - x->rx_msg_type_follow_up++; 159 + x->ptp_rx_msg_type_follow_up++; 157 160 else if (message_type == RDES_EXT_DELAY_REQ) 158 - x->rx_msg_type_delay_req++; 161 + x->ptp_rx_msg_type_delay_req++; 159 162 else if (message_type == RDES_EXT_DELAY_RESP) 160 - x->rx_msg_type_delay_resp++; 163 + x->ptp_rx_msg_type_delay_resp++; 161 164 else if (message_type == RDES_EXT_PDELAY_REQ) 162 - x->rx_msg_type_pdelay_req++; 165 + x->ptp_rx_msg_type_pdelay_req++; 163 166 else if (message_type == RDES_EXT_PDELAY_RESP) 164 - x->rx_msg_type_pdelay_resp++; 167 + x->ptp_rx_msg_type_pdelay_resp++; 165 168 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) 166 - x->rx_msg_type_pdelay_follow_up++; 167 - else 168 - x->rx_msg_type_ext_no_ptp++; 169 + x->ptp_rx_msg_type_pdelay_follow_up++; 170 + else if (message_type == RDES_PTP_ANNOUNCE) 171 + x->ptp_rx_msg_type_announce++; 172 + else if (message_type == RDES_PTP_MANAGEMENT) 173 + x->ptp_rx_msg_type_management++; 174 + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) 175 + x->ptp_rx_msg_pkt_reserved_type++; 176 + 169 177 if (rdes4 & ERDES4_PTP_FRAME_TYPE) 170 178 x->ptp_frame_type++; 171 179 if (rdes4 & ERDES4_PTP_VER)
+1
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 129 129 int irq_wake; 130 130 spinlock_t ptp_lock; 131 131 void __iomem *mmcaddr; 132 + void __iomem *ptpaddr; 132 133 u32 rx_tail_addr; 133 134 u32 tx_tail_addr; 134 135 u32 mss;
+11 -8
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 115 115 STMMAC_STAT(ip_csum_bypassed), 116 116 STMMAC_STAT(ipv4_pkt_rcvd), 117 117 STMMAC_STAT(ipv6_pkt_rcvd), 118 - STMMAC_STAT(rx_msg_type_ext_no_ptp), 119 - STMMAC_STAT(rx_msg_type_sync), 120 - STMMAC_STAT(rx_msg_type_follow_up), 121 - STMMAC_STAT(rx_msg_type_delay_req), 122 - STMMAC_STAT(rx_msg_type_delay_resp), 123 - STMMAC_STAT(rx_msg_type_pdelay_req), 124 - STMMAC_STAT(rx_msg_type_pdelay_resp), 125 - STMMAC_STAT(rx_msg_type_pdelay_follow_up), 118 + STMMAC_STAT(no_ptp_rx_msg_type_ext), 119 + STMMAC_STAT(ptp_rx_msg_type_sync), 120 + STMMAC_STAT(ptp_rx_msg_type_follow_up), 121 + STMMAC_STAT(ptp_rx_msg_type_delay_req), 122 + STMMAC_STAT(ptp_rx_msg_type_delay_resp), 123 + STMMAC_STAT(ptp_rx_msg_type_pdelay_req), 124 + STMMAC_STAT(ptp_rx_msg_type_pdelay_resp), 125 + STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up), 126 + STMMAC_STAT(ptp_rx_msg_type_announce), 127 + STMMAC_STAT(ptp_rx_msg_type_management), 128 + STMMAC_STAT(ptp_rx_msg_pkt_reserved_type), 126 129 STMMAC_STAT(ptp_frame_type), 127 130 STMMAC_STAT(ptp_ver), 128 131 STMMAC_STAT(timestamp_dropped),
+34 -9
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
··· 34 34 } 35 35 36 36 static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, 37 - u32 ptp_clock) 37 + u32 ptp_clock, int gmac4) 38 38 { 39 39 u32 value = readl(ioaddr + PTP_TCR); 40 40 unsigned long data; 41 41 42 - /* Convert the ptp_clock to nano second 43 - * formula = (2/ptp_clock) * 1000000000 44 - * where, ptp_clock = 50MHz. 42 + /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second 43 + * formula = (1/ptp_clock) * 1000000000 44 + * where ptp_clock is 50MHz if fine method is used to update system 45 45 */ 46 - data = (2000000000ULL / ptp_clock); 46 + if (value & PTP_TCR_TSCFUPDT) 47 + data = (1000000000ULL / 50000000); 48 + else 49 + data = (1000000000ULL / ptp_clock); 47 50 48 51 /* 0.465ns accuracy */ 49 52 if (!(value & PTP_TCR_TSCTRLSSR)) 50 53 data = (data * 1000) / 465; 54 + 55 + data &= PTP_SSIR_SSINC_MASK; 56 + 57 + if (gmac4) 58 + data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; 51 59 52 60 writel(data, ioaddr + PTP_SSIR); 53 61 ··· 112 104 } 113 105 114 106 static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, 115 - int add_sub) 107 + int add_sub, int gmac4) 116 108 { 117 109 u32 value; 118 110 int limit; 119 111 112 + if (add_sub) { 113 + /* If the new sec value needs to be subtracted with 114 + * the system time, then MAC_STSUR reg should be 115 + * programmed with (2^32 – <new_sec_value>) 116 + */ 117 + if (gmac4) 118 + sec = (100000000ULL - sec); 119 + 120 + value = readl(ioaddr + PTP_TCR); 121 + if (value & PTP_TCR_TSCTRLSSR) 122 + nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec); 123 + else 124 + nsec = (PTP_BINARY_ROLLOVER_MODE - nsec); 125 + } 126 + 120 127 writel(sec, ioaddr + PTP_STSUR); 121 - writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec), 122 - ioaddr + PTP_STNSUR); 128 + value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec; 129 + writel(value, ioaddr + PTP_STNSUR); 130 + 123 131 /* issue command to initialize the system time value */ 124 132 value = readl(ioaddr + PTP_TCR); 125 133 value |= 
PTP_TCR_TSUPDT; ··· 158 134 { 159 135 u64 ns; 160 136 137 + /* Get the TSSS value */ 161 138 ns = readl(ioaddr + PTP_STNSR); 162 - /* convert sec time value to nanosecond */ 139 + /* Get the TSS and convert sec time value to nanosecond */ 163 140 ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; 164 141 165 142 return ns;
+57 -47
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 340 340 341 341 /* stmmac_get_tx_hwtstamp - get HW TX timestamps 342 342 * @priv: driver private structure 343 - * @entry : descriptor index to be used. 343 + * @p : descriptor pointer 344 344 * @skb : the socket buffer 345 345 * Description : 346 346 * This function will read timestamp from the descriptor & pass it to stack. 347 347 * and also perform some sanity checks. 348 348 */ 349 349 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 350 - unsigned int entry, struct sk_buff *skb) 350 + struct dma_desc *p, struct sk_buff *skb) 351 351 { 352 352 struct skb_shared_hwtstamps shhwtstamp; 353 353 u64 ns; 354 - void *desc = NULL; 355 354 356 355 if (!priv->hwts_tx_en) 357 356 return; ··· 359 360 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 360 361 return; 361 362 362 - if (priv->adv_ts) 363 - desc = (priv->dma_etx + entry); 364 - else 365 - desc = (priv->dma_tx + entry); 366 - 367 363 /* check tx tstamp status */ 368 - if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc)) 369 - return; 364 + if (!priv->hw->desc->get_tx_timestamp_status(p)) { 365 + /* get the valid tstamp */ 366 + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 370 367 371 - /* get the valid tstamp */ 372 - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 368 + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 369 + shhwtstamp.hwtstamp = ns_to_ktime(ns); 373 370 374 - memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 375 - shhwtstamp.hwtstamp = ns_to_ktime(ns); 376 - /* pass tstamp to stack */ 377 - skb_tstamp_tx(skb, &shhwtstamp); 371 + netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); 372 + /* pass tstamp to stack */ 373 + skb_tstamp_tx(skb, &shhwtstamp); 374 + } 378 375 379 376 return; 380 377 } 381 378 382 379 /* stmmac_get_rx_hwtstamp - get HW RX timestamps 383 380 * @priv: driver private structure 384 - * @entry : descriptor index to be used. 
381 + * @p : descriptor pointer 382 + * @np : next descriptor pointer 385 383 * @skb : the socket buffer 386 384 * Description : 387 385 * This function will read received packet's timestamp from the descriptor 388 386 * and pass it to stack. It also perform some sanity checks. 389 387 */ 390 - static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, 391 - unsigned int entry, struct sk_buff *skb) 388 + static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, 389 + struct dma_desc *np, struct sk_buff *skb) 392 390 { 393 391 struct skb_shared_hwtstamps *shhwtstamp = NULL; 394 392 u64 ns; 395 - void *desc = NULL; 396 393 397 394 if (!priv->hwts_rx_en) 398 395 return; 399 396 400 - if (priv->adv_ts) 401 - desc = (priv->dma_erx + entry); 402 - else 403 - desc = (priv->dma_rx + entry); 397 + /* Check if timestamp is available */ 398 + if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 399 + /* For GMAC4, the valid timestamp is from CTX next desc. */ 400 + if (priv->plat->has_gmac4) 401 + ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); 402 + else 403 + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 404 404 405 - /* exit if rx tstamp is not valid */ 406 - if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) 407 - return; 408 - 409 - /* get valid tstamp */ 410 - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 411 - shhwtstamp = skb_hwtstamps(skb); 412 - memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 413 - shhwtstamp->hwtstamp = ns_to_ktime(ns); 405 + netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); 406 + shhwtstamp = skb_hwtstamps(skb); 407 + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 408 + shhwtstamp->hwtstamp = ns_to_ktime(ns); 409 + } else { 410 + netdev_err(priv->dev, "cannot get RX hw timestamp\n"); 411 + } 414 412 } 415 413 416 414 /** ··· 594 598 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 595 599 596 600 if (!priv->hwts_tx_en && 
!priv->hwts_rx_en) 597 - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); 601 + priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); 598 602 else { 599 603 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | 600 604 tstamp_all | ptp_v2 | ptp_over_ethernet | 601 605 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | 602 606 ts_master_en | snap_type_sel); 603 - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); 607 + priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value); 604 608 605 609 /* program Sub Second Increment reg */ 606 610 sec_inc = priv->hw->ptp->config_sub_second_increment( 607 - priv->ioaddr, priv->clk_ptp_rate); 611 + priv->ptpaddr, priv->clk_ptp_rate, 612 + priv->plat->has_gmac4); 608 613 temp = div_u64(1000000000ULL, sec_inc); 609 614 610 615 /* calculate default added value: ··· 615 618 */ 616 619 temp = (u64)(temp << 32); 617 620 priv->default_addend = div_u64(temp, priv->clk_ptp_rate); 618 - priv->hw->ptp->config_addend(priv->ioaddr, 621 + priv->hw->ptp->config_addend(priv->ptpaddr, 619 622 priv->default_addend); 620 623 621 624 /* initialize system time */ 622 625 ktime_get_real_ts64(&now); 623 626 624 627 /* lower 32 bits of tv_sec are safe until y2106 */ 625 - priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec, 628 + priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, 626 629 now.tv_nsec); 627 630 } 628 631 ··· 876 879 phy_disconnect(phydev); 877 880 return -ENODEV; 878 881 } 882 + 883 + /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid 884 + * subsequent PHY polling, make sure we force a link transition if 885 + * we have a UP/DOWN/UP transition 886 + */ 887 + if (phydev->is_pseudo_fixed_link) 888 + phydev->irq = PHY_POLL; 879 889 880 890 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" 881 891 " Link = %d\n", dev->name, phydev->phy_id, phydev->link); ··· 1337 1333 priv->dev->stats.tx_packets++; 1338 1334 priv->xstats.tx_pkt_n++; 1339 1335 } 1340 - 
stmmac_get_tx_hwtstamp(priv, entry, skb); 1336 + stmmac_get_tx_hwtstamp(priv, p, skb); 1341 1337 } 1342 1338 1343 1339 if (likely(priv->tx_skbuff_dma[entry].buf)) { ··· 1483 1479 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 1484 1480 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 1485 1481 1486 - if (priv->synopsys_id >= DWMAC_CORE_4_00) 1482 + if (priv->synopsys_id >= DWMAC_CORE_4_00) { 1483 + priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; 1487 1484 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; 1488 - else 1485 + } else { 1486 + priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; 1489 1487 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; 1488 + } 1490 1489 1491 1490 dwmac_mmc_intr_all_mask(priv->mmcaddr); 1492 1491 ··· 2484 2477 if (netif_msg_rx_status(priv)) { 2485 2478 void *rx_head; 2486 2479 2487 - pr_debug("%s: descriptor ring:\n", __func__); 2480 + pr_info(">>>>>> %s: descriptor ring:\n", __func__); 2488 2481 if (priv->extend_desc) 2489 2482 rx_head = (void *)priv->dma_erx; 2490 2483 else ··· 2495 2488 while (count < limit) { 2496 2489 int status; 2497 2490 struct dma_desc *p; 2491 + struct dma_desc *np; 2498 2492 2499 2493 if (priv->extend_desc) 2500 2494 p = (struct dma_desc *)(priv->dma_erx + entry); ··· 2515 2507 next_entry = priv->cur_rx; 2516 2508 2517 2509 if (priv->extend_desc) 2518 - prefetch(priv->dma_erx + next_entry); 2510 + np = (struct dma_desc *)(priv->dma_erx + next_entry); 2519 2511 else 2520 - prefetch(priv->dma_rx + next_entry); 2512 + np = priv->dma_rx + next_entry; 2513 + 2514 + prefetch(np); 2521 2515 2522 2516 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) 2523 2517 priv->hw->desc->rx_extended_status(&priv->dev->stats, ··· 2571 2561 frame_len -= ETH_FCS_LEN; 2572 2562 2573 2563 if (netif_msg_rx_status(priv)) { 2574 - pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", 2564 + pr_info("\tdesc: %p [entry %d] buff=0x%x\n", 2575 2565 p, entry, des); 2576 2566 if (frame_len > ETH_FRAME_LEN) 2577 
2567 pr_debug("\tframe size %d, COE: %d\n", ··· 2628 2618 DMA_FROM_DEVICE); 2629 2619 } 2630 2620 2631 - stmmac_get_rx_hwtstamp(priv, entry, skb); 2632 - 2633 2621 if (netif_msg_pktdata(priv)) { 2634 2622 pr_debug("frame received (%dbytes)", frame_len); 2635 2623 print_pkt(skb->data, frame_len); 2636 2624 } 2625 + 2626 + stmmac_get_rx_hwtstamp(priv, p, np, skb); 2637 2627 2638 2628 stmmac_rx_vlan(priv->dev, skb); 2639 2629
+5 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
··· 54 54 55 55 spin_lock_irqsave(&priv->ptp_lock, flags); 56 56 57 - priv->hw->ptp->config_addend(priv->ioaddr, addend); 57 + priv->hw->ptp->config_addend(priv->ptpaddr, addend); 58 58 59 59 spin_unlock_irqrestore(&priv->ptp_lock, flags); 60 60 ··· 89 89 90 90 spin_lock_irqsave(&priv->ptp_lock, flags); 91 91 92 - priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); 92 + priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj, 93 + priv->plat->has_gmac4); 93 94 94 95 spin_unlock_irqrestore(&priv->ptp_lock, flags); 95 96 ··· 115 114 116 115 spin_lock_irqsave(&priv->ptp_lock, flags); 117 116 118 - ns = priv->hw->ptp->get_systime(priv->ioaddr); 117 + ns = priv->hw->ptp->get_systime(priv->ptpaddr); 119 118 120 119 spin_unlock_irqrestore(&priv->ptp_lock, flags); 121 120 ··· 142 141 143 142 spin_lock_irqsave(&priv->ptp_lock, flags); 144 143 145 - priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec); 144 + priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec); 146 145 147 146 spin_unlock_irqrestore(&priv->ptp_lock, flags); 148 147
+37 -35
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
··· 22 22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> 23 23 ******************************************************************************/ 24 24 25 - #ifndef __STMMAC_PTP_H__ 26 - #define __STMMAC_PTP_H__ 25 + #ifndef __STMMAC_PTP_H__ 26 + #define __STMMAC_PTP_H__ 27 + 28 + #define PTP_GMAC4_OFFSET 0xb00 29 + #define PTP_GMAC3_X_OFFSET 0x700 27 30 28 31 /* IEEE 1588 PTP register offsets */ 29 - #define PTP_TCR 0x0700 /* Timestamp Control Reg */ 30 - #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ 31 - #define PTP_STSR 0x0708 /* System Time – Seconds Regr */ 32 - #define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */ 33 - #define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */ 34 - #define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */ 35 - #define PTP_TAR 0x0718 /* Timestamp Addend Reg */ 36 - #define PTP_TTSR 0x071C /* Target Time Seconds Reg */ 37 - #define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */ 38 - #define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */ 39 - #define PTP_TSR 0x0728 /* Timestamp Status */ 32 + #define PTP_TCR 0x00 /* Timestamp Control Reg */ 33 + #define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ 34 + #define PTP_STSR 0x08 /* System Time – Seconds Regr */ 35 + #define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ 36 + #define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ 37 + #define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ 38 + #define PTP_TAR 0x18 /* Timestamp Addend Reg */ 40 39 41 - #define PTP_STNSUR_ADDSUB_SHIFT 31 40 + #define PTP_STNSUR_ADDSUB_SHIFT 31 41 + #define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ 42 + #define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ 42 43 43 - /* PTP TCR defines */ 44 - #define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */ 45 - #define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */ 46 - #define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */ 47 - #define 
PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */ 48 - /* Timestamp Interrupt Trigger Enable */ 49 - #define PTP_TCR_TSTRIG 0x00000010 50 - #define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */ 51 - #define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */ 52 - /* Timestamp Digital or Binary Rollover Control */ 53 - #define PTP_TCR_TSCTRLSSR 0x00000200 54 - 44 + /* PTP Timestamp control register defines */ 45 + #define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */ 46 + #define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ 47 + #define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ 48 + #define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ 49 + #define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ 50 + #define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ 51 + #define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ 52 + #define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ 55 53 /* Enable PTP packet Processing for Version 2 Format */ 56 - #define PTP_TCR_TSVER2ENA 0x00000400 54 + #define PTP_TCR_TSVER2ENA BIT(10) 57 55 /* Enable Processing of PTP over Ethernet Frames */ 58 - #define PTP_TCR_TSIPENA 0x00000800 56 + #define PTP_TCR_TSIPENA BIT(11) 59 57 /* Enable Processing of PTP Frames Sent over IPv6-UDP */ 60 - #define PTP_TCR_TSIPV6ENA 0x00001000 58 + #define PTP_TCR_TSIPV6ENA BIT(12) 61 59 /* Enable Processing of PTP Frames Sent over IPv4-UDP */ 62 - #define PTP_TCR_TSIPV4ENA 0x00002000 60 + #define PTP_TCR_TSIPV4ENA BIT(13) 63 61 /* Enable Timestamp Snapshot for Event Messages */ 64 - #define PTP_TCR_TSEVNTENA 0x00004000 62 + #define PTP_TCR_TSEVNTENA BIT(14) 65 63 /* Enable Snapshot for Messages Relevant to Master */ 66 - #define PTP_TCR_TSMSTRENA 0x00008000 64 + #define PTP_TCR_TSMSTRENA BIT(15) 67 65 /* Select PTP packets for Taking Snapshots */ 68 - #define PTP_TCR_SNAPTYPSEL_1 0x00010000 66 + #define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) 69 67 /* Enable MAC address for PTP 
Frame Filtering */ 70 - #define PTP_TCR_TSENMACADDR 0x00040000 68 + #define PTP_TCR_TSENMACADDR BIT(18) 71 69 72 - #endif /* __STMMAC_PTP_H__ */ 70 + /* SSIR defines */ 71 + #define PTP_SSIR_SSINC_MASK 0xff 72 + #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 73 + 74 + #endif /* __STMMAC_PTP_H__ */
+3 -2
drivers/net/ethernet/sun/sunbmac.c
··· 623 623 void __iomem *gregs = bp->gregs; 624 624 void __iomem *cregs = bp->creg; 625 625 void __iomem *bregs = bp->bregs; 626 + __u32 bblk_dvma = (__u32)bp->bblock_dvma; 626 627 unsigned char *e = &bp->dev->dev_addr[0]; 627 628 628 629 /* Latch current counters into statistics. */ ··· 672 671 bregs + BMAC_XIFCFG); 673 672 674 673 /* Tell the QEC where the ring descriptors are. */ 675 - sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), 674 + sbus_writel(bblk_dvma + bib_offset(be_rxd, 0), 676 675 cregs + CREG_RXDS); 677 - sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), 676 + sbus_writel(bblk_dvma + bib_offset(be_txd, 0), 678 677 cregs + CREG_TXDS); 679 678 680 679 /* Setup the FIFO pointers into QEC local memory. */
+1 -1
drivers/net/ethernet/sun/sunbmac.h
··· 291 291 void __iomem *bregs; /* BigMAC Registers */ 292 292 void __iomem *tregs; /* BigMAC Transceiver */ 293 293 struct bmac_init_block *bmac_block; /* RX and TX descriptors */ 294 - __u32 bblock_dvma; /* RX and TX descriptors */ 294 + dma_addr_t bblock_dvma; /* RX and TX descriptors */ 295 295 296 296 spinlock_t lock; 297 297
+6 -5
drivers/net/ethernet/sun/sunqe.c
··· 124 124 { 125 125 struct qe_init_block *qb = qep->qe_block; 126 126 struct sunqe_buffers *qbufs = qep->buffers; 127 - __u32 qbufs_dvma = qep->buffers_dvma; 127 + __u32 qbufs_dvma = (__u32)qep->buffers_dvma; 128 128 int i; 129 129 130 130 qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; ··· 144 144 void __iomem *mregs = qep->mregs; 145 145 void __iomem *gregs = qecp->gregs; 146 146 unsigned char *e = &qep->dev->dev_addr[0]; 147 + __u32 qblk_dvma = (__u32)qep->qblock_dvma; 147 148 u32 tmp; 148 149 int i; 149 150 ··· 153 152 return -EAGAIN; 154 153 155 154 /* Setup initial rx/tx init block pointers. */ 156 - sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); 157 - sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); 155 + sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); 156 + sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); 158 157 159 158 /* Enable/mask the various irq's. */ 160 159 sbus_writel(0, cregs + CREG_RIMASK); ··· 414 413 struct net_device *dev = qep->dev; 415 414 struct qe_rxd *this; 416 415 struct sunqe_buffers *qbufs = qep->buffers; 417 - __u32 qbufs_dvma = qep->buffers_dvma; 416 + __u32 qbufs_dvma = (__u32)qep->buffers_dvma; 418 417 int elem = qep->rx_new; 419 418 u32 flags; 420 419 ··· 573 572 { 574 573 struct sunqe *qep = netdev_priv(dev); 575 574 struct sunqe_buffers *qbufs = qep->buffers; 576 - __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; 575 + __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma; 577 576 unsigned char *txbuf; 578 577 int len, entry; 579 578
+2 -2
drivers/net/ethernet/sun/sunqe.h
··· 334 334 void __iomem *qcregs; /* QEC per-channel Registers */ 335 335 void __iomem *mregs; /* Per-channel MACE Registers */ 336 336 struct qe_init_block *qe_block; /* RX and TX descriptors */ 337 - __u32 qblock_dvma; /* RX and TX descriptors */ 337 + dma_addr_t qblock_dvma; /* RX and TX descriptors */ 338 338 spinlock_t lock; /* Protects txfull state */ 339 339 int rx_new, rx_old; /* RX ring extents */ 340 340 int tx_new, tx_old; /* TX ring extents */ 341 341 struct sunqe_buffers *buffers; /* CPU visible address. */ 342 - __u32 buffers_dvma; /* DVMA visible address. */ 342 + dma_addr_t buffers_dvma; /* DVMA visible address. */ 343 343 struct sunqec *parent; 344 344 u8 mconfig; /* Base MACE mconfig value */ 345 345 struct platform_device *op; /* QE's OF device struct */
+3
drivers/net/ethernet/ti/cpsw-phy-sel.c
··· 176 176 } 177 177 178 178 dev = bus_find_device(&platform_bus_type, NULL, node, match); 179 + of_node_put(node); 179 180 priv = dev_get_drvdata(dev); 180 181 181 182 priv->cpsw_phy_sel(priv, phy_mode, slave); 183 + 184 + put_device(dev); 182 185 } 183 186 EXPORT_SYMBOL_GPL(cpsw_phy_sel); 184 187
+74 -21
drivers/net/ethernet/ti/cpsw.c
··· 2375 2375 * to the PHY is the Ethernet MAC DT node. 2376 2376 */ 2377 2377 ret = of_phy_register_fixed_link(slave_node); 2378 - if (ret) 2378 + if (ret) { 2379 + if (ret != -EPROBE_DEFER) 2380 + dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret); 2379 2381 return ret; 2382 + } 2380 2383 slave_data->phy_node = of_node_get(slave_node); 2381 2384 } else if (parp) { 2382 2385 u32 phyid; ··· 2400 2397 } 2401 2398 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2402 2399 PHY_ID_FMT, mdio->name, phyid); 2400 + put_device(&mdio->dev); 2403 2401 } else { 2404 2402 dev_err(&pdev->dev, 2405 2403 "No slave[%d] phy_id, phy-handle, or fixed-link property\n", ··· 2442 2438 } 2443 2439 2444 2440 return 0; 2441 + } 2442 + 2443 + static void cpsw_remove_dt(struct platform_device *pdev) 2444 + { 2445 + struct net_device *ndev = platform_get_drvdata(pdev); 2446 + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 2447 + struct cpsw_platform_data *data = &cpsw->data; 2448 + struct device_node *node = pdev->dev.of_node; 2449 + struct device_node *slave_node; 2450 + int i = 0; 2451 + 2452 + for_each_available_child_of_node(node, slave_node) { 2453 + struct cpsw_slave_data *slave_data = &data->slave_data[i]; 2454 + 2455 + if (strcmp(slave_node->name, "slave")) 2456 + continue; 2457 + 2458 + if (of_phy_is_fixed_link(slave_node)) { 2459 + struct phy_device *phydev; 2460 + 2461 + phydev = of_phy_find_device(slave_node); 2462 + if (phydev) { 2463 + fixed_phy_unregister(phydev); 2464 + /* Put references taken by 2465 + * of_phy_find_device() and 2466 + * of_phy_register_fixed_link(). 
2467 + */ 2468 + phy_device_free(phydev); 2469 + phy_device_free(phydev); 2470 + } 2471 + } 2472 + 2473 + of_node_put(slave_data->phy_node); 2474 + 2475 + i++; 2476 + if (i == data->slaves) 2477 + break; 2478 + } 2479 + 2480 + of_platform_depopulate(&pdev->dev); 2445 2481 } 2446 2482 2447 2483 static int cpsw_probe_dual_emac(struct cpsw_priv *priv) ··· 2591 2547 int irq; 2592 2548 2593 2549 cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); 2550 + if (!cpsw) 2551 + return -ENOMEM; 2552 + 2594 2553 cpsw->dev = &pdev->dev; 2595 2554 2596 2555 ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); ··· 2631 2584 /* Select default pin state */ 2632 2585 pinctrl_pm_select_default_state(&pdev->dev); 2633 2586 2634 - if (cpsw_probe_dt(&cpsw->data, pdev)) { 2635 - dev_err(&pdev->dev, "cpsw: platform data missing\n"); 2636 - ret = -ENODEV; 2587 + /* Need to enable clocks with runtime PM api to access module 2588 + * registers 2589 + */ 2590 + ret = pm_runtime_get_sync(&pdev->dev); 2591 + if (ret < 0) { 2592 + pm_runtime_put_noidle(&pdev->dev); 2637 2593 goto clean_runtime_disable_ret; 2638 2594 } 2595 + 2596 + ret = cpsw_probe_dt(&cpsw->data, pdev); 2597 + if (ret) 2598 + goto clean_dt_ret; 2599 + 2639 2600 data = &cpsw->data; 2640 2601 cpsw->rx_ch_num = 1; 2641 2602 cpsw->tx_ch_num = 1; ··· 2663 2608 GFP_KERNEL); 2664 2609 if (!cpsw->slaves) { 2665 2610 ret = -ENOMEM; 2666 - goto clean_runtime_disable_ret; 2611 + goto clean_dt_ret; 2667 2612 } 2668 2613 for (i = 0; i < data->slaves; i++) 2669 2614 cpsw->slaves[i].slave_num = i; ··· 2675 2620 if (IS_ERR(clk)) { 2676 2621 dev_err(priv->dev, "fck is not found\n"); 2677 2622 ret = -ENODEV; 2678 - goto clean_runtime_disable_ret; 2623 + goto clean_dt_ret; 2679 2624 } 2680 2625 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; 2681 2626 ··· 2683 2628 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); 2684 2629 if (IS_ERR(ss_regs)) { 2685 2630 ret = PTR_ERR(ss_regs); 2686 - goto 
clean_runtime_disable_ret; 2631 + goto clean_dt_ret; 2687 2632 } 2688 2633 cpsw->regs = ss_regs; 2689 2634 2690 - /* Need to enable clocks with runtime PM api to access module 2691 - * registers 2692 - */ 2693 - ret = pm_runtime_get_sync(&pdev->dev); 2694 - if (ret < 0) { 2695 - pm_runtime_put_noidle(&pdev->dev); 2696 - goto clean_runtime_disable_ret; 2697 - } 2698 2635 cpsw->version = readl(&cpsw->regs->id_ver); 2699 - pm_runtime_put_sync(&pdev->dev); 2700 2636 2701 2637 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2702 2638 cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); 2703 2639 if (IS_ERR(cpsw->wr_regs)) { 2704 2640 ret = PTR_ERR(cpsw->wr_regs); 2705 - goto clean_runtime_disable_ret; 2641 + goto clean_dt_ret; 2706 2642 } 2707 2643 2708 2644 memset(&dma_params, 0, sizeof(dma_params)); ··· 2730 2684 default: 2731 2685 dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); 2732 2686 ret = -ENODEV; 2733 - goto clean_runtime_disable_ret; 2687 + goto clean_dt_ret; 2734 2688 } 2735 2689 for (i = 0; i < cpsw->data.slaves; i++) { 2736 2690 struct cpsw_slave *slave = &cpsw->slaves[i]; ··· 2759 2713 if (!cpsw->dma) { 2760 2714 dev_err(priv->dev, "error initializing dma\n"); 2761 2715 ret = -ENOMEM; 2762 - goto clean_runtime_disable_ret; 2716 + goto clean_dt_ret; 2763 2717 } 2764 2718 2765 2719 cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); ··· 2857 2811 ret = cpsw_probe_dual_emac(priv); 2858 2812 if (ret) { 2859 2813 cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); 2860 - goto clean_ale_ret; 2814 + goto clean_unregister_netdev_ret; 2861 2815 } 2862 2816 } 2863 2817 2818 + pm_runtime_put(&pdev->dev); 2819 + 2864 2820 return 0; 2865 2821 2822 + clean_unregister_netdev_ret: 2823 + unregister_netdev(ndev); 2866 2824 clean_ale_ret: 2867 2825 cpsw_ale_destroy(cpsw->ale); 2868 2826 clean_dma_ret: 2869 2827 cpdma_ctlr_destroy(cpsw->dma); 2828 + clean_dt_ret: 2829 + cpsw_remove_dt(pdev); 2830 + 
pm_runtime_put_sync(&pdev->dev); 2870 2831 clean_runtime_disable_ret: 2871 2832 pm_runtime_disable(&pdev->dev); 2872 2833 clean_ndev_ret: ··· 2899 2846 2900 2847 cpsw_ale_destroy(cpsw->ale); 2901 2848 cpdma_ctlr_destroy(cpsw->dma); 2902 - of_platform_depopulate(&pdev->dev); 2849 + cpsw_remove_dt(pdev); 2903 2850 pm_runtime_put_sync(&pdev->dev); 2904 2851 pm_runtime_disable(&pdev->dev); 2905 2852 if (cpsw->data.dual_emac)
+6 -4
drivers/net/ethernet/ti/davinci_emac.c
··· 1410 1410 int i = 0; 1411 1411 struct emac_priv *priv = netdev_priv(ndev); 1412 1412 struct phy_device *phydev = NULL; 1413 + struct device *phy = NULL; 1413 1414 1414 1415 ret = pm_runtime_get_sync(&priv->pdev->dev); 1415 1416 if (ret < 0) { ··· 1489 1488 1490 1489 /* use the first phy on the bus if pdata did not give us a phy id */ 1491 1490 if (!phydev && !priv->phy_id) { 1492 - struct device *phy; 1493 - 1494 1491 phy = bus_find_device(&mdio_bus_type, NULL, NULL, 1495 1492 match_first_device); 1496 - if (phy) 1493 + if (phy) { 1497 1494 priv->phy_id = dev_name(phy); 1495 + if (!priv->phy_id || !*priv->phy_id) 1496 + put_device(phy); 1497 + } 1498 1498 } 1499 1499 1500 1500 if (!phydev && priv->phy_id && *priv->phy_id) { 1501 1501 phydev = phy_connect(ndev, priv->phy_id, 1502 1502 &emac_adjust_link, 1503 1503 PHY_INTERFACE_MODE_MII); 1504 - 1504 + put_device(phy); /* reference taken by bus_find_device */ 1505 1505 if (IS_ERR(phydev)) { 1506 1506 dev_err(emac_dev, "could not connect to phy %s\n", 1507 1507 priv->phy_id);
+1 -1
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
··· 1694 1694 pr_debug("%s: bssid matched\n", __func__); 1695 1695 break; 1696 1696 } else { 1697 - pr_debug("%s: bssid unmached\n", __func__); 1697 + pr_debug("%s: bssid unmatched\n", __func__); 1698 1698 continue; 1699 1699 } 1700 1700 }
+1 -2
drivers/net/ethernet/xscale/ixp4xx_eth.c
··· 708 708 if (!qmgr_stat_below_low_watermark(rxq) && 709 709 napi_reschedule(napi)) { /* not empty again */ 710 710 #if DEBUG_RX 711 - printk(KERN_DEBUG "%s: eth_poll" 712 - " napi_reschedule successed\n", 711 + printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n", 713 712 dev->name); 714 713 #endif 715 714 qmgr_disable_irq(rxq);
+22 -9
drivers/net/macvlan.c
··· 1278 1278 struct net_device *lowerdev; 1279 1279 int err; 1280 1280 int macmode; 1281 + bool create = false; 1281 1282 1282 1283 if (!tb[IFLA_LINK]) 1283 1284 return -EINVAL; ··· 1305 1304 err = macvlan_port_create(lowerdev); 1306 1305 if (err < 0) 1307 1306 return err; 1307 + create = true; 1308 1308 } 1309 1309 port = macvlan_port_get_rtnl(lowerdev); 1310 1310 1311 1311 /* Only 1 macvlan device can be created in passthru mode */ 1312 - if (port->passthru) 1313 - return -EINVAL; 1312 + if (port->passthru) { 1313 + /* The macvlan port must be not created this time, 1314 + * still goto destroy_macvlan_port for readability. 1315 + */ 1316 + err = -EINVAL; 1317 + goto destroy_macvlan_port; 1318 + } 1314 1319 1315 1320 vlan->lowerdev = lowerdev; 1316 1321 vlan->dev = dev; ··· 1332 1325 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 1333 1326 1334 1327 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 1335 - if (port->count) 1336 - return -EINVAL; 1328 + if (port->count) { 1329 + err = -EINVAL; 1330 + goto destroy_macvlan_port; 1331 + } 1337 1332 port->passthru = true; 1338 1333 eth_hw_addr_inherit(dev, lowerdev); 1339 1334 } 1340 1335 1341 1336 if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { 1342 - if (vlan->mode != MACVLAN_MODE_SOURCE) 1343 - return -EINVAL; 1337 + if (vlan->mode != MACVLAN_MODE_SOURCE) { 1338 + err = -EINVAL; 1339 + goto destroy_macvlan_port; 1340 + } 1344 1341 macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); 1345 1342 err = macvlan_changelink_sources(vlan, macmode, data); 1346 1343 if (err) 1347 - return err; 1344 + goto destroy_macvlan_port; 1348 1345 } 1349 1346 1350 1347 err = register_netdevice(dev); 1351 1348 if (err < 0) 1352 - return err; 1349 + goto destroy_macvlan_port; 1353 1350 1354 1351 dev->priv_flags |= IFF_MACVLAN; 1355 1352 err = netdev_upper_dev_link(lowerdev, dev); ··· 1368 1357 1369 1358 unregister_netdev: 1370 1359 unregister_netdevice(dev); 1371 - 1360 + destroy_macvlan_port: 1361 + if (create) 1362 + 
macvlan_port_destroy(port->dev); 1372 1363 return err; 1373 1364 } 1374 1365 EXPORT_SYMBOL_GPL(macvlan_common_newlink);
+1 -1
drivers/net/phy/fixed_phy.c
··· 279 279 void fixed_phy_unregister(struct phy_device *phy) 280 280 { 281 281 phy_device_remove(phy); 282 - 282 + of_node_put(phy->mdio.dev.of_node); 283 283 fixed_phy_del(phy->mdio.addr); 284 284 } 285 285 EXPORT_SYMBOL_GPL(fixed_phy_unregister);
+2
drivers/net/phy/phy_device.c
··· 723 723 phydev = to_phy_device(d); 724 724 725 725 rc = phy_connect_direct(dev, phydev, handler, interface); 726 + put_device(d); 726 727 if (rc) 727 728 return ERR_PTR(rc); 728 729 ··· 954 953 phydev = to_phy_device(d); 955 954 956 955 rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); 956 + put_device(d); 957 957 if (rc) 958 958 return ERR_PTR(rc); 959 959
+33 -1
drivers/net/phy/vitesse.c
··· 62 62 /* Vitesse Extended Page Access Register */ 63 63 #define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f 64 64 65 + /* Vitesse VSC8601 Extended PHY Control Register 1 */ 66 + #define MII_VSC8601_EPHY_CTL 0x17 67 + #define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8) 68 + 65 69 #define PHY_ID_VSC8234 0x000fc620 66 70 #define PHY_ID_VSC8244 0x000fc6c0 67 71 #define PHY_ID_VSC8514 0x00070670 ··· 113 109 err = vsc824x_add_skew(phydev); 114 110 115 111 return err; 112 + } 113 + 114 + /* This adds a skew for both TX and RX clocks, so the skew should only be 115 + * applied to "rgmii-id" interfaces. It may not work as expected 116 + * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */ 117 + static int vsc8601_add_skew(struct phy_device *phydev) 118 + { 119 + int ret; 120 + 121 + ret = phy_read(phydev, MII_VSC8601_EPHY_CTL); 122 + if (ret < 0) 123 + return ret; 124 + 125 + ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW; 126 + return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret); 127 + } 128 + 129 + static int vsc8601_config_init(struct phy_device *phydev) 130 + { 131 + int ret = 0; 132 + 133 + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) 134 + ret = vsc8601_add_skew(phydev); 135 + 136 + if (ret < 0) 137 + return ret; 138 + 139 + return genphy_config_init(phydev); 116 140 } 117 141 118 142 static int vsc824x_ack_interrupt(struct phy_device *phydev) ··· 307 275 .phy_id_mask = 0x000ffff0, 308 276 .features = PHY_GBIT_FEATURES, 309 277 .flags = PHY_HAS_INTERRUPT, 310 - .config_init = &genphy_config_init, 278 + .config_init = &vsc8601_config_init, 311 279 .config_aneg = &genphy_config_aneg, 312 280 .read_status = &genphy_read_status, 313 281 .ack_interrupt = &vsc824x_ack_interrupt,
+17
drivers/net/usb/ax88179_178a.c
··· 1656 1656 .tx_fixup = ax88179_tx_fixup, 1657 1657 }; 1658 1658 1659 + static const struct driver_info cypress_GX3_info = { 1660 + .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller", 1661 + .bind = ax88179_bind, 1662 + .unbind = ax88179_unbind, 1663 + .status = ax88179_status, 1664 + .link_reset = ax88179_link_reset, 1665 + .reset = ax88179_reset, 1666 + .stop = ax88179_stop, 1667 + .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1668 + .rx_fixup = ax88179_rx_fixup, 1669 + .tx_fixup = ax88179_tx_fixup, 1670 + }; 1671 + 1659 1672 static const struct driver_info dlink_dub1312_info = { 1660 1673 .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", 1661 1674 .bind = ax88179_bind, ··· 1730 1717 /* ASIX AX88178A 10/100/1000 */ 1731 1718 USB_DEVICE(0x0b95, 0x178a), 1732 1719 .driver_info = (unsigned long)&ax88178a_info, 1720 + }, { 1721 + /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */ 1722 + USB_DEVICE(0x04b4, 0x3610), 1723 + .driver_info = (unsigned long)&cypress_GX3_info, 1733 1724 }, { 1734 1725 /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ 1735 1726 USB_DEVICE(0x2001, 0x4a00),
+12 -9
drivers/net/usb/r8152.c
··· 1730 1730 u8 checksum = CHECKSUM_NONE; 1731 1731 u32 opts2, opts3; 1732 1732 1733 - if (tp->version == RTL_VER_01) 1733 + if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) 1734 1734 goto return_result; 1735 1735 1736 1736 opts2 = le32_to_cpu(rx_desc->opts2); ··· 1745 1745 checksum = CHECKSUM_NONE; 1746 1746 else 1747 1747 checksum = CHECKSUM_UNNECESSARY; 1748 - } else if (RD_IPV6_CS) { 1748 + } else if (opts2 & RD_IPV6_CS) { 1749 1749 if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) 1750 1750 checksum = CHECKSUM_UNNECESSARY; 1751 1751 else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) ··· 3266 3266 goto out; 3267 3267 3268 3268 res = usb_autopm_get_interface(tp->intf); 3269 - if (res < 0) { 3270 - free_all_mem(tp); 3271 - goto out; 3272 - } 3269 + if (res < 0) 3270 + goto out_free; 3273 3271 3274 3272 mutex_lock(&tp->control); 3275 3273 ··· 3283 3285 netif_device_detach(tp->netdev); 3284 3286 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", 3285 3287 res); 3286 - free_all_mem(tp); 3287 - } else { 3288 - napi_enable(&tp->napi); 3288 + goto out_unlock; 3289 3289 } 3290 + napi_enable(&tp->napi); 3290 3291 3291 3292 mutex_unlock(&tp->control); 3292 3293 ··· 3294 3297 tp->pm_notifier.notifier_call = rtl_notifier; 3295 3298 register_pm_notifier(&tp->pm_notifier); 3296 3299 #endif 3300 + return 0; 3297 3301 3302 + out_unlock: 3303 + mutex_unlock(&tp->control); 3304 + usb_autopm_put_interface(tp->intf); 3305 + out_free: 3306 + free_all_mem(tp); 3298 3307 out: 3299 3308 return res; 3300 3309 }
+25 -10
drivers/net/virtio_net.c
··· 1497 1497 netif_napi_del(&vi->rq[i].napi); 1498 1498 } 1499 1499 1500 + /* We called napi_hash_del() before netif_napi_del(), 1501 + * we need to respect an RCU grace period before freeing vi->rq 1502 + */ 1503 + synchronize_net(); 1504 + 1500 1505 kfree(vi->rq); 1501 1506 kfree(vi->sq); 1502 1507 } ··· 2043 2038 { 0 }, 2044 2039 }; 2045 2040 2041 + #define VIRTNET_FEATURES \ 2042 + VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ 2043 + VIRTIO_NET_F_MAC, \ 2044 + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ 2045 + VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ 2046 + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ 2047 + VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ 2048 + VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ 2049 + VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ 2050 + VIRTIO_NET_F_CTRL_MAC_ADDR, \ 2051 + VIRTIO_NET_F_MTU 2052 + 2046 2053 static unsigned int features[] = { 2047 - VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 2048 - VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 2049 - VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 2050 - VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 2051 - VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 2052 - VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 2053 - VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 2054 - VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 2055 - VIRTIO_NET_F_CTRL_MAC_ADDR, 2054 + VIRTNET_FEATURES, 2055 + }; 2056 + 2057 + static unsigned int features_legacy[] = { 2058 + VIRTNET_FEATURES, 2059 + VIRTIO_NET_F_GSO, 2056 2060 VIRTIO_F_ANY_LAYOUT, 2057 - VIRTIO_NET_F_MTU, 2058 2061 }; 2059 2062 2060 2063 static struct virtio_driver virtio_net_driver = { 2061 2064 .feature_table = features, 2062 2065 .feature_table_size = ARRAY_SIZE(features), 2066 + .feature_table_legacy = features_legacy, 2067 + .feature_table_size_legacy = ARRAY_SIZE(features_legacy), 2063 2068 
.driver.name = KBUILD_MODNAME, 2064 2069 .driver.owner = THIS_MODULE, 2065 2070 .id_table = id_table,
+3 -1
drivers/net/vxlan.c
··· 944 944 { 945 945 struct vxlan_dev *vxlan; 946 946 struct vxlan_sock *sock4; 947 - struct vxlan_sock *sock6 = NULL; 947 + #if IS_ENABLED(CONFIG_IPV6) 948 + struct vxlan_sock *sock6; 949 + #endif 948 950 unsigned short family = dev->default_dst.remote_ip.sa.sa_family; 949 951 950 952 sock4 = rtnl_dereference(dev->vn4_sock);
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 4516 4516 /* store current 11d setting */ 4517 4517 if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, 4518 4518 &ifp->vif->is_11d)) { 4519 - supports_11d = false; 4519 + is_11d = supports_11d = false; 4520 4520 } else { 4521 4521 country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, 4522 4522 settings->beacon.tail_len,
+38 -11
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 1087 1087 ret = iwl_mvm_switch_to_d3(mvm); 1088 1088 if (ret) 1089 1089 return ret; 1090 + } else { 1091 + /* In theory, we wouldn't have to stop a running sched 1092 + * scan in order to start another one (for 1093 + * net-detect). But in practice this doesn't seem to 1094 + * work properly, so stop any running sched_scan now. 1095 + */ 1096 + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 1097 + if (ret) 1098 + return ret; 1090 1099 } 1091 1100 1092 1101 /* rfkill release can be either for wowlan or netdetect */ ··· 1263 1254 out: 1264 1255 if (ret < 0) { 1265 1256 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 1266 - ieee80211_restart_hw(mvm->hw); 1257 + if (mvm->restart_fw > 0) { 1258 + mvm->restart_fw--; 1259 + ieee80211_restart_hw(mvm->hw); 1260 + } 1267 1261 iwl_mvm_free_nd(mvm); 1268 1262 } 1269 1263 out_noreset: ··· 2100 2088 iwl_mvm_update_changed_regdom(mvm); 2101 2089 2102 2090 if (mvm->net_detect) { 2091 + /* If this is a non-unified image, we restart the FW, 2092 + * so no need to stop the netdetect scan. If that 2093 + * fails, continue and try to get the wake-up reasons, 2094 + * but trigger a HW restart by keeping a failure code 2095 + * in ret. 
2096 + */ 2097 + if (unified_image) 2098 + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, 2099 + false); 2100 + 2103 2101 iwl_mvm_query_netdetect_reasons(mvm, vif); 2104 2102 /* has unlocked the mutex, so skip that */ 2105 2103 goto out; ··· 2293 2271 static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) 2294 2272 { 2295 2273 struct iwl_mvm *mvm = inode->i_private; 2296 - int remaining_time = 10; 2274 + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 2275 + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 2297 2276 2298 2277 mvm->d3_test_active = false; 2299 2278 ··· 2305 2282 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2306 2283 2307 2284 iwl_abort_notification_waits(&mvm->notif_wait); 2308 - ieee80211_restart_hw(mvm->hw); 2285 + if (!unified_image) { 2286 + int remaining_time = 10; 2309 2287 2310 - /* wait for restart and disconnect all interfaces */ 2311 - while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2312 - remaining_time > 0) { 2313 - remaining_time--; 2314 - msleep(1000); 2288 + ieee80211_restart_hw(mvm->hw); 2289 + 2290 + /* wait for restart and disconnect all interfaces */ 2291 + while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2292 + remaining_time > 0) { 2293 + remaining_time--; 2294 + msleep(1000); 2295 + } 2296 + 2297 + if (remaining_time == 0) 2298 + IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); 2315 2299 } 2316 - 2317 - if (remaining_time == 0) 2318 - IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); 2319 2300 2320 2301 ieee80211_iterate_active_interfaces_atomic( 2321 2302 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+2 -2
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
··· 1529 1529 .data = { &cmd, }, 1530 1530 .len = { sizeof(cmd) }, 1531 1531 }; 1532 - size_t delta, len; 1533 - ssize_t ret; 1532 + size_t delta; 1533 + ssize_t ret, len; 1534 1534 1535 1535 hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, 1536 1536 DEBUG_GROUP, 0);
+1 -2
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 4121 4121 struct iwl_mvm_internal_rxq_notif *notif, 4122 4122 u32 size) 4123 4123 { 4124 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq); 4125 4124 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; 4126 4125 int ret; 4127 4126 ··· 4142 4143 } 4143 4144 4144 4145 if (notif->sync) 4145 - ret = wait_event_timeout(notif_waitq, 4146 + ret = wait_event_timeout(mvm->rx_sync_waitq, 4146 4147 atomic_read(&mvm->queue_sync_counter) == 0, 4147 4148 HZ); 4148 4149 WARN_ON_ONCE(!ret);
+1
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
··· 937 937 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ 938 938 spinlock_t d0i3_tx_lock; 939 939 wait_queue_head_t d0i3_exit_waitq; 940 + wait_queue_head_t rx_sync_waitq; 940 941 941 942 /* BT-Coex */ 942 943 struct iwl_bt_coex_profile_notif last_bt_notif;
+1
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
··· 619 619 spin_lock_init(&mvm->refs_lock); 620 620 skb_queue_head_init(&mvm->d0i3_tx); 621 621 init_waitqueue_head(&mvm->d0i3_exit_waitq); 622 + init_waitqueue_head(&mvm->rx_sync_waitq); 622 623 623 624 atomic_set(&mvm->queue_sync_counter, 0); 624 625
+2 -1
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
··· 547 547 "Received expired RX queue sync message\n"); 548 548 return; 549 549 } 550 - atomic_dec(&mvm->queue_sync_counter); 550 + if (!atomic_dec_return(&mvm->queue_sync_counter)) 551 + wake_up(&mvm->rx_sync_waitq); 551 552 } 552 553 553 554 switch (internal_notif->type) {
+27 -6
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
··· 1199 1199 1200 1200 static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) 1201 1201 { 1202 + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 1203 + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 1204 + 1202 1205 /* This looks a bit arbitrary, but the idea is that if we run 1203 1206 * out of possible simultaneous scans and the userspace is 1204 1207 * trying to run a scan type that is already running, we ··· 1228 1225 return -EBUSY; 1229 1226 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 1230 1227 case IWL_MVM_SCAN_NETDETECT: 1231 - /* No need to stop anything for net-detect since the 1232 - * firmware is restarted anyway. This way, any sched 1233 - * scans that were running will be restarted when we 1234 - * resume. 1235 - */ 1236 - return 0; 1228 + /* For non-unified images, there's no need to stop 1229 + * anything for net-detect since the firmware is 1230 + * restarted anyway. This way, any sched scans that 1231 + * were running will be restarted when we resume. 1232 + */ 1233 + if (!unified_image) 1234 + return 0; 1235 + 1236 + /* If this is a unified image and we ran out of scans, 1237 + * we need to stop something. Prefer stopping regular 1238 + * scans, because the results are useless at this 1239 + * point, and we should be able to keep running 1240 + * another scheduled scan while suspended. 1241 + */ 1242 + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) 1243 + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, 1244 + true); 1245 + if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) 1246 + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, 1247 + true); 1248 + 1249 + /* fall through, something is wrong if no scan was 1250 + * running but we ran out of scans. 1251 + */ 1237 1252 default: 1238 1253 WARN_ON(1); 1239 1254 break;
+49 -32
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 541 541 MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 542 542 543 543 #ifdef CONFIG_ACPI 544 - #define SPL_METHOD "SPLC" 545 - #define SPL_DOMAINTYPE_MODULE BIT(0) 546 - #define SPL_DOMAINTYPE_WIFI BIT(1) 547 - #define SPL_DOMAINTYPE_WIGIG BIT(2) 548 - #define SPL_DOMAINTYPE_RFEM BIT(3) 544 + #define ACPI_SPLC_METHOD "SPLC" 545 + #define ACPI_SPLC_DOMAIN_WIFI (0x07) 549 546 550 - static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) 547 + static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc) 551 548 { 552 - union acpi_object *limits, *domain_type, *power_limit; 549 + union acpi_object *data_pkg, *dflt_pwr_limit; 550 + int i; 553 551 554 - if (splx->type != ACPI_TYPE_PACKAGE || 555 - splx->package.count != 2 || 556 - splx->package.elements[0].type != ACPI_TYPE_INTEGER || 557 - splx->package.elements[0].integer.value != 0) { 558 - IWL_ERR(trans, "Unsupported splx structure\n"); 552 + /* We need at least two elements, one for the revision and one 553 + * for the data itself. Also check that the revision is 554 + * supported (currently only revision 0). 555 + */ 556 + if (splc->type != ACPI_TYPE_PACKAGE || 557 + splc->package.count < 2 || 558 + splc->package.elements[0].type != ACPI_TYPE_INTEGER || 559 + splc->package.elements[0].integer.value != 0) { 560 + IWL_DEBUG_INFO(trans, 561 + "Unsupported structure returned by the SPLC method. 
Ignoring.\n"); 559 562 return 0; 560 563 } 561 564 562 - limits = &splx->package.elements[1]; 563 - if (limits->type != ACPI_TYPE_PACKAGE || 564 - limits->package.count < 2 || 565 - limits->package.elements[0].type != ACPI_TYPE_INTEGER || 566 - limits->package.elements[1].type != ACPI_TYPE_INTEGER) { 567 - IWL_ERR(trans, "Invalid limits element\n"); 565 + /* loop through all the packages to find the one for WiFi */ 566 + for (i = 1; i < splc->package.count; i++) { 567 + union acpi_object *domain; 568 + 569 + data_pkg = &splc->package.elements[i]; 570 + 571 + /* Skip anything that is not a package with the right 572 + * amount of elements (i.e. at least 2 integers). 573 + */ 574 + if (data_pkg->type != ACPI_TYPE_PACKAGE || 575 + data_pkg->package.count < 2 || 576 + data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || 577 + data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) 578 + continue; 579 + 580 + domain = &data_pkg->package.elements[0]; 581 + if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI) 582 + break; 583 + 584 + data_pkg = NULL; 585 + } 586 + 587 + if (!data_pkg) { 588 + IWL_DEBUG_INFO(trans, 589 + "No element for the WiFi domain returned by the SPLC method.\n"); 568 590 return 0; 569 591 } 570 592 571 - domain_type = &limits->package.elements[0]; 572 - power_limit = &limits->package.elements[1]; 573 - if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { 574 - IWL_DEBUG_INFO(trans, "WiFi power is not limited\n"); 575 - return 0; 576 - } 577 - 578 - return power_limit->integer.value; 593 + dflt_pwr_limit = &data_pkg->package.elements[1]; 594 + return dflt_pwr_limit->integer.value; 579 595 } 580 596 581 597 static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) 582 598 { 583 599 acpi_handle pxsx_handle; 584 600 acpi_handle handle; 585 - struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; 601 + struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL}; 586 602 acpi_status status; 587 603 588 604 pxsx_handle = 
ACPI_HANDLE(&pdev->dev); ··· 609 593 } 610 594 611 595 /* Get the method's handle */ 612 - status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); 596 + status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD, 597 + &handle); 613 598 if (ACPI_FAILURE(status)) { 614 - IWL_DEBUG_INFO(trans, "SPL method not found\n"); 599 + IWL_DEBUG_INFO(trans, "SPLC method not found\n"); 615 600 return; 616 601 } 617 602 618 603 /* Call SPLC with no arguments */ 619 - status = acpi_evaluate_object(handle, NULL, NULL, &splx); 604 + status = acpi_evaluate_object(handle, NULL, NULL, &splc); 620 605 if (ACPI_FAILURE(status)) { 621 606 IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); 622 607 return; 623 608 } 624 609 625 - trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); 610 + trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer); 626 611 IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", 627 612 trans->dflt_pwr_limit); 628 - kfree(splx.pointer); 613 + kfree(splc.pointer); 629 614 } 630 615 631 616 #else /* CONFIG_ACPI */
+8
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
··· 592 592 static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 593 593 int slots_num, u32 txq_id) 594 594 { 595 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 595 596 int ret; 596 597 597 598 txq->need_update = false; ··· 607 606 return ret; 608 607 609 608 spin_lock_init(&txq->lock); 609 + 610 + if (txq_id == trans_pcie->cmd_queue) { 611 + static struct lock_class_key iwl_pcie_cmd_queue_lock_class; 612 + 613 + lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); 614 + } 615 + 610 616 __skb_queue_head_init(&txq->overflow_q); 611 617 612 618 /*
+1 -1
drivers/net/wireless/mac80211_hwsim.c
··· 826 826 data->bcn_delta = do_div(delta, bcn_int); 827 827 } else { 828 828 data->tsf_offset -= delta; 829 - data->bcn_delta = -do_div(delta, bcn_int); 829 + data->bcn_delta = -(s64)do_div(delta, bcn_int); 830 830 } 831 831 } 832 832
+2 -2
drivers/net/xen-netfront.c
··· 304 304 queue->rx_skbs[id] = skb; 305 305 306 306 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); 307 - BUG_ON((signed short)ref < 0); 307 + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); 308 308 queue->grant_rx_ref[id] = ref; 309 309 310 310 page = skb_frag_page(&skb_shinfo(skb)->frags[0]); ··· 428 428 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 429 429 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 430 430 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 431 - BUG_ON((signed short)ref < 0); 431 + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); 432 432 433 433 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 434 434 gfn, GNTMAP_readonly);
+1 -1
drivers/nfc/mei_phy.c
··· 133 133 return -ENOMEM; 134 134 135 135 bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); 136 - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { 136 + if (bytes_recv < 0 || bytes_recv < if_version_length) { 137 137 pr_err("Could not read IF version\n"); 138 138 r = -EIO; 139 139 goto err;
+9 -6
drivers/ntb/hw/intel/ntb_hw_intel.c
··· 112 112 113 113 module_param_named(xeon_b2b_usd_bar4_addr64, 114 114 xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); 115 - MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 115 + MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64, 116 116 "XEON B2B USD BAR 4 64-bit address"); 117 117 118 118 module_param_named(xeon_b2b_usd_bar4_addr32, 119 119 xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); 120 - MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 120 + MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32, 121 121 "XEON B2B USD split-BAR 4 32-bit address"); 122 122 123 123 module_param_named(xeon_b2b_usd_bar5_addr32, 124 124 xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); 125 - MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 125 + MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32, 126 126 "XEON B2B USD split-BAR 5 32-bit address"); 127 127 128 128 module_param_named(xeon_b2b_dsd_bar2_addr64, ··· 132 132 133 133 module_param_named(xeon_b2b_dsd_bar4_addr64, 134 134 xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); 135 - MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 135 + MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64, 136 136 "XEON B2B DSD BAR 4 64-bit address"); 137 137 138 138 module_param_named(xeon_b2b_dsd_bar4_addr32, 139 139 xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); 140 - MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 140 + MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32, 141 141 "XEON B2B DSD split-BAR 4 32-bit address"); 142 142 143 143 module_param_named(xeon_b2b_dsd_bar5_addr32, 144 144 xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); 145 - MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 145 + MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, 146 146 "XEON B2B DSD split-BAR 5 32-bit address"); 147 147 148 148 #ifndef ioread64 ··· 1755 1755 XEON_B2B_MIN_SIZE); 1756 1756 if (!ndev->peer_mmio) 1757 1757 return -EIO; 1758 + 1759 + ndev->peer_addr = pci_resource_start(pdev, b2b_bar); 1758 1760 } 1759 1761 1760 1762 return 0; ··· 2021 2019 goto err_mmio; 2022 2020 } 2023 2021 ndev->peer_mmio = ndev->self_mmio; 2022 + ndev->peer_addr = 
pci_resource_start(pdev, 0); 2024 2023 2025 2024 return 0; 2026 2025
+1 -1
drivers/ntb/ntb_transport.c
··· 257 257 #define NTB_QP_DEF_NUM_ENTRIES 100 258 258 #define NTB_LINK_DOWN_TIMEOUT 10 259 259 #define DMA_RETRIES 20 260 - #define DMA_OUT_RESOURCE_TO 50 260 + #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) 261 261 262 262 static void ntb_transport_rxc_db(unsigned long data); 263 263 static const struct ntb_ctx_ops ntb_transport_ops;
+4 -4
drivers/ntb/test/ntb_perf.c
··· 72 72 #define MAX_THREADS 32 73 73 #define MAX_TEST_SIZE SZ_1M 74 74 #define MAX_SRCS 32 75 - #define DMA_OUT_RESOURCE_TO 50 75 + #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) 76 76 #define DMA_RETRIES 20 77 77 #define SZ_4G (1ULL << 32) 78 78 #define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ ··· 589 589 return -ENOMEM; 590 590 591 591 if (mutex_is_locked(&perf->run_mutex)) { 592 - out_off = snprintf(buf, 64, "running\n"); 592 + out_off = scnprintf(buf, 64, "running\n"); 593 593 goto read_from_buf; 594 594 } 595 595 ··· 600 600 break; 601 601 602 602 if (pctx->status) { 603 - out_off += snprintf(buf + out_off, 1024 - out_off, 603 + out_off += scnprintf(buf + out_off, 1024 - out_off, 604 604 "%d: error %d\n", i, 605 605 pctx->status); 606 606 continue; 607 607 } 608 608 609 609 rate = div64_u64(pctx->copied, pctx->diff_us); 610 - out_off += snprintf(buf + out_off, 1024 - out_off, 610 + out_off += scnprintf(buf + out_off, 1024 - out_off, 611 611 "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", 612 612 i, pctx->copied, pctx->diff_us, rate); 613 613 }
+1 -1
drivers/ntb/test/ntb_pingpong.c
··· 88 88 89 89 static unsigned long db_init = 0x7; 90 90 module_param(db_init, ulong, 0644); 91 - MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer"); 91 + MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer"); 92 92 93 93 struct pp_ctx { 94 94 struct ntb_dev *ntb;
+1 -1
drivers/nvme/host/lightnvm.c
··· 612 612 613 613 ret = nvm_register(dev); 614 614 615 - ns->lba_shift = ilog2(dev->sec_size) - 9; 615 + ns->lba_shift = ilog2(dev->sec_size); 616 616 617 617 if (sysfs_create_group(&dev->dev.kobj, attrs)) 618 618 pr_warn("%s: failed to create sysfs group for identification\n",
+4 -14
drivers/nvme/host/pci.c
··· 1242 1242 1243 1243 result = nvme_enable_ctrl(&dev->ctrl, cap); 1244 1244 if (result) 1245 - goto free_nvmeq; 1245 + return result; 1246 1246 1247 1247 nvmeq->cq_vector = 0; 1248 1248 result = queue_request_irq(nvmeq); 1249 1249 if (result) { 1250 1250 nvmeq->cq_vector = -1; 1251 - goto free_nvmeq; 1251 + return result; 1252 1252 } 1253 1253 1254 - return result; 1255 - 1256 - free_nvmeq: 1257 - nvme_free_queues(dev, 0); 1258 1254 return result; 1259 1255 } 1260 1256 ··· 1313 1317 max = min(dev->max_qid, dev->queue_count - 1); 1314 1318 for (i = dev->online_queues; i <= max; i++) { 1315 1319 ret = nvme_create_queue(dev->queues[i], i); 1316 - if (ret) { 1317 - nvme_free_queues(dev, i); 1320 + if (ret) 1318 1321 break; 1319 - } 1320 1322 } 1321 1323 1322 1324 /* ··· 1454 1460 result = queue_request_irq(adminq); 1455 1461 if (result) { 1456 1462 adminq->cq_vector = -1; 1457 - goto free_queues; 1463 + return result; 1458 1464 } 1459 1465 return nvme_create_io_queues(dev); 1460 - 1461 - free_queues: 1462 - nvme_free_queues(dev, 1); 1463 - return result; 1464 1466 } 1465 1467 1466 1468 static void nvme_del_queue_end(struct request *req, int error)
+39 -3
drivers/nvme/host/rdma.c
··· 83 83 NVME_RDMA_Q_CONNECTED = (1 << 0), 84 84 NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), 85 85 NVME_RDMA_Q_DELETING = (1 << 2), 86 + NVME_RDMA_Q_LIVE = (1 << 3), 86 87 }; 87 88 88 89 struct nvme_rdma_queue { ··· 625 624 626 625 for (i = 1; i < ctrl->queue_count; i++) { 627 626 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 628 - if (ret) 629 - break; 627 + if (ret) { 628 + dev_info(ctrl->ctrl.device, 629 + "failed to connect i/o queue: %d\n", ret); 630 + goto out_free_queues; 631 + } 632 + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); 630 633 } 631 634 635 + return 0; 636 + 637 + out_free_queues: 638 + nvme_rdma_free_io_queues(ctrl); 632 639 return ret; 633 640 } 634 641 ··· 721 712 if (ret) 722 713 goto stop_admin_q; 723 714 715 + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 716 + 724 717 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 725 718 if (ret) 726 719 goto stop_admin_q; ··· 772 761 773 762 nvme_stop_keep_alive(&ctrl->ctrl); 774 763 775 - for (i = 0; i < ctrl->queue_count; i++) 764 + for (i = 0; i < ctrl->queue_count; i++) { 776 765 clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); 766 + clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); 767 + } 777 768 778 769 if (ctrl->queue_count > 1) 779 770 nvme_stop_queues(&ctrl->ctrl); ··· 1391 1378 return BLK_EH_HANDLED; 1392 1379 } 1393 1380 1381 + /* 1382 + * We cannot accept any other command until the Connect command has completed. 
1383 + */ 1384 + static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, 1385 + struct request *rq) 1386 + { 1387 + if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { 1388 + struct nvme_command *cmd = (struct nvme_command *)rq->cmd; 1389 + 1390 + if (rq->cmd_type != REQ_TYPE_DRV_PRIV || 1391 + cmd->common.opcode != nvme_fabrics_command || 1392 + cmd->fabrics.fctype != nvme_fabrics_type_connect) 1393 + return false; 1394 + } 1395 + 1396 + return true; 1397 + } 1398 + 1394 1399 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1395 1400 const struct blk_mq_queue_data *bd) 1396 1401 { ··· 1424 1393 int ret; 1425 1394 1426 1395 WARN_ON_ONCE(rq->tag < 0); 1396 + 1397 + if (!nvme_rdma_queue_is_ready(queue, rq)) 1398 + return BLK_MQ_RQ_QUEUE_BUSY; 1427 1399 1428 1400 dev = queue->device->dev; 1429 1401 ib_dma_sync_single_for_cpu(dev, sqe->dma, ··· 1577 1543 error = nvmf_connect_admin_queue(&ctrl->ctrl); 1578 1544 if (error) 1579 1545 goto out_cleanup_queue; 1546 + 1547 + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 1580 1548 1581 1549 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); 1582 1550 if (error) {
+7 -3
drivers/nvme/target/core.c
··· 838 838 839 839 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) 840 840 { 841 - ctrl->csts |= NVME_CSTS_CFS; 842 - INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); 843 - schedule_work(&ctrl->fatal_err_work); 841 + mutex_lock(&ctrl->lock); 842 + if (!(ctrl->csts & NVME_CSTS_CFS)) { 843 + ctrl->csts |= NVME_CSTS_CFS; 844 + INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); 845 + schedule_work(&ctrl->fatal_err_work); 846 + } 847 + mutex_unlock(&ctrl->lock); 844 848 } 845 849 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); 846 850
+15 -3
drivers/nvme/target/rdma.c
··· 951 951 952 952 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) 953 953 { 954 + ib_drain_qp(queue->cm_id->qp); 954 955 rdma_destroy_qp(queue->cm_id); 955 956 ib_free_cq(queue->cq); 956 957 } ··· 1067 1066 spin_lock_init(&queue->rsp_wr_wait_lock); 1068 1067 INIT_LIST_HEAD(&queue->free_rsps); 1069 1068 spin_lock_init(&queue->rsps_lock); 1069 + INIT_LIST_HEAD(&queue->queue_list); 1070 1070 1071 1071 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); 1072 1072 if (queue->idx < 0) { ··· 1246 1244 1247 1245 if (disconnect) { 1248 1246 rdma_disconnect(queue->cm_id); 1249 - ib_drain_qp(queue->cm_id->qp); 1250 1247 schedule_work(&queue->release_work); 1251 1248 } 1252 1249 } ··· 1270 1269 { 1271 1270 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); 1272 1271 1273 - pr_err("failed to connect queue\n"); 1272 + mutex_lock(&nvmet_rdma_queue_mutex); 1273 + if (!list_empty(&queue->queue_list)) 1274 + list_del_init(&queue->queue_list); 1275 + mutex_unlock(&nvmet_rdma_queue_mutex); 1276 + 1277 + pr_err("failed to connect queue %d\n", queue->idx); 1274 1278 schedule_work(&queue->release_work); 1275 1279 } 1276 1280 ··· 1358 1352 case RDMA_CM_EVENT_ADDR_CHANGE: 1359 1353 case RDMA_CM_EVENT_DISCONNECTED: 1360 1354 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1361 - nvmet_rdma_queue_disconnect(queue); 1355 + /* 1356 + * We might end up here when we already freed the qp 1357 + * which means queue release sequence is in progress, 1358 + * so don't get in the way... 1359 + */ 1360 + if (queue) 1361 + nvmet_rdma_queue_disconnect(queue); 1362 1362 break; 1363 1363 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1364 1364 ret = nvmet_rdma_device_removal(cm_id, queue);
-2
drivers/of/base.c
··· 2077 2077 name = of_get_property(of_aliases, "stdout", NULL); 2078 2078 if (name) 2079 2079 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); 2080 - if (of_stdout) 2081 - console_set_by_of(); 2082 2080 } 2083 2081 2084 2082 if (!of_aliases)
+5 -1
drivers/of/of_mdio.c
··· 292 292 mdiodev = to_mdio_device(d); 293 293 if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) 294 294 return to_phy_device(d); 295 + put_device(d); 295 296 } 296 297 297 298 return NULL; ··· 457 456 status.link = 1; 458 457 status.duplex = of_property_read_bool(fixed_link_node, 459 458 "full-duplex"); 460 - if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) 459 + if (of_property_read_u32(fixed_link_node, "speed", 460 + &status.speed)) { 461 + of_node_put(fixed_link_node); 461 462 return -EINVAL; 463 + } 462 464 status.pause = of_property_read_bool(fixed_link_node, "pause"); 463 465 status.asym_pause = of_property_read_bool(fixed_link_node, 464 466 "asym-pause");
+62
drivers/pci/host/pcie-rockchip.c
··· 190 190 struct reset_control *mgmt_rst; 191 191 struct reset_control *mgmt_sticky_rst; 192 192 struct reset_control *pipe_rst; 193 + struct reset_control *pm_rst; 194 + struct reset_control *aclk_rst; 195 + struct reset_control *pclk_rst; 193 196 struct clk *aclk_pcie; 194 197 struct clk *aclk_perf_pcie; 195 198 struct clk *hclk_pcie; ··· 410 407 unsigned long timeout; 411 408 412 409 gpiod_set_value(rockchip->ep_gpio, 0); 410 + 411 + err = reset_control_assert(rockchip->aclk_rst); 412 + if (err) { 413 + dev_err(dev, "assert aclk_rst err %d\n", err); 414 + return err; 415 + } 416 + 417 + err = reset_control_assert(rockchip->pclk_rst); 418 + if (err) { 419 + dev_err(dev, "assert pclk_rst err %d\n", err); 420 + return err; 421 + } 422 + 423 + err = reset_control_assert(rockchip->pm_rst); 424 + if (err) { 425 + dev_err(dev, "assert pm_rst err %d\n", err); 426 + return err; 427 + } 428 + 429 + udelay(10); 430 + 431 + err = reset_control_deassert(rockchip->pm_rst); 432 + if (err) { 433 + dev_err(dev, "deassert pm_rst err %d\n", err); 434 + return err; 435 + } 436 + 437 + err = reset_control_deassert(rockchip->aclk_rst); 438 + if (err) { 439 + dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); 440 + return err; 441 + } 442 + 443 + err = reset_control_deassert(rockchip->pclk_rst); 444 + if (err) { 445 + dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); 446 + return err; 447 + } 413 448 414 449 err = phy_init(rockchip->phy); 415 450 if (err < 0) { ··· 820 779 if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) 821 780 dev_err(dev, "missing pipe reset property in node\n"); 822 781 return PTR_ERR(rockchip->pipe_rst); 782 + } 783 + 784 + rockchip->pm_rst = devm_reset_control_get(dev, "pm"); 785 + if (IS_ERR(rockchip->pm_rst)) { 786 + if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) 787 + dev_err(dev, "missing pm reset property in node\n"); 788 + return PTR_ERR(rockchip->pm_rst); 789 + } 790 + 791 + rockchip->pclk_rst = devm_reset_control_get(dev, "pclk"); 792 + if 
(IS_ERR(rockchip->pclk_rst)) { 793 + if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) 794 + dev_err(dev, "missing pclk reset property in node\n"); 795 + return PTR_ERR(rockchip->pclk_rst); 796 + } 797 + 798 + rockchip->aclk_rst = devm_reset_control_get(dev, "aclk"); 799 + if (IS_ERR(rockchip->aclk_rst)) { 800 + if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) 801 + dev_err(dev, "missing aclk reset property in node\n"); 802 + return PTR_ERR(rockchip->aclk_rst); 823 803 } 824 804 825 805 rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+6
drivers/pci/pci-mid.c
··· 29 29 return intel_mid_pci_set_power_state(pdev, state); 30 30 } 31 31 32 + static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) 33 + { 34 + return intel_mid_pci_get_power_state(pdev); 35 + } 36 + 32 37 static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) 33 38 { 34 39 return PCI_D3hot; ··· 57 52 static struct pci_platform_pm_ops mid_pci_platform_pm = { 58 53 .is_manageable = mid_pci_power_manageable, 59 54 .set_state = mid_pci_set_power_state, 55 + .get_state = mid_pci_get_power_state, 60 56 .choose_state = mid_pci_choose_state, 61 57 .sleep_wake = mid_pci_sleep_wake, 62 58 .run_wake = mid_pci_run_wake,
+8
drivers/pci/setup-res.c
··· 121 121 return -EINVAL; 122 122 } 123 123 124 + /* 125 + * If we have a shadow copy in RAM, the PCI device doesn't respond 126 + * to the shadow range, so we don't need to claim it, and upstream 127 + * bridges don't need to route the range to the device. 128 + */ 129 + if (res->flags & IORESOURCE_ROM_SHADOW) 130 + return 0; 131 + 124 132 root = pci_find_parent_resource(dev, res); 125 133 if (!root) { 126 134 dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+1 -1
drivers/pcmcia/soc_common.c
··· 107 107 108 108 ret = regulator_enable(r->reg); 109 109 } else { 110 - regulator_disable(r->reg); 110 + ret = regulator_disable(r->reg); 111 111 } 112 112 if (ret == 0) 113 113 r->on = on;
+3 -2
drivers/phy/phy-da8xx-usb.c
··· 198 198 } else { 199 199 int ret; 200 200 201 - ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); 201 + ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", 202 + "ohci-da8xx"); 202 203 if (ret) 203 204 dev_warn(dev, "Failed to create usb11 phy lookup\n"); 204 205 ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy", ··· 217 216 218 217 if (!pdev->dev.of_node) { 219 218 phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx"); 220 - phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); 219 + phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx"); 221 220 } 222 221 223 222 return 0;
+1 -12
drivers/phy/phy-rockchip-pcie.c
··· 249 249 static int rockchip_pcie_phy_exit(struct phy *phy) 250 250 { 251 251 struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); 252 - int err = 0; 253 252 254 253 clk_disable_unprepare(rk_phy->clk_pciephy_ref); 255 254 256 - err = reset_control_deassert(rk_phy->phy_rst); 257 - if (err) { 258 - dev_err(&phy->dev, "deassert phy_rst err %d\n", err); 259 - goto err_reset; 260 - } 261 - 262 - return err; 263 - 264 - err_reset: 265 - clk_prepare_enable(rk_phy->clk_pciephy_ref); 266 - return err; 255 + return 0; 267 256 } 268 257 269 258 static const struct phy_ops ops = {
+1 -1
drivers/phy/phy-sun4i-usb.c
··· 264 264 return ret; 265 265 } 266 266 267 - if (data->cfg->enable_pmu_unk1) { 267 + if (phy->pmu && data->cfg->enable_pmu_unk1) { 268 268 val = readl(phy->pmu + REG_PMU_UNK1); 269 269 writel(val & ~2, phy->pmu + REG_PMU_UNK1); 270 270 }
+1 -1
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
··· 26 26 27 27 #define ASPEED_G5_NR_PINS 228 28 28 29 - #define COND1 SIG_DESC_BIT(SCU90, 6, 0) 29 + #define COND1 { SCU90, BIT(6), 0, 0 } 30 30 #define COND2 { SCU94, GENMASK(1, 0), 0, 0 } 31 31 32 32 #define B14 0
+1 -1
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
··· 844 844 845 845 static int __init iproc_gpio_init(void) 846 846 { 847 - return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe); 847 + return platform_driver_register(&iproc_gpio_driver); 848 848 } 849 849 arch_initcall_sync(iproc_gpio_init);
+1 -1
drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
··· 741 741 742 742 static int __init nsp_gpio_init(void) 743 743 { 744 - return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe); 744 + return platform_driver_register(&nsp_gpio_driver); 745 745 } 746 746 arch_initcall_sync(nsp_gpio_init);
+1
drivers/pinctrl/freescale/pinctrl-imx.c
··· 687 687 if (!info->functions) 688 688 return -ENOMEM; 689 689 690 + info->group_index = 0; 690 691 if (flat_funcs) { 691 692 info->ngroups = of_get_child_count(np); 692 693 } else {
+14 -3
drivers/pinctrl/intel/pinctrl-cherryview.c
··· 1652 1652 } 1653 1653 1654 1654 #ifdef CONFIG_PM_SLEEP 1655 - static int chv_pinctrl_suspend(struct device *dev) 1655 + static int chv_pinctrl_suspend_noirq(struct device *dev) 1656 1656 { 1657 1657 struct platform_device *pdev = to_platform_device(dev); 1658 1658 struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); 1659 + unsigned long flags; 1659 1660 int i; 1661 + 1662 + raw_spin_lock_irqsave(&chv_lock, flags); 1660 1663 1661 1664 pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK); 1662 1665 ··· 1681 1678 ctx->padctrl1 = readl(reg); 1682 1679 } 1683 1680 1681 + raw_spin_unlock_irqrestore(&chv_lock, flags); 1682 + 1684 1683 return 0; 1685 1684 } 1686 1685 1687 - static int chv_pinctrl_resume(struct device *dev) 1686 + static int chv_pinctrl_resume_noirq(struct device *dev) 1688 1687 { 1689 1688 struct platform_device *pdev = to_platform_device(dev); 1690 1689 struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); 1690 + unsigned long flags; 1691 1691 int i; 1692 + 1693 + raw_spin_lock_irqsave(&chv_lock, flags); 1692 1694 1693 1695 /* 1694 1696 * Mask all interrupts before restoring per-pin configuration ··· 1739 1731 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); 1740 1732 chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); 1741 1733 1734 + raw_spin_unlock_irqrestore(&chv_lock, flags); 1735 + 1742 1736 return 0; 1743 1737 } 1744 1738 #endif 1745 1739 1746 1740 static const struct dev_pm_ops chv_pinctrl_pm_ops = { 1747 - SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume) 1741 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq, 1742 + chv_pinctrl_resume_noirq) 1748 1743 }; 1749 1744 1750 1745 static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
+1 -1
drivers/pinctrl/pinctrl-st.c
··· 1512 1512 if (info->irqmux_base || gpio_irq > 0) { 1513 1513 err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip, 1514 1514 0, handle_simple_irq, 1515 - IRQ_TYPE_LEVEL_LOW); 1515 + IRQ_TYPE_NONE); 1516 1516 if (err) { 1517 1517 gpiochip_remove(&bank->gpio_chip); 1518 1518 dev_info(dev, "could not add irqchip\n");
+5 -3
drivers/pinctrl/stm32/pinctrl-stm32.c
··· 1092 1092 return -EINVAL; 1093 1093 } 1094 1094 1095 - ret = stm32_pctrl_dt_setup_irq(pdev, pctl); 1096 - if (ret) 1097 - return ret; 1095 + if (of_find_property(np, "interrupt-parent", NULL)) { 1096 + ret = stm32_pctrl_dt_setup_irq(pdev, pctl); 1097 + if (ret) 1098 + return ret; 1099 + } 1098 1100 1099 1101 for_each_child_of_node(np, child) 1100 1102 if (of_property_read_bool(child, "gpio-controller"))
+7
drivers/platform/x86/ideapad-laptop.c
··· 934 934 }, 935 935 }, 936 936 { 937 + .ident = "Lenovo Yoga 900", 938 + .matches = { 939 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 940 + DMI_MATCH(DMI_BOARD_NAME, "VIUU4"), 941 + }, 942 + }, 943 + { 937 944 .ident = "Lenovo YOGA 910-13IKB", 938 945 .matches = { 939 946 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+1 -1
drivers/platform/x86/intel-hid.c
··· 264 264 return AE_OK; 265 265 266 266 if (acpi_match_device_ids(dev, ids) == 0) 267 - if (acpi_create_platform_device(dev)) 267 + if (acpi_create_platform_device(dev, NULL)) 268 268 dev_info(&dev->dev, 269 269 "intel-hid: created platform device\n"); 270 270
+1 -1
drivers/platform/x86/intel-vbtn.c
··· 164 164 return AE_OK; 165 165 166 166 if (acpi_match_device_ids(dev, ids) == 0) 167 - if (acpi_create_platform_device(dev)) 167 + if (acpi_create_platform_device(dev, NULL)) 168 168 dev_info(&dev->dev, 169 169 "intel-vbtn: created platform device\n"); 170 170
+19 -7
drivers/platform/x86/toshiba-wmi.c
··· 24 24 #include <linux/acpi.h> 25 25 #include <linux/input.h> 26 26 #include <linux/input/sparse-keymap.h> 27 + #include <linux/dmi.h> 27 28 28 29 MODULE_AUTHOR("Azael Avalos"); 29 30 MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver"); 30 31 MODULE_LICENSE("GPL"); 31 32 32 - #define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" 33 + #define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" 33 34 34 - MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID); 35 + MODULE_ALIAS("wmi:"WMI_EVENT_GUID); 35 36 36 37 static struct input_dev *toshiba_wmi_input_dev; 37 38 ··· 64 63 kfree(response.pointer); 65 64 } 66 65 66 + static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { 67 + { 68 + .ident = "Toshiba laptop", 69 + .matches = { 70 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 71 + }, 72 + }, 73 + {} 74 + }; 75 + 67 76 static int __init toshiba_wmi_input_setup(void) 68 77 { 69 78 acpi_status status; ··· 92 81 if (err) 93 82 goto err_free_dev; 94 83 95 - status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID, 84 + status = wmi_install_notify_handler(WMI_EVENT_GUID, 96 85 toshiba_wmi_notify, NULL); 97 86 if (ACPI_FAILURE(status)) { 98 87 err = -EIO; ··· 106 95 return 0; 107 96 108 97 err_remove_notifier: 109 - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); 98 + wmi_remove_notify_handler(WMI_EVENT_GUID); 110 99 err_free_keymap: 111 100 sparse_keymap_free(toshiba_wmi_input_dev); 112 101 err_free_dev: ··· 116 105 117 106 static void toshiba_wmi_input_destroy(void) 118 107 { 119 - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); 108 + wmi_remove_notify_handler(WMI_EVENT_GUID); 120 109 sparse_keymap_free(toshiba_wmi_input_dev); 121 110 input_unregister_device(toshiba_wmi_input_dev); 122 111 } ··· 125 114 { 126 115 int ret; 127 116 128 - if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) 117 + if (!wmi_has_guid(WMI_EVENT_GUID) || 118 + !dmi_check_system(toshiba_wmi_dmi_table)) 129 119 return -ENODEV; 130 120 131 121 ret = toshiba_wmi_input_setup(); ··· 142 130 
143 131 static void __exit toshiba_wmi_exit(void) 144 132 { 145 - if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) 133 + if (wmi_has_guid(WMI_EVENT_GUID)) 146 134 toshiba_wmi_input_destroy(); 147 135 } 148 136
+1
drivers/rtc/rtc-asm9260.c
··· 327 327 { .compatible = "alphascale,asm9260-rtc", }, 328 328 {} 329 329 }; 330 + MODULE_DEVICE_TABLE(of, asm9260_dt_ids); 330 331 331 332 static struct platform_driver asm9260_rtc_driver = { 332 333 .probe = asm9260_rtc_probe,
+8 -7
drivers/rtc/rtc-cmos.c
··· 776 776 spin_unlock_irq(&rtc_lock); 777 777 } 778 778 779 - static void __exit cmos_do_remove(struct device *dev) 779 + static void cmos_do_remove(struct device *dev) 780 780 { 781 781 struct cmos_rtc *cmos = dev_get_drvdata(dev); 782 782 struct resource *ports; ··· 996 996 struct cmos_rtc *cmos = dev_get_drvdata(dev); 997 997 unsigned char rtc_control = 0; 998 998 unsigned char rtc_intr; 999 + unsigned long flags; 999 1000 1000 - spin_lock_irq(&rtc_lock); 1001 + spin_lock_irqsave(&rtc_lock, flags); 1001 1002 if (cmos_rtc.suspend_ctrl) 1002 1003 rtc_control = CMOS_READ(RTC_CONTROL); 1003 1004 if (rtc_control & RTC_AIE) { ··· 1007 1006 rtc_intr = CMOS_READ(RTC_INTR_FLAGS); 1008 1007 rtc_update_irq(cmos->rtc, 1, rtc_intr); 1009 1008 } 1010 - spin_unlock_irq(&rtc_lock); 1009 + spin_unlock_irqrestore(&rtc_lock, flags); 1011 1010 1012 1011 pm_wakeup_event(dev, 0); 1013 1012 acpi_clear_event(ACPI_EVENT_RTC); ··· 1130 1129 pnp_irq(pnp, 0)); 1131 1130 } 1132 1131 1133 - static void __exit cmos_pnp_remove(struct pnp_dev *pnp) 1132 + static void cmos_pnp_remove(struct pnp_dev *pnp) 1134 1133 { 1135 1134 cmos_do_remove(&pnp->dev); 1136 1135 } ··· 1162 1161 .name = (char *) driver_name, 1163 1162 .id_table = rtc_ids, 1164 1163 .probe = cmos_pnp_probe, 1165 - .remove = __exit_p(cmos_pnp_remove), 1164 + .remove = cmos_pnp_remove, 1166 1165 .shutdown = cmos_pnp_shutdown, 1167 1166 1168 1167 /* flag ensures resume() gets called, and stops syslog spam */ ··· 1239 1238 return cmos_do_probe(&pdev->dev, resource, irq); 1240 1239 } 1241 1240 1242 - static int __exit cmos_platform_remove(struct platform_device *pdev) 1241 + static int cmos_platform_remove(struct platform_device *pdev) 1243 1242 { 1244 1243 cmos_do_remove(&pdev->dev); 1245 1244 return 0; ··· 1264 1263 MODULE_ALIAS("platform:rtc_cmos"); 1265 1264 1266 1265 static struct platform_driver cmos_platform_driver = { 1267 - .remove = __exit_p(cmos_platform_remove), 1266 + .remove = cmos_platform_remove, 1268 1267 .shutdown = 
cmos_platform_shutdown, 1269 1268 .driver = { 1270 1269 .name = driver_name,
+30 -8
drivers/rtc/rtc-omap.c
··· 113 113 /* OMAP_RTC_OSC_REG bit fields: */ 114 114 #define OMAP_RTC_OSC_32KCLK_EN BIT(6) 115 115 #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) 116 + #define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4) 116 117 117 118 /* OMAP_RTC_IRQWAKEEN bit fields: */ 118 119 #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) ··· 147 146 u8 interrupts_reg; 148 147 bool is_pmic_controller; 149 148 bool has_ext_clk; 149 + bool is_suspending; 150 150 const struct omap_rtc_device_type *type; 151 151 struct pinctrl_dev *pctldev; 152 152 }; ··· 788 786 */ 789 787 if (rtc->has_ext_clk) { 790 788 reg = rtc_read(rtc, OMAP_RTC_OSC_REG); 791 - rtc_write(rtc, OMAP_RTC_OSC_REG, 792 - reg | OMAP_RTC_OSC_SEL_32KCLK_SRC); 789 + reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE; 790 + reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC; 791 + rtc_writel(rtc, OMAP_RTC_OSC_REG, reg); 793 792 } 794 793 795 794 rtc->type->lock(rtc); ··· 901 898 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); 902 899 rtc->type->lock(rtc); 903 900 904 - /* Disable the clock/module */ 905 - pm_runtime_put_sync(dev); 901 + rtc->is_suspending = true; 906 902 907 903 return 0; 908 904 } ··· 910 908 { 911 909 struct omap_rtc *rtc = dev_get_drvdata(dev); 912 910 913 - /* Enable the clock/module so that we can access the registers */ 914 - pm_runtime_get_sync(dev); 915 - 916 911 rtc->type->unlock(rtc); 917 912 if (device_may_wakeup(dev)) 918 913 disable_irq_wake(rtc->irq_alarm); ··· 917 918 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); 918 919 rtc->type->lock(rtc); 919 920 921 + rtc->is_suspending = false; 922 + 920 923 return 0; 921 924 } 922 925 #endif 923 926 924 - static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume); 927 + #ifdef CONFIG_PM 928 + static int omap_rtc_runtime_suspend(struct device *dev) 929 + { 930 + struct omap_rtc *rtc = dev_get_drvdata(dev); 931 + 932 + if (rtc->is_suspending && !rtc->has_ext_clk) 933 + return -EBUSY; 934 + 935 + return 0; 936 + } 937 + 938 + static int 
omap_rtc_runtime_resume(struct device *dev) 939 + { 940 + return 0; 941 + } 942 + #endif 943 + 944 + static const struct dev_pm_ops omap_rtc_pm_ops = { 945 + SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume) 946 + SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend, 947 + omap_rtc_runtime_resume, NULL) 948 + }; 925 949 926 950 static void omap_rtc_shutdown(struct platform_device *pdev) 927 951 {
+2 -1
drivers/scsi/cxgbi/libcxgbi.c
··· 2081 2081 /* never reached the xmit task callout */ 2082 2082 if (tdata->skb) 2083 2083 __kfree_skb(tdata->skb); 2084 - memset(tdata, 0, sizeof(*tdata)); 2085 2084 2086 2085 task_release_itt(task, task->hdr_itt); 2086 + memset(tdata, 0, sizeof(*tdata)); 2087 + 2087 2088 iscsi_tcp_cleanup_task(task); 2088 2089 } 2089 2090 EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
+4 -1
drivers/scsi/device_handler/scsi_dh_alua.c
··· 793 793 WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); 794 794 WARN_ON(pg->flags & ALUA_PG_RUN_STPG); 795 795 spin_unlock_irqrestore(&pg->lock, flags); 796 + kref_put(&pg->kref, release_port_group); 796 797 return; 797 798 } 798 799 if (pg->flags & ALUA_SYNC_STPG) ··· 891 890 /* Do not queue if the worker is already running */ 892 891 if (!(pg->flags & ALUA_PG_RUNNING)) { 893 892 kref_get(&pg->kref); 893 + sdev = NULL; 894 894 start_queue = 1; 895 895 } 896 896 } ··· 903 901 if (start_queue && 904 902 !queue_delayed_work(alua_wq, &pg->rtpg_work, 905 903 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { 906 - scsi_device_put(sdev); 904 + if (sdev) 905 + scsi_device_put(sdev); 907 906 kref_put(&pg->kref, release_port_group); 908 907 } 909 908 }
+1 -1
drivers/scsi/megaraid/megaraid_sas.h
··· 2233 2233 }; 2234 2234 2235 2235 #define MEGASAS_IS_LOGICAL(scp) \ 2236 - (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 2236 + ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) 2237 2237 2238 2238 #define MEGASAS_DEV_INDEX(scp) \ 2239 2239 (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
+2 -2
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 1273 1273 sas_target_priv_data->handle = raid_device->handle; 1274 1274 sas_target_priv_data->sas_address = raid_device->wwid; 1275 1275 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; 1276 - sas_target_priv_data->raid_device = raid_device; 1277 1276 if (ioc->is_warpdrive) 1278 - raid_device->starget = starget; 1277 + sas_target_priv_data->raid_device = raid_device; 1278 + raid_device->starget = starget; 1279 1279 } 1280 1280 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1281 1281 return 0;
+16
drivers/scsi/qla2xxx/qla_os.c
··· 707 707 srb_t *sp; 708 708 int rval; 709 709 710 + if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) { 711 + cmd->result = DID_NO_CONNECT << 16; 712 + goto qc24_fail_command; 713 + } 714 + 710 715 if (ha->flags.eeh_busy) { 711 716 if (ha->flags.pci_channel_io_perm_failure) { 712 717 ql_dbg(ql_dbg_aer, vha, 0x9010, ··· 1456 1451 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1457 1452 sp = req->outstanding_cmds[cnt]; 1458 1453 if (sp) { 1454 + /* Get a reference to the sp and drop the lock. 1455 + * The reference ensures this sp->done() call 1456 + * - and not the call in qla2xxx_eh_abort() - 1457 + * ends the SCSI command (with result 'res'). 1458 + */ 1459 + sp_get(sp); 1460 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1461 + qla2xxx_eh_abort(GET_CMD_SP(sp)); 1462 + spin_lock_irqsave(&ha->hardware_lock, flags); 1459 1463 req->outstanding_cmds[cnt] = NULL; 1460 1464 sp->done(vha, sp, res); 1461 1465 } ··· 2355 2341 { 2356 2342 scsi_qla_host_t *vha = shost_priv(shost); 2357 2343 2344 + if (test_bit(UNLOADING, &vha->dpc_flags)) 2345 + return 1; 2358 2346 if (!vha->host) 2359 2347 return 1; 2360 2348 if (time > vha->hw->loop_reset_delay * HZ)
+3 -2
drivers/scsi/vmw_pvscsi.c
··· 793 793 unsigned long flags; 794 794 int result = SUCCESS; 795 795 DECLARE_COMPLETION_ONSTACK(abort_cmp); 796 + int done; 796 797 797 798 scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", 798 799 adapter->host->host_no, cmd); ··· 825 824 pvscsi_abort_cmd(adapter, ctx); 826 825 spin_unlock_irqrestore(&adapter->hw_lock, flags); 827 826 /* Wait for 2 secs for the completion. */ 828 - wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); 827 + done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); 829 828 spin_lock_irqsave(&adapter->hw_lock, flags); 830 829 831 - if (!completion_done(&abort_cmp)) { 830 + if (!done) { 832 831 /* 833 832 * Failed to abort the command, unmark the fact that it 834 833 * was requested to be aborted.
+1 -1
drivers/scsi/vmw_pvscsi.h
··· 26 26 27 27 #include <linux/types.h> 28 28 29 - #define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k" 29 + #define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k" 30 30 31 31 #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 32 32
+2 -1
drivers/staging/comedi/drivers/ni_tio.c
··· 207 207 * clock period is specified by user with prescaling 208 208 * already taken into account. 209 209 */ 210 - return counter->clock_period_ps; 210 + *period_ps = counter->clock_period_ps; 211 + return 0; 211 212 } 212 213 213 214 switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
+1
drivers/staging/greybus/arche-platform.c
··· 186 186 exit: 187 187 spin_unlock_irqrestore(&arche_pdata->wake_lock, flags); 188 188 mutex_unlock(&arche_pdata->platform_state_mutex); 189 + put_device(&pdev->dev); 189 190 of_node_put(np); 190 191 return ret; 191 192 }
+10 -7
drivers/staging/iio/impedance-analyzer/ad5933.c
··· 655 655 __be16 buf[2]; 656 656 int val[2]; 657 657 unsigned char status; 658 + int ret; 658 659 659 660 mutex_lock(&indio_dev->mlock); 660 661 if (st->state == AD5933_CTRL_INIT_START_FREQ) { ··· 663 662 ad5933_cmd(st, AD5933_CTRL_START_SWEEP); 664 663 st->state = AD5933_CTRL_START_SWEEP; 665 664 schedule_delayed_work(&st->work, st->poll_time_jiffies); 666 - mutex_unlock(&indio_dev->mlock); 667 - return; 665 + goto out; 668 666 } 669 667 670 - ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); 668 + ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); 669 + if (ret) 670 + goto out; 671 671 672 672 if (status & AD5933_STAT_DATA_VALID) { 673 673 int scan_count = bitmap_weight(indio_dev->active_scan_mask, 674 674 indio_dev->masklength); 675 - ad5933_i2c_read(st->client, 675 + ret = ad5933_i2c_read(st->client, 676 676 test_bit(1, indio_dev->active_scan_mask) ? 677 677 AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, 678 678 scan_count * 2, (u8 *)buf); 679 + if (ret) 680 + goto out; 679 681 680 682 if (scan_count == 2) { 681 683 val[0] = be16_to_cpu(buf[0]); ··· 690 686 } else { 691 687 /* no data available - try again later */ 692 688 schedule_delayed_work(&st->work, st->poll_time_jiffies); 693 - mutex_unlock(&indio_dev->mlock); 694 - return; 689 + goto out; 695 690 } 696 691 697 692 if (status & AD5933_STAT_SWEEP_DONE) { ··· 703 700 ad5933_cmd(st, AD5933_CTRL_INC_FREQ); 704 701 schedule_delayed_work(&st->work, st->poll_time_jiffies); 705 702 } 706 - 703 + out: 707 704 mutex_unlock(&indio_dev->mlock); 708 705 } 709 706
+2 -6
drivers/staging/nvec/nvec_ps2.c
··· 106 106 { 107 107 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); 108 108 struct serio *ser_dev; 109 - char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; 110 109 111 - ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); 110 + ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); 112 111 if (!ser_dev) 113 112 return -ENOMEM; 114 113 115 - ser_dev->id.type = SERIO_PS_PSTHRU; 114 + ser_dev->id.type = SERIO_8042; 116 115 ser_dev->write = ps2_sendcommand; 117 116 ser_dev->start = ps2_startstreaming; 118 117 ser_dev->stop = ps2_stopstreaming; ··· 125 126 nvec_register_notifier(nvec, &ps2_dev.notifier, 0); 126 127 127 128 serio_register_port(ser_dev); 128 - 129 - /* mouse reset */ 130 - nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset)); 131 129 132 130 return 0; 133 131 }
+4 -4
drivers/staging/sm750fb/ddk750_reg.h
··· 601 601 602 602 #define PANEL_PLANE_TL 0x08001C 603 603 #define PANEL_PLANE_TL_TOP_SHIFT 16 604 - #define PANEL_PLANE_TL_TOP_MASK (0xeff << 16) 605 - #define PANEL_PLANE_TL_LEFT_MASK 0xeff 604 + #define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16) 605 + #define PANEL_PLANE_TL_LEFT_MASK 0x7ff 606 606 607 607 #define PANEL_PLANE_BR 0x080020 608 608 #define PANEL_PLANE_BR_BOTTOM_SHIFT 16 609 - #define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16) 610 - #define PANEL_PLANE_BR_RIGHT_MASK 0xeff 609 + #define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16) 610 + #define PANEL_PLANE_BR_RIGHT_MASK 0x7ff 611 611 612 612 #define PANEL_HORIZONTAL_TOTAL 0x080024 613 613 #define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
+2 -2
drivers/usb/class/cdc-acm.c
··· 932 932 DECLARE_WAITQUEUE(wait, current); 933 933 struct async_icount old, new; 934 934 935 - if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)) 936 - return -EINVAL; 937 935 do { 938 936 spin_lock_irq(&acm->read_lock); 939 937 old = acm->oldcount; ··· 1158 1160 1159 1161 if (quirks == IGNORE_DEVICE) 1160 1162 return -ENODEV; 1163 + 1164 + memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header)); 1161 1165 1162 1166 num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR; 1163 1167
+2 -3
drivers/usb/dwc3/core.c
··· 769 769 return 0; 770 770 771 771 err4: 772 - phy_power_off(dwc->usb2_generic_phy); 772 + phy_power_off(dwc->usb3_generic_phy); 773 773 774 774 err3: 775 - phy_power_off(dwc->usb3_generic_phy); 775 + phy_power_off(dwc->usb2_generic_phy); 776 776 777 777 err2: 778 778 usb_phy_set_suspend(dwc->usb2_phy, 1); 779 779 usb_phy_set_suspend(dwc->usb3_phy, 1); 780 - dwc3_core_exit(dwc); 781 780 782 781 err1: 783 782 usb_phy_shutdown(dwc->usb2_phy);
+1
drivers/usb/dwc3/dwc3-st.c
··· 31 31 #include <linux/slab.h> 32 32 #include <linux/regmap.h> 33 33 #include <linux/reset.h> 34 + #include <linux/pinctrl/consumer.h> 34 35 #include <linux/usb/of.h> 35 36 36 37 #include "core.h"
-8
drivers/usb/gadget/function/u_ether.c
··· 588 588 589 589 req->length = length; 590 590 591 - /* throttle high/super speed IRQ rate back slightly */ 592 - if (gadget_is_dualspeed(dev->gadget)) 593 - req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || 594 - dev->gadget->speed == USB_SPEED_SUPER)) && 595 - !list_empty(&dev->tx_reqs)) 596 - ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) 597 - : 0; 598 - 599 591 retval = usb_ep_queue(in, req, GFP_ATOMIC); 600 592 switch (retval) { 601 593 default:
+8
drivers/usb/host/pci-quirks.c
··· 995 995 } 996 996 val = readl(base + ext_cap_offset); 997 997 998 + /* Auto handoff never worked for these devices. Force it and continue */ 999 + if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) || 1000 + (pdev->vendor == PCI_VENDOR_ID_RENESAS 1001 + && pdev->device == 0x0014)) { 1002 + val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; 1003 + writel(val, base + ext_cap_offset); 1004 + } 1005 + 998 1006 /* If the BIOS owns the HC, signal that the OS wants it, and wait */ 999 1007 if (val & XHCI_HC_BIOS_OWNED) { 1000 1008 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+2 -1
drivers/usb/musb/da8xx.c
··· 479 479 480 480 glue->phy = devm_phy_get(&pdev->dev, "usb-phy"); 481 481 if (IS_ERR(glue->phy)) { 482 - dev_err(&pdev->dev, "failed to get phy\n"); 482 + if (PTR_ERR(glue->phy) != -EPROBE_DEFER) 483 + dev_err(&pdev->dev, "failed to get phy\n"); 483 484 return PTR_ERR(glue->phy); 484 485 } 485 486
-5
drivers/usb/musb/musb_core.c
··· 2114 2114 musb->io.ep_offset = musb_flat_ep_offset; 2115 2115 musb->io.ep_select = musb_flat_ep_select; 2116 2116 } 2117 - /* And override them with platform specific ops if specified. */ 2118 - if (musb->ops->ep_offset) 2119 - musb->io.ep_offset = musb->ops->ep_offset; 2120 - if (musb->ops->ep_select) 2121 - musb->io.ep_select = musb->ops->ep_select; 2122 2117 2123 2118 /* At least tusb6010 has its own offsets */ 2124 2119 if (musb->ops->ep_offset)
+13 -3
drivers/uwb/lc-rc.c
··· 56 56 struct uwb_rc *rc = NULL; 57 57 58 58 dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); 59 - if (dev) 59 + if (dev) { 60 60 rc = dev_get_drvdata(dev); 61 + put_device(dev); 62 + } 63 + 61 64 return rc; 62 65 } 63 66 ··· 470 467 if (dev) { 471 468 rc = dev_get_drvdata(dev); 472 469 __uwb_rc_get(rc); 470 + put_device(dev); 473 471 } 472 + 474 473 return rc; 475 474 } 476 475 EXPORT_SYMBOL_GPL(__uwb_rc_try_get); ··· 525 520 526 521 dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev, 527 522 find_rc_grandpa); 528 - if (dev) 523 + if (dev) { 529 524 rc = dev_get_drvdata(dev); 525 + put_device(dev); 526 + } 527 + 530 528 return rc; 531 529 } 532 530 EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); ··· 561 553 struct uwb_rc *rc = NULL; 562 554 563 555 dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev); 564 - if (dev) 556 + if (dev) { 565 557 rc = dev_get_drvdata(dev); 558 + put_device(dev); 559 + } 566 560 567 561 return rc; 568 562 }
+2
drivers/uwb/pal.c
··· 97 97 98 98 dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc); 99 99 100 + put_device(dev); 101 + 100 102 return (dev != NULL); 101 103 } 102 104
+2 -2
drivers/video/fbdev/amba-clcd-versatile.c
··· 526 526 np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, 527 527 &clcd_id); 528 528 if (!np) { 529 - dev_err(dev, "no Versatile syscon node\n"); 530 - return -ENODEV; 529 + /* Vexpress does not have this */ 530 + return 0; 531 531 } 532 532 versatile_clcd_type = (enum versatile_clcd)clcd_id->data; 533 533
+114 -103
fs/aio.c
··· 1078 1078 unsigned tail, pos, head; 1079 1079 unsigned long flags; 1080 1080 1081 + if (kiocb->ki_flags & IOCB_WRITE) { 1082 + struct file *file = kiocb->ki_filp; 1083 + 1084 + /* 1085 + * Tell lockdep we inherited freeze protection from submission 1086 + * thread. 1087 + */ 1088 + __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); 1089 + file_end_write(file); 1090 + } 1091 + 1081 1092 /* 1082 1093 * Special case handling for sync iocbs: 1083 1094 * - events go directly into the iocb for fast handling ··· 1403 1392 return -EINVAL; 1404 1393 } 1405 1394 1406 - typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *); 1407 - 1408 - static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len, 1409 - struct iovec **iovec, 1410 - bool compat, 1411 - struct iov_iter *iter) 1395 + static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec, 1396 + bool vectored, bool compat, struct iov_iter *iter) 1412 1397 { 1398 + void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; 1399 + size_t len = iocb->aio_nbytes; 1400 + 1401 + if (!vectored) { 1402 + ssize_t ret = import_single_range(rw, buf, len, *iovec, iter); 1403 + *iovec = NULL; 1404 + return ret; 1405 + } 1413 1406 #ifdef CONFIG_COMPAT 1414 1407 if (compat) 1415 - return compat_import_iovec(rw, 1416 - (struct compat_iovec __user *)buf, 1417 - len, UIO_FASTIOV, iovec, iter); 1408 + return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec, 1409 + iter); 1418 1410 #endif 1419 - return import_iovec(rw, (struct iovec __user *)buf, 1420 - len, UIO_FASTIOV, iovec, iter); 1411 + return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter); 1421 1412 } 1422 1413 1423 - /* 1424 - * aio_run_iocb: 1425 - * Performs the initial checks and io submission. 
1426 - */ 1427 - static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, 1428 - char __user *buf, size_t len, bool compat) 1414 + static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret) 1429 1415 { 1430 - struct file *file = req->ki_filp; 1431 - ssize_t ret; 1432 - int rw; 1433 - fmode_t mode; 1434 - rw_iter_op *iter_op; 1435 - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1436 - struct iov_iter iter; 1437 - 1438 - switch (opcode) { 1439 - case IOCB_CMD_PREAD: 1440 - case IOCB_CMD_PREADV: 1441 - mode = FMODE_READ; 1442 - rw = READ; 1443 - iter_op = file->f_op->read_iter; 1444 - goto rw_common; 1445 - 1446 - case IOCB_CMD_PWRITE: 1447 - case IOCB_CMD_PWRITEV: 1448 - mode = FMODE_WRITE; 1449 - rw = WRITE; 1450 - iter_op = file->f_op->write_iter; 1451 - goto rw_common; 1452 - rw_common: 1453 - if (unlikely(!(file->f_mode & mode))) 1454 - return -EBADF; 1455 - 1456 - if (!iter_op) 1457 - return -EINVAL; 1458 - 1459 - if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV) 1460 - ret = aio_setup_vectored_rw(rw, buf, len, 1461 - &iovec, compat, &iter); 1462 - else { 1463 - ret = import_single_range(rw, buf, len, iovec, &iter); 1464 - iovec = NULL; 1465 - } 1466 - if (!ret) 1467 - ret = rw_verify_area(rw, file, &req->ki_pos, 1468 - iov_iter_count(&iter)); 1469 - if (ret < 0) { 1470 - kfree(iovec); 1471 - return ret; 1472 - } 1473 - 1474 - if (rw == WRITE) 1475 - file_start_write(file); 1476 - 1477 - ret = iter_op(req, &iter); 1478 - 1479 - if (rw == WRITE) 1480 - file_end_write(file); 1481 - kfree(iovec); 1482 - break; 1483 - 1484 - case IOCB_CMD_FDSYNC: 1485 - if (!file->f_op->aio_fsync) 1486 - return -EINVAL; 1487 - 1488 - ret = file->f_op->aio_fsync(req, 1); 1489 - break; 1490 - 1491 - case IOCB_CMD_FSYNC: 1492 - if (!file->f_op->aio_fsync) 1493 - return -EINVAL; 1494 - 1495 - ret = file->f_op->aio_fsync(req, 0); 1496 - break; 1497 - 1498 - default: 1499 - pr_debug("EINVAL: no operation provided\n"); 1500 - return -EINVAL; 1501 - } 
1502 - 1503 - if (ret != -EIOCBQUEUED) { 1416 + switch (ret) { 1417 + case -EIOCBQUEUED: 1418 + return ret; 1419 + case -ERESTARTSYS: 1420 + case -ERESTARTNOINTR: 1421 + case -ERESTARTNOHAND: 1422 + case -ERESTART_RESTARTBLOCK: 1504 1423 /* 1505 1424 * There's no easy way to restart the syscall since other AIO's 1506 1425 * may be already running. Just fail this IO with EINTR. 1507 1426 */ 1508 - if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || 1509 - ret == -ERESTARTNOHAND || 1510 - ret == -ERESTART_RESTARTBLOCK)) 1511 - ret = -EINTR; 1427 + ret = -EINTR; 1428 + /*FALLTHRU*/ 1429 + default: 1512 1430 aio_complete(req, ret, 0); 1431 + return 0; 1513 1432 } 1433 + } 1514 1434 1515 - return 0; 1435 + static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, 1436 + bool compat) 1437 + { 1438 + struct file *file = req->ki_filp; 1439 + struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1440 + struct iov_iter iter; 1441 + ssize_t ret; 1442 + 1443 + if (unlikely(!(file->f_mode & FMODE_READ))) 1444 + return -EBADF; 1445 + if (unlikely(!file->f_op->read_iter)) 1446 + return -EINVAL; 1447 + 1448 + ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); 1449 + if (ret) 1450 + return ret; 1451 + ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); 1452 + if (!ret) 1453 + ret = aio_ret(req, file->f_op->read_iter(req, &iter)); 1454 + kfree(iovec); 1455 + return ret; 1456 + } 1457 + 1458 + static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, 1459 + bool compat) 1460 + { 1461 + struct file *file = req->ki_filp; 1462 + struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1463 + struct iov_iter iter; 1464 + ssize_t ret; 1465 + 1466 + if (unlikely(!(file->f_mode & FMODE_WRITE))) 1467 + return -EBADF; 1468 + if (unlikely(!file->f_op->write_iter)) 1469 + return -EINVAL; 1470 + 1471 + ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); 1472 + if (ret) 1473 + return ret; 
1474 + ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); 1475 + if (!ret) { 1476 + req->ki_flags |= IOCB_WRITE; 1477 + file_start_write(file); 1478 + ret = aio_ret(req, file->f_op->write_iter(req, &iter)); 1479 + /* 1480 + * We release freeze protection in aio_complete(). Fool lockdep 1481 + * by telling it the lock got released so that it doesn't 1482 + * complain about held lock when we return to userspace. 1483 + */ 1484 + __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); 1485 + } 1486 + kfree(iovec); 1487 + return ret; 1516 1488 } 1517 1489 1518 1490 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1519 1491 struct iocb *iocb, bool compat) 1520 1492 { 1521 1493 struct aio_kiocb *req; 1494 + struct file *file; 1522 1495 ssize_t ret; 1523 1496 1524 1497 /* enforce forwards compatibility on users */ ··· 1525 1530 if (unlikely(!req)) 1526 1531 return -EAGAIN; 1527 1532 1528 - req->common.ki_filp = fget(iocb->aio_fildes); 1533 + req->common.ki_filp = file = fget(iocb->aio_fildes); 1529 1534 if (unlikely(!req->common.ki_filp)) { 1530 1535 ret = -EBADF; 1531 1536 goto out_put_req; ··· 1560 1565 req->ki_user_iocb = user_iocb; 1561 1566 req->ki_user_data = iocb->aio_data; 1562 1567 1563 - ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode, 1564 - (char __user *)(unsigned long)iocb->aio_buf, 1565 - iocb->aio_nbytes, 1566 - compat); 1567 - if (ret) 1568 - goto out_put_req; 1568 + get_file(file); 1569 + switch (iocb->aio_lio_opcode) { 1570 + case IOCB_CMD_PREAD: 1571 + ret = aio_read(&req->common, iocb, false, compat); 1572 + break; 1573 + case IOCB_CMD_PWRITE: 1574 + ret = aio_write(&req->common, iocb, false, compat); 1575 + break; 1576 + case IOCB_CMD_PREADV: 1577 + ret = aio_read(&req->common, iocb, true, compat); 1578 + break; 1579 + case IOCB_CMD_PWRITEV: 1580 + ret = aio_write(&req->common, iocb, true, compat); 1581 + break; 1582 + default: 1583 + pr_debug("invalid aio operation %d\n", 
iocb->aio_lio_opcode); 1584 + ret = -EINVAL; 1585 + break; 1586 + } 1587 + fput(file); 1569 1588 1589 + if (ret && ret != -EIOCBQUEUED) 1590 + goto out_put_req; 1570 1591 return 0; 1571 1592 out_put_req: 1572 1593 put_reqs_available(ctx, 1);
-1
fs/ceph/file.c
··· 1770 1770 .fsync = ceph_fsync, 1771 1771 .lock = ceph_lock, 1772 1772 .flock = ceph_flock, 1773 - .splice_read = generic_file_splice_read, 1774 1773 .splice_write = iter_file_splice_write, 1775 1774 .unlocked_ioctl = ceph_ioctl, 1776 1775 .compat_ioctl = ceph_ioctl,
+3
fs/coredump.c
··· 1 1 #include <linux/slab.h> 2 2 #include <linux/file.h> 3 3 #include <linux/fdtable.h> 4 + #include <linux/freezer.h> 4 5 #include <linux/mm.h> 5 6 #include <linux/stat.h> 6 7 #include <linux/fcntl.h> ··· 424 423 if (core_waiters > 0) { 425 424 struct core_thread *ptr; 426 425 426 + freezer_do_not_count(); 427 427 wait_for_completion(&core_state->startup); 428 + freezer_count(); 428 429 /* 429 430 * Wait for all the threads to become inactive, so that 430 431 * all the thread context (extended register state, like
+21 -32
fs/crypto/fname.c
··· 39 39 static int fname_encrypt(struct inode *inode, 40 40 const struct qstr *iname, struct fscrypt_str *oname) 41 41 { 42 - u32 ciphertext_len; 43 42 struct skcipher_request *req = NULL; 44 43 DECLARE_FS_COMPLETION_RESULT(ecr); 45 44 struct fscrypt_info *ci = inode->i_crypt_info; 46 45 struct crypto_skcipher *tfm = ci->ci_ctfm; 47 46 int res = 0; 48 47 char iv[FS_CRYPTO_BLOCK_SIZE]; 49 - struct scatterlist src_sg, dst_sg; 48 + struct scatterlist sg; 50 49 int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); 51 - char *workbuf, buf[32], *alloc_buf = NULL; 52 - unsigned lim; 50 + unsigned int lim; 51 + unsigned int cryptlen; 53 52 54 53 lim = inode->i_sb->s_cop->max_namelen(inode); 55 54 if (iname->len <= 0 || iname->len > lim) 56 55 return -EIO; 57 56 58 - ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE); 59 - ciphertext_len = round_up(ciphertext_len, padding); 60 - ciphertext_len = min(ciphertext_len, lim); 57 + /* 58 + * Copy the filename to the output buffer for encrypting in-place and 59 + * pad it with the needed number of NUL bytes. 
60 + */ 61 + cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE); 62 + cryptlen = round_up(cryptlen, padding); 63 + cryptlen = min(cryptlen, lim); 64 + memcpy(oname->name, iname->name, iname->len); 65 + memset(oname->name + iname->len, 0, cryptlen - iname->len); 61 66 62 - if (ciphertext_len <= sizeof(buf)) { 63 - workbuf = buf; 64 - } else { 65 - alloc_buf = kmalloc(ciphertext_len, GFP_NOFS); 66 - if (!alloc_buf) 67 - return -ENOMEM; 68 - workbuf = alloc_buf; 69 - } 67 + /* Initialize the IV */ 68 + memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); 70 69 71 - /* Allocate request */ 70 + /* Set up the encryption request */ 72 71 req = skcipher_request_alloc(tfm, GFP_NOFS); 73 72 if (!req) { 74 73 printk_ratelimited(KERN_ERR 75 - "%s: crypto_request_alloc() failed\n", __func__); 76 - kfree(alloc_buf); 74 + "%s: skcipher_request_alloc() failed\n", __func__); 77 75 return -ENOMEM; 78 76 } 79 77 skcipher_request_set_callback(req, 80 78 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 81 79 fname_crypt_complete, &ecr); 80 + sg_init_one(&sg, oname->name, cryptlen); 81 + skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); 82 82 83 - /* Copy the input */ 84 - memcpy(workbuf, iname->name, iname->len); 85 - if (iname->len < ciphertext_len) 86 - memset(workbuf + iname->len, 0, ciphertext_len - iname->len); 87 - 88 - /* Initialize IV */ 89 - memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); 90 - 91 - /* Create encryption request */ 92 - sg_init_one(&src_sg, workbuf, ciphertext_len); 93 - sg_init_one(&dst_sg, oname->name, ciphertext_len); 94 - skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv); 83 + /* Do the encryption */ 95 84 res = crypto_skcipher_encrypt(req); 96 85 if (res == -EINPROGRESS || res == -EBUSY) { 86 + /* Request is being completed asynchronously; wait for it */ 97 87 wait_for_completion(&ecr.completion); 98 88 res = ecr.res; 99 89 } 100 - kfree(alloc_buf); 101 90 skcipher_request_free(req); 102 91 if (res < 0) { 103 92 
printk_ratelimited(KERN_ERR ··· 94 105 return res; 95 106 } 96 107 97 - oname->len = ciphertext_len; 108 + oname->len = cryptlen; 98 109 return 0; 99 110 } 100 111
+13 -3
fs/crypto/keyinfo.c
··· 185 185 struct crypto_skcipher *ctfm; 186 186 const char *cipher_str; 187 187 int keysize; 188 - u8 raw_key[FS_MAX_KEY_SIZE]; 188 + u8 *raw_key = NULL; 189 189 int res; 190 190 191 191 res = fscrypt_initialize(); ··· 238 238 if (res) 239 239 goto out; 240 240 241 + /* 242 + * This cannot be a stack buffer because it is passed to the scatterlist 243 + * crypto API as part of key derivation. 244 + */ 245 + res = -ENOMEM; 246 + raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS); 247 + if (!raw_key) 248 + goto out; 249 + 241 250 if (fscrypt_dummy_context_enabled(inode)) { 242 251 memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); 243 252 goto got_key; ··· 285 276 if (res) 286 277 goto out; 287 278 288 - memzero_explicit(raw_key, sizeof(raw_key)); 279 + kzfree(raw_key); 280 + raw_key = NULL; 289 281 if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { 290 282 put_crypt_info(crypt_info); 291 283 goto retry; ··· 297 287 if (res == -ENOKEY) 298 288 res = 0; 299 289 put_crypt_info(crypt_info); 300 - memzero_explicit(raw_key, sizeof(raw_key)); 290 + kzfree(raw_key); 301 291 return res; 302 292 } 303 293
+1
fs/ext4/ext4.h
··· 235 235 #define EXT4_MAX_BLOCK_SIZE 65536 236 236 #define EXT4_MIN_BLOCK_LOG_SIZE 10 237 237 #define EXT4_MAX_BLOCK_LOG_SIZE 16 238 + #define EXT4_MAX_CLUSTER_LOG_SIZE 30 238 239 #ifdef __KERNEL__ 239 240 # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) 240 241 #else
+16 -1
fs/ext4/super.c
··· 3565 3565 if (blocksize < EXT4_MIN_BLOCK_SIZE || 3566 3566 blocksize > EXT4_MAX_BLOCK_SIZE) { 3567 3567 ext4_msg(sb, KERN_ERR, 3568 - "Unsupported filesystem blocksize %d", blocksize); 3568 + "Unsupported filesystem blocksize %d (%d log_block_size)", 3569 + blocksize, le32_to_cpu(es->s_log_block_size)); 3570 + goto failed_mount; 3571 + } 3572 + if (le32_to_cpu(es->s_log_block_size) > 3573 + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 3574 + ext4_msg(sb, KERN_ERR, 3575 + "Invalid log block size: %u", 3576 + le32_to_cpu(es->s_log_block_size)); 3569 3577 goto failed_mount; 3570 3578 } 3571 3579 ··· 3703 3695 ext4_msg(sb, KERN_ERR, 3704 3696 "cluster size (%d) smaller than " 3705 3697 "block size (%d)", clustersize, blocksize); 3698 + goto failed_mount; 3699 + } 3700 + if (le32_to_cpu(es->s_log_cluster_size) > 3701 + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 3702 + ext4_msg(sb, KERN_ERR, 3703 + "Invalid log cluster size: %u", 3704 + le32_to_cpu(es->s_log_cluster_size)); 3706 3705 goto failed_mount; 3707 3706 } 3708 3707 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
+5
fs/fuse/dir.c
··· 286 286 .d_release = fuse_dentry_release, 287 287 }; 288 288 289 + const struct dentry_operations fuse_root_dentry_operations = { 290 + .d_init = fuse_dentry_init, 291 + .d_release = fuse_dentry_release, 292 + }; 293 + 289 294 int fuse_valid_type(int m) 290 295 { 291 296 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
+6
fs/fuse/file.c
··· 1985 1985 { 1986 1986 struct inode *inode = page->mapping->host; 1987 1987 1988 + /* Haven't copied anything? Skip zeroing, size extending, dirtying. */ 1989 + if (!copied) 1990 + goto unlock; 1991 + 1988 1992 if (!PageUptodate(page)) { 1989 1993 /* Zero any unwritten bytes at the end of the page */ 1990 1994 size_t endoff = (pos + copied) & ~PAGE_MASK; ··· 1999 1995 2000 1996 fuse_write_update_size(inode, pos + copied); 2001 1997 set_page_dirty(page); 1998 + 1999 + unlock: 2002 2000 unlock_page(page); 2003 2001 put_page(page); 2004 2002
+1
fs/fuse/fuse_i.h
··· 692 692 extern const struct file_operations fuse_dev_operations; 693 693 694 694 extern const struct dentry_operations fuse_dentry_operations; 695 + extern const struct dentry_operations fuse_root_dentry_operations; 695 696 696 697 /** 697 698 * Inode to nodeid comparison.
+2 -1
fs/fuse/inode.c
··· 1131 1131 1132 1132 err = -ENOMEM; 1133 1133 root = fuse_get_root_inode(sb, d.rootmode); 1134 + sb->s_d_op = &fuse_root_dentry_operations; 1134 1135 root_dentry = d_make_root(root); 1135 1136 if (!root_dentry) 1136 1137 goto err_dev_free; 1137 - /* only now - we want root dentry with NULL ->d_op */ 1138 + /* Root dentry doesn't have .d_revalidate */ 1138 1139 sb->s_d_op = &fuse_dentry_operations; 1139 1140 1140 1141 init_req = fuse_request_alloc(0);
+2 -1
fs/nfs/client.c
··· 314 314 /* Match the full socket address */ 315 315 if (!rpc_cmp_addr_port(sap, clap)) 316 316 /* Match all xprt_switch full socket addresses */ 317 - if (!rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient, 317 + if (IS_ERR(clp->cl_rpcclient) || 318 + !rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient, 318 319 sap)) 319 320 continue; 320 321
+1 -1
fs/nfs/namespace.c
··· 98 98 return end; 99 99 } 100 100 namelen = strlen(base); 101 - if (flags & NFS_PATH_CANONICAL) { 101 + if (*end == '/') { 102 102 /* Strip off excess slashes in base string */ 103 103 while (namelen > 0 && base[namelen - 1] == '/') 104 104 namelen--;
+7 -5
fs/nfs/nfs4session.c
··· 178 178 __must_hold(&tbl->slot_tbl_lock) 179 179 { 180 180 struct nfs4_slot *slot; 181 + int ret; 181 182 182 183 slot = nfs4_lookup_slot(tbl, slotid); 183 - if (IS_ERR(slot)) 184 - return PTR_ERR(slot); 185 - *seq_nr = slot->seq_nr; 186 - return 0; 184 + ret = PTR_ERR_OR_ZERO(slot); 185 + if (!ret) 186 + *seq_nr = slot->seq_nr; 187 + 188 + return ret; 187 189 } 188 190 189 191 /* ··· 198 196 static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl, 199 197 u32 slotid, u32 seq_nr) 200 198 { 201 - u32 cur_seq; 199 + u32 cur_seq = 0; 202 200 bool ret = false; 203 201 204 202 spin_lock(&tbl->slot_tbl_lock);
+2
fs/nfs/pnfs.c
··· 146 146 u32 id; 147 147 int i; 148 148 149 + if (fsinfo->nlayouttypes == 0) 150 + goto out_no_driver; 149 151 if (!(server->nfs_client->cl_exchange_flags & 150 152 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) { 151 153 printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
-2
fs/ntfs/dir.c
··· 1544 1544 .iterate = ntfs_readdir, /* Read directory contents. */ 1545 1545 #ifdef NTFS_RW 1546 1546 .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ 1547 - /*.aio_fsync = ,*/ /* Sync all outstanding async 1548 - i/o operations on a kiocb. */ 1549 1547 #endif /* NTFS_RW */ 1550 1548 /*.ioctl = ,*/ /* Perform function on the 1551 1549 mounted filesystem. */
+1 -1
fs/ocfs2/dir.c
··· 3699 3699 static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb, 3700 3700 struct ocfs2_dx_root_block *dx_root) 3701 3701 { 3702 - int credits = ocfs2_clusters_to_blocks(osb->sb, 2); 3702 + int credits = ocfs2_clusters_to_blocks(osb->sb, 3); 3703 3703 3704 3704 credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list); 3705 3705 credits += ocfs2_quota_trans_credits(osb->sb);
+2
fs/orangefs/orangefs-debugfs.c
··· 114 114 }; 115 115 116 116 const struct file_operations debug_help_fops = { 117 + .owner = THIS_MODULE, 117 118 .open = orangefs_debug_help_open, 118 119 .read = seq_read, 119 120 .release = seq_release, ··· 122 121 }; 123 122 124 123 static const struct file_operations kernel_debug_fops = { 124 + .owner = THIS_MODULE, 125 125 .open = orangefs_debug_open, 126 126 .read = orangefs_debug_read, 127 127 .write = orangefs_debug_write,
-5
fs/splice.c
··· 299 299 { 300 300 struct iov_iter to; 301 301 struct kiocb kiocb; 302 - loff_t isize; 303 302 int idx, ret; 304 - 305 - isize = i_size_read(in->f_mapping->host); 306 - if (unlikely(*ppos >= isize)) 307 - return 0; 308 303 309 304 iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len); 310 305 idx = to.idx;
+14 -8
fs/xattr.c
··· 170 170 const void *value, size_t size, int flags) 171 171 { 172 172 struct inode *inode = dentry->d_inode; 173 - int error = -EOPNOTSUPP; 173 + int error = -EAGAIN; 174 174 int issec = !strncmp(name, XATTR_SECURITY_PREFIX, 175 175 XATTR_SECURITY_PREFIX_LEN); 176 176 ··· 183 183 security_inode_post_setxattr(dentry, name, value, 184 184 size, flags); 185 185 } 186 - } else if (issec) { 187 - const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; 188 - 186 + } else { 189 187 if (unlikely(is_bad_inode(inode))) 190 188 return -EIO; 191 - error = security_inode_setsecurity(inode, suffix, value, 192 - size, flags); 193 - if (!error) 194 - fsnotify_xattr(dentry); 189 + } 190 + if (error == -EAGAIN) { 191 + error = -EOPNOTSUPP; 192 + 193 + if (issec) { 194 + const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; 195 + 196 + error = security_inode_setsecurity(inode, suffix, value, 197 + size, flags); 198 + if (!error) 199 + fsnotify_xattr(dentry); 200 + } 195 201 } 196 202 197 203 return error;
+5 -12
fs/xfs/libxfs/xfs_defer.c
··· 199 199 struct xfs_defer_pending *dfp; 200 200 201 201 list_for_each_entry(dfp, &dop->dop_intake, dfp_list) { 202 - trace_xfs_defer_intake_work(tp->t_mountp, dfp); 203 202 dfp->dfp_intent = dfp->dfp_type->create_intent(tp, 204 203 dfp->dfp_count); 204 + trace_xfs_defer_intake_work(tp->t_mountp, dfp); 205 205 list_sort(tp->t_mountp, &dfp->dfp_work, 206 206 dfp->dfp_type->diff_items); 207 207 list_for_each(li, &dfp->dfp_work) ··· 221 221 struct xfs_defer_pending *dfp; 222 222 223 223 trace_xfs_defer_trans_abort(tp->t_mountp, dop); 224 - /* 225 - * If the transaction was committed, drop the intent reference 226 - * since we're bailing out of here. The other reference is 227 - * dropped when the intent hits the AIL. If the transaction 228 - * was not committed, the intent is freed by the intent item 229 - * unlock handler on abort. 230 - */ 231 - if (!dop->dop_committed) 232 - return; 233 224 234 - /* Abort intent items. */ 225 + /* Abort intent items that don't have a done item. */ 235 226 list_for_each_entry(dfp, &dop->dop_pending, dfp_list) { 236 227 trace_xfs_defer_pending_abort(tp->t_mountp, dfp); 237 - if (!dfp->dfp_done) 228 + if (dfp->dfp_intent && !dfp->dfp_done) { 238 229 dfp->dfp_type->abort_intent(dfp->dfp_intent); 230 + dfp->dfp_intent = NULL; 231 + } 239 232 } 240 233 241 234 /* Shut down FS. */
+70 -94
include/acpi/actbl.h
··· 230 230 /* Fields common to all versions of the FADT */ 231 231 232 232 struct acpi_table_fadt { 233 - struct acpi_table_header header; /* [V1] Common ACPI table header */ 234 - u32 facs; /* [V1] 32-bit physical address of FACS */ 235 - u32 dsdt; /* [V1] 32-bit physical address of DSDT */ 236 - u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ 237 - u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. */ 238 - u16 sci_interrupt; /* [V1] System vector of SCI interrupt */ 239 - u32 smi_command; /* [V1] 32-bit Port address of SMI command port */ 240 - u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */ 241 - u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */ 242 - u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */ 243 - u8 pstate_control; /* [V1] Processor performance state control */ 244 - u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */ 245 - u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */ 246 - u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */ 247 - u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */ 248 - u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */ 249 - u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ 250 - u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */ 251 - u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */ 252 - u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */ 253 - u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */ 254 - u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */ 255 - u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */ 256 - u8 gpe0_block_length; /* [V1] Byte 
Length of ports at gpe0_block */ 257 - u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */ 258 - u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */ 259 - u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */ 260 - u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */ 261 - u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */ 262 - u16 flush_size; /* [V1] Processor memory cache line width, in bytes */ 263 - u16 flush_stride; /* [V1] Number of flush strides that need to be read */ 264 - u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */ 265 - u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */ 266 - u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */ 267 - u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */ 268 - u8 century; /* [V1] Index to century in RTC CMOS RAM */ 269 - u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */ 270 - u8 reserved; /* [V1] Reserved, must be zero */ 271 - u32 flags; /* [V1] Miscellaneous flag bits (see below for individual flags) */ 272 - /* End of Version 1 FADT fields (ACPI 1.0) */ 273 - 274 - struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */ 275 - u8 reset_value; /* [V3] Value to write to the reset_register port to reset the system */ 276 - u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ 277 - u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */ 278 - u64 Xfacs; /* [V3] 64-bit physical address of FACS */ 279 - u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */ 280 - struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */ 281 - struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */ 282 - 
struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */ 283 - struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */ 284 - struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */ 285 - struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ 286 - struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */ 287 - struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */ 288 - /* End of Version 3 FADT fields (ACPI 2.0) */ 289 - 290 - struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */ 291 - /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */ 292 - 293 - struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */ 294 - /* End of Version 5 FADT fields (ACPI 5.0) */ 295 - 296 - u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */ 297 - /* End of Version 6 FADT fields (ACPI 6.0) */ 298 - 233 + struct acpi_table_header header; /* Common ACPI table header */ 234 + u32 facs; /* 32-bit physical address of FACS */ 235 + u32 dsdt; /* 32-bit physical address of DSDT */ 236 + u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ 237 + u8 preferred_profile; /* Conveys preferred power management profile to OSPM. 
*/ 238 + u16 sci_interrupt; /* System vector of SCI interrupt */ 239 + u32 smi_command; /* 32-bit Port address of SMI command port */ 240 + u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */ 241 + u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */ 242 + u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */ 243 + u8 pstate_control; /* Processor performance state control */ 244 + u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */ 245 + u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */ 246 + u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */ 247 + u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */ 248 + u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */ 249 + u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ 250 + u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */ 251 + u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */ 252 + u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */ 253 + u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */ 254 + u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */ 255 + u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */ 256 + u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */ 257 + u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */ 258 + u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */ 259 + u8 cst_control; /* Support for the _CST object and C-States change notification */ 260 + u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */ 261 + u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */ 262 + u16 flush_size; /* Processor memory cache line width, in bytes */ 263 + u16 flush_stride; /* Number of flush strides that need to be read */ 
264 + u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */ 265 + u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ 266 + u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ 267 + u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ 268 + u8 century; /* Index to century in RTC CMOS RAM */ 269 + u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ 270 + u8 reserved; /* Reserved, must be zero */ 271 + u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ 272 + struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ 273 + u8 reset_value; /* Value to write to the reset_register port to reset the system */ 274 + u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ 275 + u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */ 276 + u64 Xfacs; /* 64-bit physical address of FACS */ 277 + u64 Xdsdt; /* 64-bit physical address of DSDT */ 278 + struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */ 279 + struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */ 280 + struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */ 281 + struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */ 282 + struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */ 283 + struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ 284 + struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ 285 + struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ 286 + struct acpi_generic_address sleep_control; /* 64-bit Sleep Control 
register (ACPI 5.0) */ 287 + struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */ 288 + u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */ 299 289 }; 300 290 301 291 /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ ··· 301 311 302 312 /* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ 303 313 304 - #define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */ 305 - #define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */ 314 + #define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */ 315 + #define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */ 306 316 307 317 /* Masks for FADT flags */ 308 318 ··· 399 409 * match the expected length. In other words, the length of the 400 410 * FADT is the bottom line as to what the version really is. 401 411 * 402 - * NOTE: There is no officialy released V2 of the FADT. This 403 - * version was used only for prototyping and testing during the 404 - * 32-bit to 64-bit transition. V3 was the first official 64-bit 405 - * version of the FADT. 406 - * 407 - * Update this list of defines when a new version of the FADT is 408 - * added to the ACPI specification. Note that the FADT version is 409 - * only incremented when new fields are appended to the existing 410 - * version. Therefore, the FADT version is competely independent 411 - * from the version of the ACPI specification where it is 412 - * defined. 
413 - * 414 - * For reference, the various FADT lengths are as follows: 415 - * FADT V1 size: 0x074 ACPI 1.0 416 - * FADT V3 size: 0x0F4 ACPI 2.0 417 - * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0 418 - * FADT V5 size: 0x10C ACPI 5.0 419 - * FADT V6 size: 0x114 ACPI 6.0 412 + * For reference, the values below are as follows: 413 + * FADT V1 size: 0x074 414 + * FADT V2 size: 0x084 415 + * FADT V3 size: 0x0F4 416 + * FADT V4 size: 0x0F4 417 + * FADT V5 size: 0x10C 418 + * FADT V6 size: 0x114 420 419 */ 421 - #define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */ 422 - #define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */ 423 - #define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */ 424 - #define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */ 425 - #define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */ 420 + #define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) 421 + #define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1) 422 + #define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) 423 + #define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) 424 + #define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) 426 425 427 - /* Update these when new FADT versions are added */ 428 - 429 - #define ACPI_FADT_MAX_VERSION 6 430 426 #define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" 431 427 432 428 #endif /* __ACTBL_H__ */
+3
include/acpi/platform/aclinux.h
··· 191 191 #ifndef __init 192 192 #define __init 193 193 #endif 194 + #ifndef __iomem 195 + #define __iomem 196 + #endif 194 197 195 198 /* Host-dependent types and defines for user-space ACPICA */ 196 199
+3
include/asm-generic/sections.h
··· 14 14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* 15 15 * and/or .init.* sections. 16 16 * [__start_rodata, __end_rodata]: contains .rodata.* sections 17 + * [__start_data_ro_after_init, __end_data_ro_after_init]: 18 + * contains data.ro_after_init section 17 19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* 18 20 * may be out of this range on some architectures. 19 21 * [_sinittext, _einittext]: contains .init.text.* sections ··· 33 31 extern char __bss_start[], __bss_stop[]; 34 32 extern char __init_begin[], __init_end[]; 35 33 extern char _sinittext[], _einittext[]; 34 + extern char __start_data_ro_after_init[], __end_data_ro_after_init[]; 36 35 extern char _end[]; 37 36 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; 38 37 extern char __kprobes_text_start[], __kprobes_text_end[];
+4 -1
include/asm-generic/vmlinux.lds.h
··· 259 259 * own by defining an empty RO_AFTER_INIT_DATA. 260 260 */ 261 261 #ifndef RO_AFTER_INIT_DATA 262 - #define RO_AFTER_INIT_DATA *(.data..ro_after_init) 262 + #define RO_AFTER_INIT_DATA \ 263 + __start_data_ro_after_init = .; \ 264 + *(.data..ro_after_init) \ 265 + __end_data_ro_after_init = .; 263 266 #endif 264 267 265 268 /*
+2 -1
include/linux/acpi.h
··· 555 555 int acpi_device_modalias(struct device *, char *, int); 556 556 void acpi_walk_dep_device_list(acpi_handle handle); 557 557 558 - struct platform_device *acpi_create_platform_device(struct acpi_device *); 558 + struct platform_device *acpi_create_platform_device(struct acpi_device *, 559 + struct property_entry *); 559 560 #define ACPI_PTR(_ptr) (_ptr) 560 561 561 562 static inline void acpi_device_set_enumerated(struct acpi_device *adev)
+3 -2
include/linux/bpf_verifier.h
··· 14 14 * are obviously wrong for any sort of memory access. 15 15 */ 16 16 #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) 17 - #define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) 17 + #define BPF_REGISTER_MIN_RANGE -1 18 18 19 19 struct bpf_reg_state { 20 20 enum bpf_reg_type type; ··· 22 22 * Used to determine if any memory access using this register will 23 23 * result in a bad access. 24 24 */ 25 - u64 min_value, max_value; 25 + s64 min_value; 26 + u64 max_value; 26 27 union { 27 28 /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ 28 29 s64 imm;
+2
include/linux/ceph/osd_client.h
··· 258 258 struct ceph_entity_addr addr; 259 259 }; 260 260 261 + #define CEPH_LINGER_ID_START 0xffff000000000000ULL 262 + 261 263 struct ceph_osd_client { 262 264 struct ceph_client *client; 263 265
-6
include/linux/console.h
··· 173 173 #endif 174 174 extern bool console_suspend_enabled; 175 175 176 - #ifdef CONFIG_OF 177 - extern void console_set_by_of(void); 178 - #else 179 - static inline void console_set_by_of(void) {} 180 - #endif 181 - 182 176 /* Suspend and resume console messages over PM events */ 183 177 extern void suspend_console(void); 184 178 extern void resume_console(void);
+3 -2
include/linux/frontswap.h
··· 106 106 107 107 static inline void frontswap_init(unsigned type, unsigned long *map) 108 108 { 109 - if (frontswap_enabled()) 110 - __frontswap_init(type, map); 109 + #ifdef CONFIG_FRONTSWAP 110 + __frontswap_init(type, map); 111 + #endif 111 112 } 112 113 113 114 #endif /* _LINUX_FRONTSWAP_H */
+1 -1
include/linux/fs.h
··· 321 321 #define IOCB_HIPRI (1 << 3) 322 322 #define IOCB_DSYNC (1 << 4) 323 323 #define IOCB_SYNC (1 << 5) 324 + #define IOCB_WRITE (1 << 6) 324 325 325 326 struct kiocb { 326 327 struct file *ki_filp; ··· 1710 1709 int (*flush) (struct file *, fl_owner_t id); 1711 1710 int (*release) (struct inode *, struct file *); 1712 1711 int (*fsync) (struct file *, loff_t, loff_t, int datasync); 1713 - int (*aio_fsync) (struct kiocb *, int datasync); 1714 1712 int (*fasync) (int, struct file *, int); 1715 1713 int (*lock) (struct file *, int, struct file_lock *); 1716 1714 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
+1 -1
include/linux/huge_mm.h
··· 22 22 unsigned char *vec); 23 23 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 24 24 unsigned long new_addr, unsigned long old_end, 25 - pmd_t *old_pmd, pmd_t *new_pmd); 25 + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); 26 26 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 27 27 unsigned long addr, pgprot_t newprot, 28 28 int prot_numa);
+1 -1
include/linux/ipv6.h
··· 149 149 { 150 150 #if defined(CONFIG_NET_L3_MASTER_DEV) 151 151 if (!net->ipv4.sysctl_tcp_l3mdev_accept && 152 - ipv6_l3mdev_skb(IP6CB(skb)->flags)) 152 + skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) 153 153 return true; 154 154 #endif 155 155 return false;
+15
include/linux/netdevice.h
··· 3354 3354 bool is_skb_forwardable(const struct net_device *dev, 3355 3355 const struct sk_buff *skb); 3356 3356 3357 + static __always_inline int ____dev_forward_skb(struct net_device *dev, 3358 + struct sk_buff *skb) 3359 + { 3360 + if (skb_orphan_frags(skb, GFP_ATOMIC) || 3361 + unlikely(!is_skb_forwardable(dev, skb))) { 3362 + atomic_long_inc(&dev->rx_dropped); 3363 + kfree_skb(skb); 3364 + return NET_RX_DROP; 3365 + } 3366 + 3367 + skb_scrub_packet(skb, true); 3368 + skb->priority = 0; 3369 + return 0; 3370 + } 3371 + 3357 3372 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 3358 3373 3359 3374 extern int netdev_budget;
+7
include/linux/phy/phy.h
··· 253 253 return -ENOSYS; 254 254 } 255 255 256 + static inline int phy_reset(struct phy *phy) 257 + { 258 + if (!phy) 259 + return 0; 260 + return -ENOSYS; 261 + } 262 + 256 263 static inline int phy_get_bus_width(struct phy *phy) 257 264 { 258 265 return -ENOSYS;
+1
include/linux/sunrpc/svc_xprt.h
··· 25 25 void (*xpo_detach)(struct svc_xprt *); 26 26 void (*xpo_free)(struct svc_xprt *); 27 27 int (*xpo_secure_port)(struct svc_rqst *); 28 + void (*xpo_kill_temp_xprt)(struct svc_xprt *); 28 29 }; 29 30 30 31 struct svc_xprt_class {
+3
include/net/gro_cells.h
··· 68 68 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); 69 69 70 70 __skb_queue_head_init(&cell->napi_skbs); 71 + 72 + set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); 73 + 71 74 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); 72 75 napi_enable(&cell->napi); 73 76 }
+1 -2
include/net/ip.h
··· 47 47 #define IPSKB_REROUTED BIT(4) 48 48 #define IPSKB_DOREDIRECT BIT(5) 49 49 #define IPSKB_FRAG_PMTU BIT(6) 50 - #define IPSKB_FRAG_SEGS BIT(7) 51 - #define IPSKB_L3SLAVE BIT(8) 50 + #define IPSKB_L3SLAVE BIT(7) 52 51 53 52 u16 frag_max_size; 54 53 };
+1
include/net/ip6_tunnel.h
··· 146 146 { 147 147 int pkt_len, err; 148 148 149 + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); 149 150 pkt_len = skb->len - skb_inner_network_offset(skb); 150 151 err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); 151 152 if (unlikely(net_xmit_eval(err)))
+1
include/net/ip_fib.h
··· 243 243 struct netlink_callback *cb); 244 244 int fib_table_flush(struct net *net, struct fib_table *table); 245 245 struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); 246 + void fib_table_flush_external(struct fib_table *table); 246 247 void fib_free_table(struct fib_table *tb); 247 248 248 249 #ifndef CONFIG_IP_MULTIPLE_TABLES
+1 -1
include/net/net_namespace.h
··· 170 170 extern struct list_head net_namespace_list; 171 171 172 172 struct net *get_net_ns_by_pid(pid_t pid); 173 - struct net *get_net_ns_by_fd(int pid); 173 + struct net *get_net_ns_by_fd(int fd); 174 174 175 175 #ifdef CONFIG_SYSCTL 176 176 void ipx_register_sysctl(void);
+1 -2
include/net/netfilter/nf_conntrack_labels.h
··· 30 30 if (net->ct.labels_used == 0) 31 31 return NULL; 32 32 33 - return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, 34 - sizeof(struct nf_conn_labels), GFP_ATOMIC); 33 + return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC); 35 34 #else 36 35 return NULL; 37 36 #endif
+5 -3
include/net/netfilter/nf_tables.h
··· 145 145 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; 146 146 } 147 147 148 - unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); 148 + int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); 149 149 unsigned int nft_parse_register(const struct nlattr *attr); 150 150 int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); 151 151 ··· 542 542 const struct nft_set_ext_tmpl *tmpl, 543 543 const u32 *key, const u32 *data, 544 544 u64 timeout, gfp_t gfp); 545 - void nft_set_elem_destroy(const struct nft_set *set, void *elem); 545 + void nft_set_elem_destroy(const struct nft_set *set, void *elem, 546 + bool destroy_expr); 546 547 547 548 /** 548 549 * struct nft_set_gc_batch_head - nf_tables set garbage collection batch ··· 694 693 { 695 694 int err; 696 695 697 - __module_get(src->ops->type->owner); 698 696 if (src->ops->clone) { 699 697 dst->ops = src->ops; 700 698 err = src->ops->clone(dst, src); ··· 702 702 } else { 703 703 memcpy(dst, src, src->ops->size); 704 704 } 705 + 706 + __module_get(src->ops->type->owner); 705 707 return 0; 706 708 } 707 709
+1 -1
include/net/sctp/sctp.h
··· 152 152 struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, 153 153 struct sctphdr *, struct sctp_association **, 154 154 struct sctp_transport **); 155 - void sctp_err_finish(struct sock *, struct sctp_association *); 155 + void sctp_err_finish(struct sock *, struct sctp_transport *); 156 156 void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, 157 157 struct sctp_transport *t, __u32 pmtu); 158 158 void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
+2 -2
include/net/sock.h
··· 1596 1596 void sock_gen_put(struct sock *sk); 1597 1597 1598 1598 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, 1599 - unsigned int trim_cap); 1599 + unsigned int trim_cap, bool refcounted); 1600 1600 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, 1601 1601 const int nested) 1602 1602 { 1603 - return __sk_receive_skb(sk, skb, nested, 1); 1603 + return __sk_receive_skb(sk, skb, nested, 1, true); 1604 1604 } 1605 1605 1606 1606 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+2 -1
include/net/tcp.h
··· 805 805 { 806 806 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 807 807 if (!net->ipv4.sysctl_tcp_l3mdev_accept && 808 - ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) 808 + skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) 809 809 return true; 810 810 #endif 811 811 return false; ··· 1220 1220 1221 1221 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); 1222 1222 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); 1223 + int tcp_filter(struct sock *sk, struct sk_buff *skb); 1223 1224 1224 1225 #undef STATE_TRACE 1225 1226
-1
include/uapi/linux/atm_zatm.h
··· 14 14 15 15 #include <linux/atmapi.h> 16 16 #include <linux/atmioc.h> 17 - #include <linux/time.h> 18 17 19 18 #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) 20 19 /* get pool statistics */
-2
include/uapi/linux/bpqether.h
··· 5 5 * Defines for the BPQETHER pseudo device driver 6 6 */ 7 7 8 - #ifndef __LINUX_IF_ETHER_H 9 8 #include <linux/if_ether.h> 10 - #endif 11 9 12 10 #define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ 13 11 #define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1)
+7
include/uapi/linux/kvm.h
··· 972 972 __u8 pad[16]; 973 973 }; 974 974 975 + /* For KVM_CAP_ADJUST_CLOCK */ 976 + 977 + /* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */ 978 + #define KVM_CLOCK_TSC_STABLE 2 979 + 975 980 struct kvm_clock_data { 976 981 __u64 clock; 977 982 __u32 flags; 978 983 __u32 pad[9]; 979 984 }; 985 + 986 + /* For KVM_CAP_SW_TLB */ 980 987 981 988 #define KVM_MMU_FSL_BOOKE_NOHV 0 982 989 #define KVM_MMU_FSL_BOOKE_HV 1
+2 -1
kernel/bpf/hashtab.c
··· 687 687 688 688 hlist_for_each_entry_safe(l, n, head, hash_node) { 689 689 hlist_del_rcu(&l->hash_node); 690 - htab_elem_free(htab, l); 690 + if (l->state != HTAB_EXTRA_ELEM_USED) 691 + htab_elem_free(htab, l); 691 692 } 692 693 } 693 694 }
+3 -1
kernel/bpf/syscall.c
··· 194 194 195 195 err = bpf_map_charge_memlock(map); 196 196 if (err) 197 - goto free_map; 197 + goto free_map_nouncharge; 198 198 199 199 err = bpf_map_new_fd(map); 200 200 if (err < 0) ··· 204 204 return err; 205 205 206 206 free_map: 207 + bpf_map_uncharge_memlock(map); 208 + free_map_nouncharge: 207 209 map->ops->map_free(map); 208 210 return err; 209 211 }
+47 -23
kernel/bpf/verifier.c
··· 216 216 reg->map_ptr->key_size, 217 217 reg->map_ptr->value_size); 218 218 if (reg->min_value != BPF_REGISTER_MIN_RANGE) 219 - verbose(",min_value=%llu", 220 - (unsigned long long)reg->min_value); 219 + verbose(",min_value=%lld", 220 + (long long)reg->min_value); 221 221 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 222 222 verbose(",max_value=%llu", 223 223 (unsigned long long)reg->max_value); ··· 758 758 * index'es we need to make sure that whatever we use 759 759 * will have a set floor within our range. 760 760 */ 761 - if ((s64)reg->min_value < 0) { 761 + if (reg->min_value < 0) { 762 762 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 763 763 regno); 764 764 return -EACCES; ··· 1468 1468 { 1469 1469 if (reg->max_value > BPF_REGISTER_MAX_RANGE) 1470 1470 reg->max_value = BPF_REGISTER_MAX_RANGE; 1471 - if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE) 1471 + if (reg->min_value < BPF_REGISTER_MIN_RANGE || 1472 + reg->min_value > BPF_REGISTER_MAX_RANGE) 1472 1473 reg->min_value = BPF_REGISTER_MIN_RANGE; 1473 1474 } 1474 1475 ··· 1477 1476 struct bpf_insn *insn) 1478 1477 { 1479 1478 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1480 - u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE; 1479 + s64 min_val = BPF_REGISTER_MIN_RANGE; 1480 + u64 max_val = BPF_REGISTER_MAX_RANGE; 1481 1481 bool min_set = false, max_set = false; 1482 1482 u8 opcode = BPF_OP(insn->code); 1483 1483 ··· 1514 1512 return; 1515 1513 } 1516 1514 1515 + /* If one of our values was at the end of our ranges then we can't just 1516 + * do our normal operations to the register, we need to set the values 1517 + * to the min/max since they are undefined. 
1518 + */ 1519 + if (min_val == BPF_REGISTER_MIN_RANGE) 1520 + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1521 + if (max_val == BPF_REGISTER_MAX_RANGE) 1522 + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1523 + 1517 1524 switch (opcode) { 1518 1525 case BPF_ADD: 1519 - dst_reg->min_value += min_val; 1520 - dst_reg->max_value += max_val; 1526 + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1527 + dst_reg->min_value += min_val; 1528 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1529 + dst_reg->max_value += max_val; 1521 1530 break; 1522 1531 case BPF_SUB: 1523 - dst_reg->min_value -= min_val; 1524 - dst_reg->max_value -= max_val; 1532 + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1533 + dst_reg->min_value -= min_val; 1534 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1535 + dst_reg->max_value -= max_val; 1525 1536 break; 1526 1537 case BPF_MUL: 1527 - dst_reg->min_value *= min_val; 1528 - dst_reg->max_value *= max_val; 1538 + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1539 + dst_reg->min_value *= min_val; 1540 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1541 + dst_reg->max_value *= max_val; 1529 1542 break; 1530 1543 case BPF_AND: 1531 - /* & is special since it could end up with 0 bits set. */ 1532 - dst_reg->min_value &= min_val; 1544 + /* Disallow AND'ing of negative numbers, ain't nobody got time 1545 + * for that. Otherwise the minimum is 0 and the max is the max 1546 + * value we could AND against. 
1547 + */ 1548 + if (min_val < 0) 1549 + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1550 + else 1551 + dst_reg->min_value = 0; 1533 1552 dst_reg->max_value = max_val; 1534 1553 break; 1535 1554 case BPF_LSH: ··· 1560 1537 */ 1561 1538 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1562 1539 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1563 - else 1540 + else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1564 1541 dst_reg->min_value <<= min_val; 1565 1542 1566 1543 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1567 1544 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1568 - else 1545 + else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1569 1546 dst_reg->max_value <<= max_val; 1570 1547 break; 1571 1548 case BPF_RSH: 1572 - dst_reg->min_value >>= min_val; 1573 - dst_reg->max_value >>= max_val; 1574 - break; 1575 - case BPF_MOD: 1576 - /* % is special since it is an unsigned modulus, so the floor 1577 - * will always be 0. 1549 + /* RSH by a negative number is undefined, and the BPF_RSH is an 1550 + * unsigned shift, so make the appropriate casts. 1578 1551 */ 1579 - dst_reg->min_value = 0; 1580 - dst_reg->max_value = max_val - 1; 1552 + if (min_val < 0 || dst_reg->min_value < 0) 1553 + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1554 + else 1555 + dst_reg->min_value = 1556 + (u64)(dst_reg->min_value) >> min_val; 1557 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1558 + dst_reg->max_value >>= max_val; 1581 1559 break; 1582 1560 default: 1583 1561 reset_reg_range_values(regs, insn->dst_reg);
+2 -2
kernel/irq/manage.c
··· 1341 1341 1342 1342 } else if (new->flags & IRQF_TRIGGER_MASK) { 1343 1343 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; 1344 - unsigned int omsk = irq_settings_get_trigger_mask(desc); 1344 + unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); 1345 1345 1346 1346 if (nmsk != omsk) 1347 1347 /* hope the handler works with current trigger mode */ 1348 1348 pr_warn("irq %d uses trigger mode %u; requested %u\n", 1349 - irq, nmsk, omsk); 1349 + irq, omsk, nmsk); 1350 1350 } 1351 1351 1352 1352 *old_ptr = new;
+17 -3
kernel/locking/lockdep_internals.h
··· 46 46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) 47 47 48 48 /* 49 + * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text, 50 + * .data and .bss to fit in required 32MB limit for the kernel. With 51 + * PROVE_LOCKING we could go over this limit and cause system boot-up problems. 52 + * So, reduce the static allocations for lockdeps related structures so that 53 + * everything fits in current required size limit. 54 + */ 55 + #ifdef CONFIG_PROVE_LOCKING_SMALL 56 + /* 49 57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 50 58 * we track. 51 59 * ··· 62 54 * table (if it's not there yet), and we check it for lock order 63 55 * conflicts and deadlocks. 64 56 */ 57 + #define MAX_LOCKDEP_ENTRIES 16384UL 58 + #define MAX_LOCKDEP_CHAINS_BITS 15 59 + #define MAX_STACK_TRACE_ENTRIES 262144UL 60 + #else 65 61 #define MAX_LOCKDEP_ENTRIES 32768UL 66 62 67 63 #define MAX_LOCKDEP_CHAINS_BITS 16 68 - #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) 69 - 70 - #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) 71 64 72 65 /* 73 66 * Stack-trace: tightly packed array of stack backtrace 74 67 * addresses. Protected by the hash_lock. 75 68 */ 76 69 #define MAX_STACK_TRACE_ENTRIES 524288UL 70 + #endif 71 + 72 + #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) 73 + 74 + #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) 77 75 78 76 extern struct list_head all_lock_classes; 79 77 extern struct lock_chain lock_chains[];
+3 -1
kernel/power/suspend_test.c
··· 203 203 204 204 /* RTCs have initialized by now too ... can we use one? */ 205 205 dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); 206 - if (dev) 206 + if (dev) { 207 207 rtc = rtc_class_open(dev_name(dev)); 208 + put_device(dev); 209 + } 208 210 if (!rtc) { 209 211 printk(warn_no_rtc); 210 212 return 0;
+1 -23
kernel/printk/printk.c
··· 253 253 int console_set_on_cmdline; 254 254 EXPORT_SYMBOL(console_set_on_cmdline); 255 255 256 - #ifdef CONFIG_OF 257 - static bool of_specified_console; 258 - 259 - void console_set_by_of(void) 260 - { 261 - of_specified_console = true; 262 - } 263 - #else 264 - # define of_specified_console false 265 - #endif 266 - 267 256 /* Flag: console code may call schedule() */ 268 257 static int console_may_schedule; 269 258 ··· 783 794 return ret; 784 795 } 785 796 786 - static void cont_flush(void); 787 - 788 797 static ssize_t devkmsg_read(struct file *file, char __user *buf, 789 798 size_t count, loff_t *ppos) 790 799 { ··· 798 811 if (ret) 799 812 return ret; 800 813 raw_spin_lock_irq(&logbuf_lock); 801 - cont_flush(); 802 814 while (user->seq == log_next_seq) { 803 815 if (file->f_flags & O_NONBLOCK) { 804 816 ret = -EAGAIN; ··· 860 874 return -ESPIPE; 861 875 862 876 raw_spin_lock_irq(&logbuf_lock); 863 - cont_flush(); 864 877 switch (whence) { 865 878 case SEEK_SET: 866 879 /* the first record */ ··· 898 913 poll_wait(file, &log_wait, wait); 899 914 900 915 raw_spin_lock_irq(&logbuf_lock); 901 - cont_flush(); 902 916 if (user->seq < log_next_seq) { 903 917 /* return error when data has vanished underneath us */ 904 918 if (user->seq < log_first_seq) ··· 1284 1300 size_t skip; 1285 1301 1286 1302 raw_spin_lock_irq(&logbuf_lock); 1287 - cont_flush(); 1288 1303 if (syslog_seq < log_first_seq) { 1289 1304 /* messages are gone, move to first one */ 1290 1305 syslog_seq = log_first_seq; ··· 1343 1360 return -ENOMEM; 1344 1361 1345 1362 raw_spin_lock_irq(&logbuf_lock); 1346 - cont_flush(); 1347 1363 if (buf) { 1348 1364 u64 next_seq; 1349 1365 u64 seq; ··· 1504 1522 /* Number of chars in the log buffer */ 1505 1523 case SYSLOG_ACTION_SIZE_UNREAD: 1506 1524 raw_spin_lock_irq(&logbuf_lock); 1507 - cont_flush(); 1508 1525 if (syslog_seq < log_first_seq) { 1509 1526 /* messages are gone, move to first one */ 1510 1527 syslog_seq = log_first_seq; ··· 2638 2657 * didn't 
select a console we take the first one 2639 2658 * that registers here. 2640 2659 */ 2641 - if (preferred_console < 0 && !of_specified_console) { 2660 + if (preferred_console < 0) { 2642 2661 if (newcon->index < 0) 2643 2662 newcon->index = 0; 2644 2663 if (newcon->setup == NULL || ··· 3020 3039 dumper->active = true; 3021 3040 3022 3041 raw_spin_lock_irqsave(&logbuf_lock, flags); 3023 - cont_flush(); 3024 3042 dumper->cur_seq = clear_seq; 3025 3043 dumper->cur_idx = clear_idx; 3026 3044 dumper->next_seq = log_next_seq; ··· 3110 3130 bool ret; 3111 3131 3112 3132 raw_spin_lock_irqsave(&logbuf_lock, flags); 3113 - cont_flush(); 3114 3133 ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); 3115 3134 raw_spin_unlock_irqrestore(&logbuf_lock, flags); 3116 3135 ··· 3152 3173 goto out; 3153 3174 3154 3175 raw_spin_lock_irqsave(&logbuf_lock, flags); 3155 - cont_flush(); 3156 3176 if (dumper->cur_seq < log_first_seq) { 3157 3177 /* messages are gone, move to first available one */ 3158 3178 dumper->cur_seq = log_first_seq;
+5 -1
kernel/taskstats.c
··· 54 54 [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, 55 55 [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; 56 56 57 - static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = { 57 + /* 58 + * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family. 59 + * Make sure they are always aligned. 60 + */ 61 + static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = { 58 62 [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, 59 63 }; 60 64
+23 -1
kernel/trace/ftrace.c
··· 1862 1862 1863 1863 /* Update rec->flags */ 1864 1864 do_for_each_ftrace_rec(pg, rec) { 1865 + 1866 + if (rec->flags & FTRACE_FL_DISABLED) 1867 + continue; 1868 + 1865 1869 /* We need to update only differences of filter_hash */ 1866 1870 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 1867 1871 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); ··· 1888 1884 1889 1885 /* Roll back what we did above */ 1890 1886 do_for_each_ftrace_rec(pg, rec) { 1887 + 1888 + if (rec->flags & FTRACE_FL_DISABLED) 1889 + continue; 1890 + 1891 1891 if (rec == end) 1892 1892 goto err_out; 1893 1893 ··· 2405 2397 return; 2406 2398 2407 2399 do_for_each_ftrace_rec(pg, rec) { 2400 + 2401 + if (rec->flags & FTRACE_FL_DISABLED) 2402 + continue; 2403 + 2408 2404 failed = __ftrace_replace_code(rec, enable); 2409 2405 if (failed) { 2410 2406 ftrace_bug(failed, rec); ··· 2775 2763 struct dyn_ftrace *rec; 2776 2764 2777 2765 do_for_each_ftrace_rec(pg, rec) { 2778 - if (FTRACE_WARN_ON_ONCE(rec->flags)) 2766 + if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) 2779 2767 pr_warn(" %pS flags:%lx\n", 2780 2768 (void *)rec->ip, rec->flags); 2781 2769 } while_for_each_ftrace_rec(); ··· 3610 3598 goto out_unlock; 3611 3599 3612 3600 do_for_each_ftrace_rec(pg, rec) { 3601 + 3602 + if (rec->flags & FTRACE_FL_DISABLED) 3603 + continue; 3604 + 3613 3605 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 3614 3606 ret = enter_record(hash, rec, clear_filter); 3615 3607 if (ret < 0) { ··· 3808 3792 mutex_lock(&ftrace_lock); 3809 3793 3810 3794 do_for_each_ftrace_rec(pg, rec) { 3795 + 3796 + if (rec->flags & FTRACE_FL_DISABLED) 3797 + continue; 3811 3798 3812 3799 if (!ftrace_match_record(rec, &func_g, NULL, 0)) 3813 3800 continue; ··· 4703 4684 } 4704 4685 4705 4686 do_for_each_ftrace_rec(pg, rec) { 4687 + 4688 + if (rec->flags & FTRACE_FL_DISABLED) 4689 + continue; 4706 4690 4707 4691 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 4708 4692 /* if it is in the array */
+3
lib/Kconfig.debug
··· 1085 1085 1086 1086 For more details, see Documentation/locking/lockdep-design.txt. 1087 1087 1088 + config PROVE_LOCKING_SMALL 1089 + bool 1090 + 1088 1091 config LOCKDEP 1089 1092 bool 1090 1093 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+3 -1
lib/iov_iter.c
··· 683 683 struct pipe_inode_info *pipe = i->pipe; 684 684 struct pipe_buffer *buf; 685 685 int idx = i->idx; 686 - size_t off = i->iov_offset; 686 + size_t off = i->iov_offset, orig_sz; 687 687 688 688 if (unlikely(i->count < size)) 689 689 size = i->count; 690 + orig_sz = size; 690 691 691 692 if (size) { 692 693 if (off) /* make it relative to the beginning of buffer */ ··· 714 713 pipe->nrbufs--; 715 714 } 716 715 } 716 + i->count -= orig_sz; 717 717 } 718 718 719 719 void iov_iter_advance(struct iov_iter *i, size_t size)
+2
lib/stackdepot.c
··· 192 192 trace->entries = stack->entries; 193 193 trace->skip = 0; 194 194 } 195 + EXPORT_SYMBOL_GPL(depot_fetch_stack); 195 196 196 197 /** 197 198 * depot_save_stack - save stack in a stack depot. ··· 284 283 fast_exit: 285 284 return retval; 286 285 } 286 + EXPORT_SYMBOL_GPL(depot_save_stack);
+3
mm/cma.c
··· 385 385 bitmap_maxno = cma_bitmap_maxno(cma); 386 386 bitmap_count = cma_bitmap_pages_to_bits(cma, count); 387 387 388 + if (bitmap_count > bitmap_maxno) 389 + return NULL; 390 + 388 391 for (;;) { 389 392 mutex_lock(&cma->lock); 390 393 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
+3
mm/filemap.c
··· 1732 1732 if (inode->i_blkbits == PAGE_SHIFT || 1733 1733 !mapping->a_ops->is_partially_uptodate) 1734 1734 goto page_not_up_to_date; 1735 + /* pipes can't handle partially uptodate pages */ 1736 + if (unlikely(iter->type & ITER_PIPE)) 1737 + goto page_not_up_to_date; 1735 1738 if (!trylock_page(page)) 1736 1739 goto page_not_up_to_date; 1737 1740 /* Did it get truncated before we got the lock? */
+8 -1
mm/huge_memory.c
··· 1426 1426 1427 1427 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1428 1428 unsigned long new_addr, unsigned long old_end, 1429 - pmd_t *old_pmd, pmd_t *new_pmd) 1429 + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) 1430 1430 { 1431 1431 spinlock_t *old_ptl, *new_ptl; 1432 1432 pmd_t pmd; 1433 1433 struct mm_struct *mm = vma->vm_mm; 1434 + bool force_flush = false; 1434 1435 1435 1436 if ((old_addr & ~HPAGE_PMD_MASK) || 1436 1437 (new_addr & ~HPAGE_PMD_MASK) || ··· 1456 1455 new_ptl = pmd_lockptr(mm, new_pmd); 1457 1456 if (new_ptl != old_ptl) 1458 1457 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 1458 + if (pmd_present(*old_pmd) && pmd_dirty(*old_pmd)) 1459 + force_flush = true; 1459 1460 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1460 1461 VM_BUG_ON(!pmd_none(*new_pmd)); 1461 1462 ··· 1470 1467 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1471 1468 if (new_ptl != old_ptl) 1472 1469 spin_unlock(new_ptl); 1470 + if (force_flush) 1471 + flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1472 + else 1473 + *need_flush = true; 1473 1474 spin_unlock(old_ptl); 1474 1475 return true; 1475 1476 }
+66
mm/hugetlb.c
··· 1826 1826 * is not the case is if a reserve map was changed between calls. It 1827 1827 * is the responsibility of the caller to notice the difference and 1828 1828 * take appropriate action. 1829 + * 1830 + * vma_add_reservation is used in error paths where a reservation must 1831 + * be restored when a newly allocated huge page must be freed. It is 1832 + * to be called after calling vma_needs_reservation to determine if a 1833 + * reservation exists. 1829 1834 */ 1830 1835 enum vma_resv_mode { 1831 1836 VMA_NEEDS_RESV, 1832 1837 VMA_COMMIT_RESV, 1833 1838 VMA_END_RESV, 1839 + VMA_ADD_RESV, 1834 1840 }; 1835 1841 static long __vma_reservation_common(struct hstate *h, 1836 1842 struct vm_area_struct *vma, unsigned long addr, ··· 1861 1855 case VMA_END_RESV: 1862 1856 region_abort(resv, idx, idx + 1); 1863 1857 ret = 0; 1858 + break; 1859 + case VMA_ADD_RESV: 1860 + if (vma->vm_flags & VM_MAYSHARE) 1861 + ret = region_add(resv, idx, idx + 1); 1862 + else { 1863 + region_abort(resv, idx, idx + 1); 1864 + ret = region_del(resv, idx, idx + 1); 1865 + } 1864 1866 break; 1865 1867 default: 1866 1868 BUG(); ··· 1915 1901 struct vm_area_struct *vma, unsigned long addr) 1916 1902 { 1917 1903 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 1904 + } 1905 + 1906 + static long vma_add_reservation(struct hstate *h, 1907 + struct vm_area_struct *vma, unsigned long addr) 1908 + { 1909 + return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 1910 + } 1911 + 1912 + /* 1913 + * This routine is called to restore a reservation on error paths. In the 1914 + * specific error paths, a huge page was allocated (via alloc_huge_page) 1915 + * and is about to be freed. If a reservation for the page existed, 1916 + * alloc_huge_page would have consumed the reservation and set PagePrivate 1917 + * in the newly allocated page. When the page is freed via free_huge_page, 1918 + * the global reservation count will be incremented if PagePrivate is set. 
1919 + * However, free_huge_page can not adjust the reserve map. Adjust the 1920 + * reserve map here to be consistent with global reserve count adjustments 1921 + * to be made by free_huge_page. 1922 + */ 1923 + static void restore_reserve_on_error(struct hstate *h, 1924 + struct vm_area_struct *vma, unsigned long address, 1925 + struct page *page) 1926 + { 1927 + if (unlikely(PagePrivate(page))) { 1928 + long rc = vma_needs_reservation(h, vma, address); 1929 + 1930 + if (unlikely(rc < 0)) { 1931 + /* 1932 + * Rare out of memory condition in reserve map 1933 + * manipulation. Clear PagePrivate so that 1934 + * global reserve count will not be incremented 1935 + * by free_huge_page. This will make it appear 1936 + * as though the reservation for this page was 1937 + * consumed. This may prevent the task from 1938 + * faulting in the page at a later time. This 1939 + * is better than inconsistent global huge page 1940 + * accounting of reserve counts. 1941 + */ 1942 + ClearPagePrivate(page); 1943 + } else if (rc) { 1944 + rc = vma_add_reservation(h, vma, address); 1945 + if (unlikely(rc < 0)) 1946 + /* 1947 + * See above comment about rare out of 1948 + * memory condition. 1949 + */ 1950 + ClearPagePrivate(page); 1951 + } else 1952 + vma_end_reservation(h, vma, address); 1953 + } 1918 1954 } 1919 1955 1920 1956 struct page *alloc_huge_page(struct vm_area_struct *vma, ··· 3562 3498 spin_unlock(ptl); 3563 3499 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 3564 3500 out_release_all: 3501 + restore_reserve_on_error(h, vma, address, new_page); 3565 3502 put_page(new_page); 3566 3503 out_release_old: 3567 3504 put_page(old_page); ··· 3745 3680 spin_unlock(ptl); 3746 3681 backout_unlocked: 3747 3682 unlock_page(page); 3683 + restore_reserve_on_error(h, vma, address, page); 3748 3684 put_page(page); 3749 3685 goto out; 3750 3686 }
+1
mm/kmemleak.c
··· 1414 1414 /* data/bss scanning */ 1415 1415 scan_large_block(_sdata, _edata); 1416 1416 scan_large_block(__bss_start, __bss_stop); 1417 + scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init); 1417 1418 1418 1419 #ifdef CONFIG_SMP 1419 1420 /* per-cpu sections scanning */
+5 -7
mm/memory-failure.c
··· 1112 1112 } 1113 1113 1114 1114 if (!PageHuge(p) && PageTransHuge(hpage)) { 1115 - lock_page(hpage); 1116 - if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) { 1117 - unlock_page(hpage); 1118 - if (!PageAnon(hpage)) 1115 + lock_page(p); 1116 + if (!PageAnon(p) || unlikely(split_huge_page(p))) { 1117 + unlock_page(p); 1118 + if (!PageAnon(p)) 1119 1119 pr_err("Memory failure: %#lx: non anonymous thp\n", 1120 1120 pfn); 1121 1121 else ··· 1126 1126 put_hwpoison_page(p); 1127 1127 return -EBUSY; 1128 1128 } 1129 - unlock_page(hpage); 1130 - get_hwpoison_page(p); 1131 - put_hwpoison_page(hpage); 1129 + unlock_page(p); 1132 1130 VM_BUG_ON_PAGE(!page_count(p), p); 1133 1131 hpage = compound_head(p); 1134 1132 }
+21 -9
mm/mremap.c
··· 104 104 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, 105 105 unsigned long old_addr, unsigned long old_end, 106 106 struct vm_area_struct *new_vma, pmd_t *new_pmd, 107 - unsigned long new_addr, bool need_rmap_locks) 107 + unsigned long new_addr, bool need_rmap_locks, bool *need_flush) 108 108 { 109 109 struct mm_struct *mm = vma->vm_mm; 110 110 pte_t *old_pte, *new_pte, pte; 111 111 spinlock_t *old_ptl, *new_ptl; 112 + bool force_flush = false; 113 + unsigned long len = old_end - old_addr; 112 114 113 115 /* 114 116 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma ··· 148 146 new_pte++, new_addr += PAGE_SIZE) { 149 147 if (pte_none(*old_pte)) 150 148 continue; 149 + 150 + /* 151 + * We are remapping a dirty PTE, make sure to 152 + * flush TLB before we drop the PTL for the 153 + * old PTE or we may race with page_mkclean(). 154 + */ 155 + if (pte_present(*old_pte) && pte_dirty(*old_pte)) 156 + force_flush = true; 151 157 pte = ptep_get_and_clear(mm, old_addr, old_pte); 152 158 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); 153 159 pte = move_soft_dirty_pte(pte); ··· 166 156 if (new_ptl != old_ptl) 167 157 spin_unlock(new_ptl); 168 158 pte_unmap(new_pte - 1); 159 + if (force_flush) 160 + flush_tlb_range(vma, old_end - len, old_end); 161 + else 162 + *need_flush = true; 169 163 pte_unmap_unlock(old_pte - 1, old_ptl); 170 164 if (need_rmap_locks) 171 165 drop_rmap_locks(vma); ··· 215 201 if (need_rmap_locks) 216 202 take_rmap_locks(vma); 217 203 moved = move_huge_pmd(vma, old_addr, new_addr, 218 - old_end, old_pmd, new_pmd); 204 + old_end, old_pmd, new_pmd, 205 + &need_flush); 219 206 if (need_rmap_locks) 220 207 drop_rmap_locks(vma); 221 - if (moved) { 222 - need_flush = true; 208 + if (moved) 223 209 continue; 224 - } 225 210 } 226 211 split_huge_pmd(vma, old_pmd, old_addr); 227 212 if (pmd_trans_unstable(old_pmd)) ··· 233 220 extent = next - new_addr; 234 221 if (extent > LATENCY_LIMIT) 235 222 extent = 
LATENCY_LIMIT; 236 - move_ptes(vma, old_pmd, old_addr, old_addr + extent, 237 - new_vma, new_pmd, new_addr, need_rmap_locks); 238 - need_flush = true; 223 + move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, 224 + new_pmd, new_addr, need_rmap_locks, &need_flush); 239 225 } 240 - if (likely(need_flush)) 226 + if (need_flush) 241 227 flush_tlb_range(vma, old_end-len, old_addr); 242 228 243 229 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+1 -1
mm/page_alloc.c
··· 3658 3658 /* Make sure we know about allocations which stall for too long */ 3659 3659 if (time_after(jiffies, alloc_start + stall_timeout)) { 3660 3660 warn_alloc(gfp_mask, 3661 - "page alloction stalls for %ums, order:%u\n", 3661 + "page allocation stalls for %ums, order:%u", 3662 3662 jiffies_to_msecs(jiffies-alloc_start), order); 3663 3663 stall_timeout += 10 * HZ; 3664 3664 }
+2
mm/shmem.c
··· 1483 1483 copy_highpage(newpage, oldpage); 1484 1484 flush_dcache_page(newpage); 1485 1485 1486 + __SetPageLocked(newpage); 1487 + __SetPageSwapBacked(newpage); 1486 1488 SetPageUptodate(newpage); 1487 1489 set_page_private(newpage, swap_index); 1488 1490 SetPageSwapCache(newpage);
+2 -2
mm/slab_common.c
··· 533 533 534 534 s = create_cache(cache_name, root_cache->object_size, 535 535 root_cache->size, root_cache->align, 536 - root_cache->flags, root_cache->ctor, 537 - memcg, root_cache); 536 + root_cache->flags & CACHE_CREATE_MASK, 537 + root_cache->ctor, memcg, root_cache); 538 538 /* 539 539 * If we could not create a memcg cache, do not complain, because 540 540 * that's not critical at all as we can always proceed with the root
+2
mm/swapfile.c
··· 2224 2224 swab32s(&swap_header->info.version); 2225 2225 swab32s(&swap_header->info.last_page); 2226 2226 swab32s(&swap_header->info.nr_badpages); 2227 + if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 2228 + return 0; 2227 2229 for (i = 0; i < swap_header->info.nr_badpages; i++) 2228 2230 swab32s(&swap_header->info.badpages[i]); 2229 2231 }
+1
net/batman-adv/hard-interface.c
··· 652 652 batadv_softif_destroy_sysfs(hard_iface->soft_iface); 653 653 } 654 654 655 + hard_iface->soft_iface = NULL; 655 656 batadv_hardif_put(hard_iface); 656 657 657 658 out:
+1
net/batman-adv/tp_meter.c
··· 837 837 primary_if = batadv_primary_if_get_selected(bat_priv); 838 838 if (unlikely(!primary_if)) { 839 839 err = BATADV_TP_REASON_DST_UNREACHABLE; 840 + tp_vars->reason = err; 840 841 goto out; 841 842 } 842 843
+23 -9
net/can/bcm.c
··· 1549 1549 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; 1550 1550 struct sock *sk = sock->sk; 1551 1551 struct bcm_sock *bo = bcm_sk(sk); 1552 + int ret = 0; 1552 1553 1553 1554 if (len < sizeof(*addr)) 1554 1555 return -EINVAL; 1555 1556 1556 - if (bo->bound) 1557 - return -EISCONN; 1557 + lock_sock(sk); 1558 + 1559 + if (bo->bound) { 1560 + ret = -EISCONN; 1561 + goto fail; 1562 + } 1558 1563 1559 1564 /* bind a device to this socket */ 1560 1565 if (addr->can_ifindex) { 1561 1566 struct net_device *dev; 1562 1567 1563 1568 dev = dev_get_by_index(&init_net, addr->can_ifindex); 1564 - if (!dev) 1565 - return -ENODEV; 1566 - 1569 + if (!dev) { 1570 + ret = -ENODEV; 1571 + goto fail; 1572 + } 1567 1573 if (dev->type != ARPHRD_CAN) { 1568 1574 dev_put(dev); 1569 - return -ENODEV; 1575 + ret = -ENODEV; 1576 + goto fail; 1570 1577 } 1571 1578 1572 1579 bo->ifindex = dev->ifindex; ··· 1584 1577 bo->ifindex = 0; 1585 1578 } 1586 1579 1587 - bo->bound = 1; 1588 - 1589 1580 if (proc_dir) { 1590 1581 /* unique socket address as filename */ 1591 1582 sprintf(bo->procname, "%lu", sock_i_ino(sk)); 1592 1583 bo->bcm_proc_read = proc_create_data(bo->procname, 0644, 1593 1584 proc_dir, 1594 1585 &bcm_proc_fops, sk); 1586 + if (!bo->bcm_proc_read) { 1587 + ret = -ENOMEM; 1588 + goto fail; 1589 + } 1595 1590 } 1596 1591 1597 - return 0; 1592 + bo->bound = 1; 1593 + 1594 + fail: 1595 + release_sock(sk); 1596 + 1597 + return ret; 1598 1598 } 1599 1599 1600 1600 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+2 -1
net/ceph/ceph_fs.c
··· 34 34 fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count); 35 35 fl->object_size = le32_to_cpu(legacy->fl_object_size); 36 36 fl->pool_id = le32_to_cpu(legacy->fl_pg_pool); 37 - if (fl->pool_id == 0) 37 + if (fl->pool_id == 0 && fl->stripe_unit == 0 && 38 + fl->stripe_count == 0 && fl->object_size == 0) 38 39 fl->pool_id = -1; 39 40 } 40 41 EXPORT_SYMBOL(ceph_file_layout_from_legacy);
+1
net/ceph/osd_client.c
··· 4094 4094 osd_init(&osdc->homeless_osd); 4095 4095 osdc->homeless_osd.o_osdc = osdc; 4096 4096 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD; 4097 + osdc->last_linger_id = CEPH_LINGER_ID_START; 4097 4098 osdc->linger_requests = RB_ROOT; 4098 4099 osdc->map_checks = RB_ROOT; 4099 4100 osdc->linger_map_checks = RB_ROOT;
+7 -12
net/core/dev.c
··· 1766 1766 1767 1767 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1768 1768 { 1769 - if (skb_orphan_frags(skb, GFP_ATOMIC) || 1770 - unlikely(!is_skb_forwardable(dev, skb))) { 1771 - atomic_long_inc(&dev->rx_dropped); 1772 - kfree_skb(skb); 1773 - return NET_RX_DROP; 1769 + int ret = ____dev_forward_skb(dev, skb); 1770 + 1771 + if (likely(!ret)) { 1772 + skb->protocol = eth_type_trans(skb, dev); 1773 + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1774 1774 } 1775 1775 1776 - skb_scrub_packet(skb, true); 1777 - skb->priority = 0; 1778 - skb->protocol = eth_type_trans(skb, dev); 1779 - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1780 - 1781 - return 0; 1776 + return ret; 1782 1777 } 1783 1778 EXPORT_SYMBOL_GPL(__dev_forward_skb); 1784 1779 ··· 2479 2484 goto out; 2480 2485 } 2481 2486 2482 - *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2487 + *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 2483 2488 out_set_summed: 2484 2489 skb->ip_summed = CHECKSUM_NONE; 2485 2490 out:
+60 -8
net/core/filter.c
··· 1628 1628 return dev_forward_skb(dev, skb); 1629 1629 } 1630 1630 1631 + static inline int __bpf_rx_skb_no_mac(struct net_device *dev, 1632 + struct sk_buff *skb) 1633 + { 1634 + int ret = ____dev_forward_skb(dev, skb); 1635 + 1636 + if (likely(!ret)) { 1637 + skb->dev = dev; 1638 + ret = netif_rx(skb); 1639 + } 1640 + 1641 + return ret; 1642 + } 1643 + 1631 1644 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) 1632 1645 { 1633 1646 int ret; ··· 1658 1645 __this_cpu_dec(xmit_recursion); 1659 1646 1660 1647 return ret; 1648 + } 1649 + 1650 + static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, 1651 + u32 flags) 1652 + { 1653 + /* skb->mac_len is not set on normal egress */ 1654 + unsigned int mlen = skb->network_header - skb->mac_header; 1655 + 1656 + __skb_pull(skb, mlen); 1657 + 1658 + /* At ingress, the mac header has already been pulled once. 1659 + * At egress, skb_pospull_rcsum has to be done in case that 1660 + * the skb is originated from ingress (i.e. a forwarded skb) 1661 + * to ensure that rcsum starts at net header. 1662 + */ 1663 + if (!skb_at_tc_ingress(skb)) 1664 + skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 1665 + skb_pop_mac_header(skb); 1666 + skb_reset_mac_len(skb); 1667 + return flags & BPF_F_INGRESS ? 1668 + __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); 1669 + } 1670 + 1671 + static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, 1672 + u32 flags) 1673 + { 1674 + bpf_push_mac_rcsum(skb); 1675 + return flags & BPF_F_INGRESS ? 
1676 + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); 1677 + } 1678 + 1679 + static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, 1680 + u32 flags) 1681 + { 1682 + switch (dev->type) { 1683 + case ARPHRD_TUNNEL: 1684 + case ARPHRD_TUNNEL6: 1685 + case ARPHRD_SIT: 1686 + case ARPHRD_IPGRE: 1687 + case ARPHRD_VOID: 1688 + case ARPHRD_NONE: 1689 + return __bpf_redirect_no_mac(skb, dev, flags); 1690 + default: 1691 + return __bpf_redirect_common(skb, dev, flags); 1692 + } 1661 1693 } 1662 1694 1663 1695 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) ··· 1733 1675 return -ENOMEM; 1734 1676 } 1735 1677 1736 - bpf_push_mac_rcsum(clone); 1737 - 1738 - return flags & BPF_F_INGRESS ? 1739 - __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone); 1678 + return __bpf_redirect(clone, dev, flags); 1740 1679 } 1741 1680 1742 1681 static const struct bpf_func_proto bpf_clone_redirect_proto = { ··· 1777 1722 return -EINVAL; 1778 1723 } 1779 1724 1780 - bpf_push_mac_rcsum(skb); 1781 - 1782 - return ri->flags & BPF_F_INGRESS ? 1783 - __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); 1725 + return __bpf_redirect(skb, dev, ri->flags); 1784 1726 } 1785 1727 1786 1728 static const struct bpf_func_proto bpf_redirect_proto = {
+8 -3
net/core/flow_dissector.c
··· 122 122 struct flow_dissector_key_keyid *key_keyid; 123 123 bool skip_vlan = false; 124 124 u8 ip_proto = 0; 125 - bool ret = false; 125 + bool ret; 126 126 127 127 if (!data) { 128 128 data = skb->data; ··· 549 549 out_good: 550 550 ret = true; 551 551 552 - out_bad: 552 + key_control->thoff = (u16)nhoff; 553 + out: 553 554 key_basic->n_proto = proto; 554 555 key_basic->ip_proto = ip_proto; 555 - key_control->thoff = (u16)nhoff; 556 556 557 557 return ret; 558 + 559 + out_bad: 560 + ret = false; 561 + key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); 562 + goto out; 558 563 } 559 564 EXPORT_SYMBOL(__skb_flow_dissect); 560 565
+2
net/core/net_namespace.c
··· 219 219 bool alloc; 220 220 int id; 221 221 222 + if (atomic_read(&net->count) == 0) 223 + return NETNSA_NSID_NOT_ASSIGNED; 222 224 spin_lock_irqsave(&net->nsid_lock, flags); 223 225 alloc = atomic_read(&peer->count) == 0 ? false : true; 224 226 id = __peernet2id_alloc(net, peer, &alloc);
+15 -8
net/core/rtnetlink.c
··· 275 275 276 276 rtnl_msg_handlers[protocol][msgindex].doit = NULL; 277 277 rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; 278 + rtnl_msg_handlers[protocol][msgindex].calcit = NULL; 278 279 279 280 return 0; 280 281 } ··· 840 839 if (dev->dev.parent && dev_is_pci(dev->dev.parent) && 841 840 (ext_filter_mask & RTEXT_FILTER_VF)) { 842 841 int num_vfs = dev_num_vf(dev->dev.parent); 843 - size_t size = nla_total_size(sizeof(struct nlattr)); 844 - size += nla_total_size(num_vfs * sizeof(struct nlattr)); 842 + size_t size = nla_total_size(0); 845 843 size += num_vfs * 846 - (nla_total_size(sizeof(struct ifla_vf_mac)) + 847 - nla_total_size(MAX_VLAN_LIST_LEN * 848 - sizeof(struct nlattr)) + 844 + (nla_total_size(0) + 845 + nla_total_size(sizeof(struct ifla_vf_mac)) + 846 + nla_total_size(sizeof(struct ifla_vf_vlan)) + 847 + nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */ 849 848 nla_total_size(MAX_VLAN_LIST_LEN * 850 849 sizeof(struct ifla_vf_vlan_info)) + 851 850 nla_total_size(sizeof(struct ifla_vf_spoofchk)) + 851 + nla_total_size(sizeof(struct ifla_vf_tx_rate)) + 852 852 nla_total_size(sizeof(struct ifla_vf_rate)) + 853 853 nla_total_size(sizeof(struct ifla_vf_link_state)) + 854 854 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + 855 + nla_total_size(0) + /* nest IFLA_VF_STATS */ 855 856 /* IFLA_VF_STATS_RX_PACKETS */ 856 857 nla_total_size_64bit(sizeof(__u64)) + 857 858 /* IFLA_VF_STATS_TX_PACKETS */ ··· 901 898 902 899 static size_t rtnl_xdp_size(const struct net_device *dev) 903 900 { 904 - size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */ 901 + size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 902 + nla_total_size(1); /* XDP_ATTACHED */ 905 903 906 904 if (!dev->netdev_ops->ndo_xdp) 907 905 return 0; ··· 1609 1605 head = &net->dev_index_head[h]; 1610 1606 hlist_for_each_entry(dev, head, index_hlist) { 1611 1607 if (link_dump_filtered(dev, master_idx, kind_ops)) 1612 - continue; 1608 + goto cont; 1613 1609 if (idx < s_idx) 1614 1610 
goto cont; 1615 1611 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, ··· 2852 2848 2853 2849 static inline size_t rtnl_fdb_nlmsg_size(void) 2854 2850 { 2855 - return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); 2851 + return NLMSG_ALIGN(sizeof(struct ndmsg)) + 2852 + nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ 2853 + nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 2854 + 0; 2856 2855 } 2857 2856 2858 2857 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
+4 -2
net/core/sock.c
··· 453 453 EXPORT_SYMBOL(sock_queue_rcv_skb); 454 454 455 455 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, 456 - const int nested, unsigned int trim_cap) 456 + const int nested, unsigned int trim_cap, bool refcounted) 457 457 { 458 458 int rc = NET_RX_SUCCESS; 459 459 ··· 487 487 488 488 bh_unlock_sock(sk); 489 489 out: 490 - sock_put(sk); 490 + if (refcounted) 491 + sock_put(sk); 491 492 return rc; 492 493 discard_and_relse: 493 494 kfree_skb(skb); ··· 1544 1543 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 1545 1544 1546 1545 newsk->sk_err = 0; 1546 + newsk->sk_err_soft = 0; 1547 1547 newsk->sk_priority = 0; 1548 1548 newsk->sk_incoming_cpu = raw_smp_processor_id(); 1549 1549 atomic64_set(&newsk->sk_cookie, 0);
+9 -7
net/dccp/ipv4.c
··· 235 235 { 236 236 const struct iphdr *iph = (struct iphdr *)skb->data; 237 237 const u8 offset = iph->ihl << 2; 238 - const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 238 + const struct dccp_hdr *dh; 239 239 struct dccp_sock *dp; 240 240 struct inet_sock *inet; 241 241 const int type = icmp_hdr(skb)->type; ··· 245 245 int err; 246 246 struct net *net = dev_net(skb->dev); 247 247 248 - if (skb->len < offset + sizeof(*dh) || 249 - skb->len < offset + __dccp_basic_hdr_len(dh)) { 250 - __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); 251 - return; 252 - } 248 + /* Only need dccph_dport & dccph_sport which are the first 249 + * 4 bytes in dccp header. 250 + * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. 251 + */ 252 + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); 253 + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); 254 + dh = (struct dccp_hdr *)(skb->data + offset); 253 255 254 256 sk = __inet_lookup_established(net, &dccp_hashinfo, 255 257 iph->daddr, dh->dccph_dport, ··· 870 868 goto discard_and_relse; 871 869 nf_reset(skb); 872 870 873 - return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); 871 + return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted); 874 872 875 873 no_dccp_socket: 876 874 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+11 -8
net/dccp/ipv6.c
··· 70 70 u8 type, u8 code, int offset, __be32 info) 71 71 { 72 72 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; 73 - const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 73 + const struct dccp_hdr *dh; 74 74 struct dccp_sock *dp; 75 75 struct ipv6_pinfo *np; 76 76 struct sock *sk; ··· 78 78 __u64 seq; 79 79 struct net *net = dev_net(skb->dev); 80 80 81 - if (skb->len < offset + sizeof(*dh) || 82 - skb->len < offset + __dccp_basic_hdr_len(dh)) { 83 - __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), 84 - ICMP6_MIB_INERRORS); 85 - return; 86 - } 81 + /* Only need dccph_dport & dccph_sport which are the first 82 + * 4 bytes in dccp header. 83 + * Our caller (icmpv6_notify()) already pulled 8 bytes for us. 84 + */ 85 + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); 86 + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); 87 + dh = (struct dccp_hdr *)(skb->data + offset); 87 88 88 89 sk = __inet6_lookup_established(net, &dccp_hashinfo, 89 90 &hdr->daddr, dh->dccph_dport, ··· 739 738 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 740 739 goto discard_and_relse; 741 740 742 - return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0; 741 + return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, 742 + refcounted) ? -1 : 0; 743 743 744 744 no_dccp_socket: 745 745 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) ··· 958 956 .getsockopt = ipv6_getsockopt, 959 957 .addr2sockaddr = inet6_csk_addr2sockaddr, 960 958 .sockaddr_len = sizeof(struct sockaddr_in6), 959 + .bind_conflict = inet6_csk_bind_conflict, 961 960 #ifdef CONFIG_COMPAT 962 961 .compat_setsockopt = compat_ipv6_setsockopt, 963 962 .compat_getsockopt = compat_ipv6_getsockopt,
+4
net/dccp/proto.c
··· 1009 1009 __kfree_skb(skb); 1010 1010 } 1011 1011 1012 + /* If socket has been already reset kill it. */ 1013 + if (sk->sk_state == DCCP_CLOSED) 1014 + goto adjudge_to_death; 1015 + 1012 1016 if (data_was_unread) { 1013 1017 /* Unread data was tossed, send an appropriate Reset Code */ 1014 1018 DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
+4 -5
net/ipv4/af_inet.c
··· 533 533 534 534 static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) 535 535 { 536 - DEFINE_WAIT(wait); 536 + DEFINE_WAIT_FUNC(wait, woken_wake_function); 537 537 538 - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 538 + add_wait_queue(sk_sleep(sk), &wait); 539 539 sk->sk_write_pending += writebias; 540 540 541 541 /* Basic assumption: if someone sets sk->sk_err, he _must_ ··· 545 545 */ 546 546 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 547 547 release_sock(sk); 548 - timeo = schedule_timeout(timeo); 548 + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); 549 549 lock_sock(sk); 550 550 if (signal_pending(current) || !timeo) 551 551 break; 552 - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 553 552 } 554 - finish_wait(sk_sleep(sk), &wait); 553 + remove_wait_queue(sk_sleep(sk), &wait); 555 554 sk->sk_write_pending -= writebias; 556 555 return timeo; 557 556 }
+15 -5
net/ipv4/fib_frontend.c
··· 151 151 152 152 int fib_unmerge(struct net *net) 153 153 { 154 - struct fib_table *old, *new; 154 + struct fib_table *old, *new, *main_table; 155 155 156 156 /* attempt to fetch local table if it has been allocated */ 157 157 old = fib_get_table(net, RT_TABLE_LOCAL); ··· 162 162 if (!new) 163 163 return -ENOMEM; 164 164 165 + /* table is already unmerged */ 166 + if (new == old) 167 + return 0; 168 + 165 169 /* replace merged table with clean table */ 166 - if (new != old) { 167 - fib_replace_table(net, old, new); 168 - fib_free_table(old); 169 - } 170 + fib_replace_table(net, old, new); 171 + fib_free_table(old); 172 + 173 + /* attempt to fetch main table if it has been allocated */ 174 + main_table = fib_get_table(net, RT_TABLE_MAIN); 175 + if (!main_table) 176 + return 0; 177 + 178 + /* flush local entries from main table */ 179 + fib_table_flush_external(main_table); 170 180 171 181 return 0; 172 182 }
+77 -13
net/ipv4/fib_trie.c
··· 1743 1743 local_l = fib_find_node(lt, &local_tp, l->key); 1744 1744 1745 1745 if (fib_insert_alias(lt, local_tp, local_l, new_fa, 1746 - NULL, l->key)) 1746 + NULL, l->key)) { 1747 + kmem_cache_free(fn_alias_kmem, new_fa); 1747 1748 goto out; 1749 + } 1748 1750 } 1749 1751 1750 1752 /* stop loop if key wrapped back to 0 */ ··· 1760 1758 fib_trie_free(local_tb); 1761 1759 1762 1760 return NULL; 1761 + } 1762 + 1763 + /* Caller must hold RTNL */ 1764 + void fib_table_flush_external(struct fib_table *tb) 1765 + { 1766 + struct trie *t = (struct trie *)tb->tb_data; 1767 + struct key_vector *pn = t->kv; 1768 + unsigned long cindex = 1; 1769 + struct hlist_node *tmp; 1770 + struct fib_alias *fa; 1771 + 1772 + /* walk trie in reverse order */ 1773 + for (;;) { 1774 + unsigned char slen = 0; 1775 + struct key_vector *n; 1776 + 1777 + if (!(cindex--)) { 1778 + t_key pkey = pn->key; 1779 + 1780 + /* cannot resize the trie vector */ 1781 + if (IS_TRIE(pn)) 1782 + break; 1783 + 1784 + /* resize completed node */ 1785 + pn = resize(t, pn); 1786 + cindex = get_index(pkey, pn); 1787 + 1788 + continue; 1789 + } 1790 + 1791 + /* grab the next available node */ 1792 + n = get_child(pn, cindex); 1793 + if (!n) 1794 + continue; 1795 + 1796 + if (IS_TNODE(n)) { 1797 + /* record pn and cindex for leaf walking */ 1798 + pn = n; 1799 + cindex = 1ul << n->bits; 1800 + 1801 + continue; 1802 + } 1803 + 1804 + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { 1805 + /* if alias was cloned to local then we just 1806 + * need to remove the local copy from main 1807 + */ 1808 + if (tb->tb_id != fa->tb_id) { 1809 + hlist_del_rcu(&fa->fa_list); 1810 + alias_free_mem_rcu(fa); 1811 + continue; 1812 + } 1813 + 1814 + /* record local slen */ 1815 + slen = fa->fa_slen; 1816 + } 1817 + 1818 + /* update leaf slen */ 1819 + n->slen = slen; 1820 + 1821 + if (hlist_empty(&n->leaf)) { 1822 + put_child_root(pn, n->key, NULL); 1823 + node_free(n); 1824 + } 1825 + } 1763 1826 } 1764 1827 1765 1828 /* 
Caller must hold RTNL. */ ··· 2480 2413 struct key_vector *l, **tp = &iter->tnode; 2481 2414 t_key key; 2482 2415 2483 - /* use cache location of next-to-find key */ 2416 + /* use cached location of previously found key */ 2484 2417 if (iter->pos > 0 && pos >= iter->pos) { 2485 - pos -= iter->pos; 2486 2418 key = iter->key; 2487 2419 } else { 2488 - iter->pos = 0; 2420 + iter->pos = 1; 2489 2421 key = 0; 2490 2422 } 2491 2423 2492 - while ((l = leaf_walk_rcu(tp, key)) != NULL) { 2424 + pos -= iter->pos; 2425 + 2426 + while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { 2493 2427 key = l->key + 1; 2494 2428 iter->pos++; 2495 - 2496 - if (--pos <= 0) 2497 - break; 2498 - 2499 2429 l = NULL; 2500 2430 2501 2431 /* handle unlikely case of a key wrap */ ··· 2501 2437 } 2502 2438 2503 2439 if (l) 2504 - iter->key = key; /* remember it */ 2440 + iter->key = l->key; /* remember it */ 2505 2441 else 2506 2442 iter->pos = 0; /* forget it */ 2507 2443 ··· 2529 2465 return fib_route_get_idx(iter, *pos); 2530 2466 2531 2467 iter->pos = 0; 2532 - iter->key = 0; 2468 + iter->key = KEY_MAX; 2533 2469 2534 2470 return SEQ_START_TOKEN; 2535 2471 } ··· 2538 2474 { 2539 2475 struct fib_route_iter *iter = seq->private; 2540 2476 struct key_vector *l = NULL; 2541 - t_key key = iter->key; 2477 + t_key key = iter->key + 1; 2542 2478 2543 2479 ++*pos; 2544 2480 ··· 2547 2483 l = leaf_walk_rcu(&iter->tnode, key); 2548 2484 2549 2485 if (l) { 2550 - iter->key = l->key + 1; 2486 + iter->key = l->key; 2551 2487 iter->pos++; 2552 2488 } else { 2553 2489 iter->pos = 0;
+2 -2
net/ipv4/icmp.c
··· 477 477 fl4->flowi4_proto = IPPROTO_ICMP; 478 478 fl4->fl4_icmp_type = type; 479 479 fl4->fl4_icmp_code = code; 480 - fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); 480 + fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); 481 481 482 482 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); 483 483 rt = __ip_route_output_key_hash(net, fl4, ··· 502 502 if (err) 503 503 goto relookup_failed; 504 504 505 - if (inet_addr_type_dev_table(net, skb_in->dev, 505 + if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev, 506 506 fl4_dec.saddr) == RTN_LOCAL) { 507 507 rt2 = __ip_route_output_key(net, &fl4_dec); 508 508 if (IS_ERR(rt2))
+36 -14
net/ipv4/igmp.c
··· 162 162 } 163 163 164 164 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); 165 - static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr); 165 + static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im); 166 166 static void igmpv3_clear_delrec(struct in_device *in_dev); 167 167 static int sf_setstate(struct ip_mc_list *pmc); 168 168 static void sf_markstate(struct ip_mc_list *pmc); ··· 1130 1130 spin_unlock_bh(&in_dev->mc_tomb_lock); 1131 1131 } 1132 1132 1133 - static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr) 1133 + /* 1134 + * restore ip_mc_list deleted records 1135 + */ 1136 + static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) 1134 1137 { 1135 1138 struct ip_mc_list *pmc, *pmc_prev; 1136 - struct ip_sf_list *psf, *psf_next; 1139 + struct ip_sf_list *psf; 1140 + struct net *net = dev_net(in_dev->dev); 1141 + __be32 multiaddr = im->multiaddr; 1137 1142 1138 1143 spin_lock_bh(&in_dev->mc_tomb_lock); 1139 1144 pmc_prev = NULL; ··· 1154 1149 in_dev->mc_tomb = pmc->next; 1155 1150 } 1156 1151 spin_unlock_bh(&in_dev->mc_tomb_lock); 1152 + 1153 + spin_lock_bh(&im->lock); 1157 1154 if (pmc) { 1158 - for (psf = pmc->tomb; psf; psf = psf_next) { 1159 - psf_next = psf->sf_next; 1160 - kfree(psf); 1155 + im->interface = pmc->interface; 1156 + im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; 1157 + im->sfmode = pmc->sfmode; 1158 + if (pmc->sfmode == MCAST_INCLUDE) { 1159 + im->tomb = pmc->tomb; 1160 + im->sources = pmc->sources; 1161 + for (psf = im->sources; psf; psf = psf->sf_next) 1162 + psf->sf_crcount = im->crcount; 1161 1163 } 1162 1164 in_dev_put(pmc->interface); 1163 - kfree(pmc); 1164 1165 } 1166 + spin_unlock_bh(&im->lock); 1165 1167 } 1166 1168 1169 + /* 1170 + * flush ip_mc_list deleted records 1171 + */ 1167 1172 static void igmpv3_clear_delrec(struct in_device *in_dev) 1168 1173 { 1169 1174 struct ip_mc_list *pmc, *nextpmc; ··· 
1381 1366 ip_mc_hash_add(in_dev, im); 1382 1367 1383 1368 #ifdef CONFIG_IP_MULTICAST 1384 - igmpv3_del_delrec(in_dev, im->multiaddr); 1369 + igmpv3_del_delrec(in_dev, im); 1385 1370 #endif 1386 1371 igmp_group_added(im); 1387 1372 if (!in_dev->dead) ··· 1641 1626 1642 1627 ASSERT_RTNL(); 1643 1628 1644 - for_each_pmc_rtnl(in_dev, pmc) 1629 + for_each_pmc_rtnl(in_dev, pmc) { 1630 + #ifdef CONFIG_IP_MULTICAST 1631 + igmpv3_del_delrec(in_dev, pmc); 1632 + #endif 1645 1633 igmp_group_added(pmc); 1634 + } 1646 1635 } 1647 1636 1648 1637 /* Device going down */ ··· 1667 1648 in_dev->mr_gq_running = 0; 1668 1649 if (del_timer(&in_dev->mr_gq_timer)) 1669 1650 __in_dev_put(in_dev); 1670 - igmpv3_clear_delrec(in_dev); 1671 1651 #endif 1672 1652 1673 1653 ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); ··· 1706 1688 #endif 1707 1689 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1708 1690 1709 - for_each_pmc_rtnl(in_dev, pmc) 1691 + for_each_pmc_rtnl(in_dev, pmc) { 1692 + #ifdef CONFIG_IP_MULTICAST 1693 + igmpv3_del_delrec(in_dev, pmc); 1694 + #endif 1710 1695 igmp_group_added(pmc); 1696 + } 1711 1697 } 1712 1698 1713 1699 /* ··· 1726 1704 1727 1705 /* Deactivate timers */ 1728 1706 ip_mc_down(in_dev); 1707 + #ifdef CONFIG_IP_MULTICAST 1708 + igmpv3_clear_delrec(in_dev); 1709 + #endif 1729 1710 1730 1711 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { 1731 1712 in_dev->mc_list = i->next_rcu; 1732 1713 in_dev->mc_count--; 1733 - 1734 - /* We've dropped the groups in ip_mc_down already */ 1735 - ip_mc_clear_src(i); 1736 1714 ip_ma_put(i); 1737 1715 } 1738 1716 }
+1 -1
net/ipv4/ip_forward.c
··· 117 117 if (opt->is_strictroute && rt->rt_uses_gateway) 118 118 goto sr_failed; 119 119 120 - IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; 120 + IPCB(skb)->flags |= IPSKB_FORWARDED; 121 121 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); 122 122 if (ip_exceeds_mtu(skb, mtu)) { 123 123 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
+15 -10
net/ipv4/ip_output.c
··· 239 239 struct sk_buff *segs; 240 240 int ret = 0; 241 241 242 - /* common case: fragmentation of segments is not allowed, 243 - * or seglen is <= mtu 242 + /* common case: seglen is <= mtu 244 243 */ 245 - if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) || 246 - skb_gso_validate_mtu(skb, mtu)) 244 + if (skb_gso_validate_mtu(skb, mtu)) 247 245 return ip_finish_output2(net, sk, skb); 248 246 249 - /* Slowpath - GSO segment length is exceeding the dst MTU. 247 + /* Slowpath - GSO segment length exceeds the egress MTU. 250 248 * 251 - * This can happen in two cases: 252 - * 1) TCP GRO packet, DF bit not set 253 - * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly 254 - * from host network stack. 249 + * This can happen in several cases: 250 + * - Forwarding of a TCP GRO skb, when DF flag is not set. 251 + * - Forwarding of an skb that arrived on a virtualization interface 252 + * (virtio-net/vhost/tap) with TSO/GSO size set by other network 253 + * stack. 254 + * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an 255 + * interface with a smaller MTU. 256 + * - Arriving GRO skb (or GSO skb in a virtualized environment) that is 257 + * bridged to a NETIF_F_TSO tunnel stacked over an interface with an 258 + * insufficent MTU. 255 259 */ 256 260 features = netif_skb_features(skb); 257 261 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); ··· 1583 1579 } 1584 1580 1585 1581 oif = arg->bound_dev_if; 1586 - oif = oif ? : skb->skb_iif; 1582 + if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) 1583 + oif = skb->skb_iif; 1587 1584 1588 1585 flowi4_init_output(&fl4, oif, 1589 1586 IP4_REPLY_MARK(net, skb->mark),
-11
net/ipv4/ip_tunnel_core.c
··· 63 63 int pkt_len = skb->len - skb_inner_network_offset(skb); 64 64 struct net *net = dev_net(rt->dst.dev); 65 65 struct net_device *dev = skb->dev; 66 - int skb_iif = skb->skb_iif; 67 66 struct iphdr *iph; 68 67 int err; 69 68 ··· 71 72 skb_clear_hash_if_not_l4(skb); 72 73 skb_dst_set(skb, &rt->dst); 73 74 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 74 - 75 - if (skb_iif && !(df & htons(IP_DF))) { 76 - /* Arrived from an ingress interface, got encapsulated, with 77 - * fragmentation of encapulating frames allowed. 78 - * If skb is gso, the resulting encapsulated network segments 79 - * may exceed dst mtu. 80 - * Allow IP Fragmentation of segments. 81 - */ 82 - IPCB(skb)->flags |= IPSKB_FRAG_SEGS; 83 - } 84 75 85 76 /* Push down and install the IP header. */ 86 77 skb_push(skb, sizeof(struct iphdr));
+1 -1
net/ipv4/ipmr.c
··· 1749 1749 vif->dev->stats.tx_bytes += skb->len; 1750 1750 } 1751 1751 1752 - IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; 1752 + IPCB(skb)->flags |= IPSKB_FORWARDED; 1753 1753 1754 1754 /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally 1755 1755 * not only before forwarding, but after forwarding on all output
+4 -2
net/ipv4/netfilter/nft_dup_ipv4.c
··· 28 28 struct in_addr gw = { 29 29 .s_addr = (__force __be32)regs->data[priv->sreg_addr], 30 30 }; 31 - int oif = regs->data[priv->sreg_dev]; 31 + int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; 32 32 33 33 nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); 34 34 } ··· 59 59 { 60 60 struct nft_dup_ipv4 *priv = nft_expr_priv(expr); 61 61 62 - if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || 62 + if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) 63 + goto nla_put_failure; 64 + if (priv->sreg_dev && 63 65 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) 64 66 goto nla_put_failure; 65 67
+3 -1
net/ipv4/route.c
··· 753 753 goto reject_redirect; 754 754 } 755 755 756 - n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); 756 + n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); 757 + if (!n) 758 + n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); 757 759 if (!IS_ERR(n)) { 758 760 if (!(n->nud_state & NUD_VALID)) { 759 761 neigh_event_send(n, NULL);
+2 -2
net/ipv4/tcp.c
··· 1164 1164 1165 1165 err = -EPIPE; 1166 1166 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1167 - goto out_err; 1167 + goto do_error; 1168 1168 1169 1169 sg = !!(sk->sk_route_caps & NETIF_F_SG); 1170 1170 ··· 1241 1241 1242 1242 if (!skb_can_coalesce(skb, i, pfrag->page, 1243 1243 pfrag->offset)) { 1244 - if (i == sysctl_max_skb_frags || !sg) { 1244 + if (i >= sysctl_max_skb_frags || !sg) { 1245 1245 tcp_mark_push(tp, skb); 1246 1246 goto new_segment; 1247 1247 }
+3 -1
net/ipv4/tcp_cong.c
··· 200 200 icsk->icsk_ca_ops = ca; 201 201 icsk->icsk_ca_setsockopt = 1; 202 202 203 - if (sk->sk_state != TCP_CLOSE) 203 + if (sk->sk_state != TCP_CLOSE) { 204 + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 204 205 tcp_init_congestion_control(sk); 206 + } 205 207 } 206 208 207 209 /* Manage refcounts on socket close. */
+12 -1
net/ipv4/tcp_dctcp.c
··· 56 56 u32 next_seq; 57 57 u32 ce_state; 58 58 u32 delayed_ack_reserved; 59 + u32 loss_cwnd; 59 60 }; 60 61 61 62 static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ ··· 97 96 ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); 98 97 99 98 ca->delayed_ack_reserved = 0; 99 + ca->loss_cwnd = 0; 100 100 ca->ce_state = 0; 101 101 102 102 dctcp_reset(tp, ca); ··· 113 111 114 112 static u32 dctcp_ssthresh(struct sock *sk) 115 113 { 116 - const struct dctcp *ca = inet_csk_ca(sk); 114 + struct dctcp *ca = inet_csk_ca(sk); 117 115 struct tcp_sock *tp = tcp_sk(sk); 118 116 117 + ca->loss_cwnd = tp->snd_cwnd; 119 118 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); 120 119 } 121 120 ··· 311 308 return 0; 312 309 } 313 310 311 + static u32 dctcp_cwnd_undo(struct sock *sk) 312 + { 313 + const struct dctcp *ca = inet_csk_ca(sk); 314 + 315 + return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); 316 + } 317 + 314 318 static struct tcp_congestion_ops dctcp __read_mostly = { 315 319 .init = dctcp_init, 316 320 .in_ack_event = dctcp_update_alpha, 317 321 .cwnd_event = dctcp_cwnd_event, 318 322 .ssthresh = dctcp_ssthresh, 319 323 .cong_avoid = tcp_reno_cong_avoid, 324 + .undo_cwnd = dctcp_cwnd_undo, 320 325 .set_state = dctcp_state, 321 326 .get_info = dctcp_get_info, 322 327 .flags = TCP_CONG_NEEDS_ECN,
+18 -1
net/ipv4/tcp_ipv4.c
··· 1564 1564 } 1565 1565 EXPORT_SYMBOL(tcp_add_backlog); 1566 1566 1567 + int tcp_filter(struct sock *sk, struct sk_buff *skb) 1568 + { 1569 + struct tcphdr *th = (struct tcphdr *)skb->data; 1570 + unsigned int eaten = skb->len; 1571 + int err; 1572 + 1573 + err = sk_filter_trim_cap(sk, skb, th->doff * 4); 1574 + if (!err) { 1575 + eaten -= skb->len; 1576 + TCP_SKB_CB(skb)->end_seq -= eaten; 1577 + } 1578 + return err; 1579 + } 1580 + EXPORT_SYMBOL(tcp_filter); 1581 + 1567 1582 /* 1568 1583 * From tcp_input.c 1569 1584 */ ··· 1691 1676 1692 1677 nf_reset(skb); 1693 1678 1694 - if (sk_filter(sk, skb)) 1679 + if (tcp_filter(sk, skb)) 1695 1680 goto discard_and_relse; 1681 + th = (const struct tcphdr *)skb->data; 1682 + iph = ip_hdr(skb); 1696 1683 1697 1684 skb->dev = NULL; 1698 1685
+3 -3
net/ipv4/udp.c
··· 1652 1652 1653 1653 if (use_hash2) { 1654 1654 hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & 1655 - udp_table.mask; 1656 - hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask; 1655 + udptable->mask; 1656 + hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask; 1657 1657 start_lookup: 1658 - hslot = &udp_table.hash2[hash2]; 1658 + hslot = &udptable->hash2[hash2]; 1659 1659 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 1660 1660 } 1661 1661
+1 -1
net/ipv6/icmp.c
··· 448 448 if (__ipv6_addr_needs_scope_id(addr_type)) 449 449 iif = skb->dev->ifindex; 450 450 else 451 - iif = l3mdev_master_ifindex(skb->dev); 451 + iif = l3mdev_master_ifindex(skb_dst(skb)->dev); 452 452 453 453 /* 454 454 * Must not send error if the source does not uniquely
+1 -1
net/ipv6/ip6_output.c
··· 1366 1366 if (((length > mtu) || 1367 1367 (skb && skb_is_gso(skb))) && 1368 1368 (sk->sk_protocol == IPPROTO_UDP) && 1369 - (rt->dst.dev->features & NETIF_F_UFO) && 1369 + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && 1370 1370 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { 1371 1371 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1372 1372 hh_len, fragheaderlen, exthdrlen,
+11 -2
net/ipv6/ip6_tunnel.c
··· 1034 1034 int mtu; 1035 1035 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; 1036 1036 unsigned int max_headroom = psh_hlen; 1037 + bool use_cache = false; 1037 1038 u8 hop_limit; 1038 1039 int err = -1; 1039 1040 ··· 1067 1066 1068 1067 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1069 1068 neigh_release(neigh); 1070 - } else if (!fl6->flowi6_mark) 1069 + } else if (!(t->parms.flags & 1070 + (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1071 + /* enable the cache only only if the routing decision does 1072 + * not depend on the current inner header value 1073 + */ 1074 + use_cache = true; 1075 + } 1076 + 1077 + if (use_cache) 1071 1078 dst = dst_cache_get(&t->dst_cache); 1072 1079 1073 1080 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) ··· 1159 1150 if (t->encap.type != TUNNEL_ENCAP_NONE) 1160 1151 goto tx_err_dst_release; 1161 1152 } else { 1162 - if (!fl6->flowi6_mark && ndst) 1153 + if (use_cache && ndst) 1163 1154 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); 1164 1155 } 1165 1156 skb_dst_set(skb, dst);
-3
net/ipv6/ip6_udp_tunnel.c
··· 88 88 89 89 uh->len = htons(skb->len); 90 90 91 - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 92 - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED 93 - | IPSKB_REROUTED); 94 91 skb_dst_set(skb, dst); 95 92 96 93 udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
+4 -2
net/ipv6/netfilter/nft_dup_ipv6.c
··· 26 26 { 27 27 struct nft_dup_ipv6 *priv = nft_expr_priv(expr); 28 28 struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr]; 29 - int oif = regs->data[priv->sreg_dev]; 29 + int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; 30 30 31 31 nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); 32 32 } ··· 57 57 { 58 58 struct nft_dup_ipv6 *priv = nft_expr_priv(expr); 59 59 60 - if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || 60 + if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) 61 + goto nla_put_failure; 62 + if (priv->sreg_dev && 61 63 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) 62 64 goto nla_put_failure; 63 65
+4
net/ipv6/route.c
··· 1364 1364 if (rt6->rt6i_flags & RTF_LOCAL) 1365 1365 return; 1366 1366 1367 + if (dst_metric_locked(dst, RTAX_MTU)) 1368 + return; 1369 + 1367 1370 dst_confirm(dst); 1368 1371 mtu = max_t(u32, mtu, IPV6_MIN_MTU); 1369 1372 if (mtu >= dst_mtu(dst)) ··· 2761 2758 PMTU discouvery. 2762 2759 */ 2763 2760 if (rt->dst.dev == arg->dev && 2761 + dst_metric_raw(&rt->dst, RTAX_MTU) && 2764 2762 !dst_metric_locked(&rt->dst, RTAX_MTU)) { 2765 2763 if (rt->rt6i_flags & RTF_CACHE) { 2766 2764 /* For RTF_CACHE with rt6i_pmtu == 0
+10 -4
net/ipv6/tcp_ipv6.c
··· 818 818 fl6.flowi6_proto = IPPROTO_TCP; 819 819 if (rt6_need_strict(&fl6.daddr) && !oif) 820 820 fl6.flowi6_oif = tcp_v6_iif(skb); 821 - else 822 - fl6.flowi6_oif = oif ? : skb->skb_iif; 821 + else { 822 + if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) 823 + oif = skb->skb_iif; 824 + 825 + fl6.flowi6_oif = oif; 826 + } 823 827 824 828 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); 825 829 fl6.fl6_dport = t1->dest; ··· 1229 1225 if (skb->protocol == htons(ETH_P_IP)) 1230 1226 return tcp_v4_do_rcv(sk, skb); 1231 1227 1232 - if (sk_filter(sk, skb)) 1228 + if (tcp_filter(sk, skb)) 1233 1229 goto discard; 1234 1230 1235 1231 /* ··· 1457 1453 if (tcp_v6_inbound_md5_hash(sk, skb)) 1458 1454 goto discard_and_relse; 1459 1455 1460 - if (sk_filter(sk, skb)) 1456 + if (tcp_filter(sk, skb)) 1461 1457 goto discard_and_relse; 1458 + th = (const struct tcphdr *)skb->data; 1459 + hdr = ipv6_hdr(skb); 1462 1460 1463 1461 skb->dev = NULL; 1464 1462
+3 -3
net/ipv6/udp.c
··· 706 706 707 707 if (use_hash2) { 708 708 hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & 709 - udp_table.mask; 710 - hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; 709 + udptable->mask; 710 + hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask; 711 711 start_lookup: 712 - hslot = &udp_table.hash2[hash2]; 712 + hslot = &udptable->hash2[hash2]; 713 713 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 714 714 } 715 715
+1 -1
net/l2tp/l2tp_eth.c
··· 97 97 unsigned int len = skb->len; 98 98 int ret = l2tp_xmit_skb(session, skb, session->hdr_len); 99 99 100 - if (likely(ret == NET_XMIT_SUCCESS)) { 100 + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 101 101 atomic_long_add(len, &priv->tx_bytes); 102 102 atomic_long_inc(&priv->tx_packets); 103 103 } else {
+3 -2
net/l2tp/l2tp_ip.c
··· 251 251 int ret; 252 252 int chk_addr_ret; 253 253 254 - if (!sock_flag(sk, SOCK_ZAPPED)) 255 - return -EINVAL; 256 254 if (addr_len < sizeof(struct sockaddr_l2tpip)) 257 255 return -EINVAL; 258 256 if (addr->l2tp_family != AF_INET) ··· 265 267 read_unlock_bh(&l2tp_ip_lock); 266 268 267 269 lock_sock(sk); 270 + if (!sock_flag(sk, SOCK_ZAPPED)) 271 + goto out; 272 + 268 273 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) 269 274 goto out; 270 275
+3 -2
net/l2tp/l2tp_ip6.c
··· 269 269 int addr_type; 270 270 int err; 271 271 272 - if (!sock_flag(sk, SOCK_ZAPPED)) 273 - return -EINVAL; 274 272 if (addr->l2tp_family != AF_INET6) 275 273 return -EINVAL; 276 274 if (addr_len < sizeof(*addr)) ··· 294 296 lock_sock(sk); 295 297 296 298 err = -EINVAL; 299 + if (!sock_flag(sk, SOCK_ZAPPED)) 300 + goto out_unlock; 301 + 297 302 if (sk->sk_state != TCP_CLOSE) 298 303 goto out_unlock; 299 304
+1 -1
net/mac80211/sta_info.c
··· 688 688 } 689 689 690 690 /* No need to do anything if the driver does all */ 691 - if (!local->ops->set_tim) 691 + if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) 692 692 return; 693 693 694 694 if (sta->dead)
+10 -4
net/mac80211/tx.c
··· 1501 1501 struct sta_info *sta, 1502 1502 struct sk_buff *skb) 1503 1503 { 1504 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1505 1504 struct fq *fq = &local->fq; 1506 1505 struct ieee80211_vif *vif; 1507 1506 struct txq_info *txqi; ··· 1524 1525 1525 1526 if (!txqi) 1526 1527 return false; 1527 - 1528 - info->control.vif = vif; 1529 1528 1530 1529 spin_lock_bh(&fq->lock); 1531 1530 ieee80211_txq_enqueue(local, txqi, skb); ··· 3210 3213 3211 3214 if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { 3212 3215 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 3213 - *ieee80211_get_qos_ctl(hdr) = tid; 3214 3216 hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); 3215 3217 } else { 3216 3218 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; ··· 3334 3338 (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); 3335 3339 info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; 3336 3340 3341 + if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { 3342 + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 3343 + *ieee80211_get_qos_ctl(hdr) = tid; 3344 + } 3345 + 3337 3346 __skb_queue_head_init(&tx.skbs); 3338 3347 3339 3348 tx.flags = IEEE80211_TX_UNICAST; ··· 3426 3425 ieee80211_free_txskb(&local->hw, skb); 3427 3426 goto begin; 3428 3427 } 3428 + 3429 + if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) 3430 + info->flags |= IEEE80211_TX_CTL_AMPDU; 3431 + else 3432 + info->flags &= ~IEEE80211_TX_CTL_AMPDU; 3429 3433 3430 3434 if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { 3431 3435 struct sta_info *sta = container_of(txq->sta, struct sta_info,
+16
net/mac80211/vht.c
··· 270 270 vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); 271 271 } 272 272 273 + /* 274 + * This is a workaround for VHT-enabled STAs which break the spec 275 + * and have the VHT-MCS Rx map filled in with value 3 for all eight 276 + * spacial streams, an example is AR9462. 277 + * 278 + * As per spec, in section 22.1.1 Introduction to the VHT PHY 279 + * A VHT STA shall support at least single spactial stream VHT-MCSs 280 + * 0 to 7 (transmit and receive) in all supported channel widths. 281 + */ 282 + if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) { 283 + vht_cap->vht_supported = false; 284 + sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n", 285 + sta->addr); 286 + return; 287 + } 288 + 273 289 /* finally set up the bandwidth */ 274 290 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 275 291 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+1 -1
net/netfilter/ipvs/ip_vs_ctl.c
··· 2845 2845 .hdrsize = 0, 2846 2846 .name = IPVS_GENL_NAME, 2847 2847 .version = IPVS_GENL_VERSION, 2848 - .maxattr = IPVS_CMD_MAX, 2848 + .maxattr = IPVS_CMD_ATTR_MAX, 2849 2849 .netnsok = true, /* Make ipvsadm to work on netns */ 2850 2850 }; 2851 2851
+5 -2
net/netfilter/ipvs/ip_vs_sync.c
··· 283 283 */ 284 284 static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) 285 285 { 286 + memset(ho, 0, sizeof(*ho)); 286 287 ho->init_seq = get_unaligned_be32(&no->init_seq); 287 288 ho->delta = get_unaligned_be32(&no->delta); 288 289 ho->previous_delta = get_unaligned_be32(&no->previous_delta); ··· 918 917 kfree(param->pe_data); 919 918 } 920 919 921 - if (opt) 922 - memcpy(&cp->in_seq, opt, sizeof(*opt)); 920 + if (opt) { 921 + cp->in_seq = opt->in_seq; 922 + cp->out_seq = opt->out_seq; 923 + } 923 924 atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); 924 925 cp->state = state; 925 926 cp->old_state = cp->state;
+41 -8
net/netfilter/nf_conntrack_core.c
··· 76 76 struct delayed_work dwork; 77 77 u32 last_bucket; 78 78 bool exiting; 79 + long next_gc_run; 79 80 }; 80 81 81 82 static __read_mostly struct kmem_cache *nf_conntrack_cachep; ··· 84 83 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); 85 84 static __read_mostly bool nf_conntrack_locks_all; 86 85 86 + /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ 87 87 #define GC_MAX_BUCKETS_DIV 64u 88 - #define GC_MAX_BUCKETS 8192u 89 - #define GC_INTERVAL (5 * HZ) 88 + /* upper bound of scan intervals */ 89 + #define GC_INTERVAL_MAX (2 * HZ) 90 + /* maximum conntracks to evict per gc run */ 90 91 #define GC_MAX_EVICTS 256u 91 92 92 93 static struct conntrack_gc_work conntrack_gc_work; ··· 939 936 static void gc_worker(struct work_struct *work) 940 937 { 941 938 unsigned int i, goal, buckets = 0, expired_count = 0; 942 - unsigned long next_run = GC_INTERVAL; 943 - unsigned int ratio, scanned = 0; 944 939 struct conntrack_gc_work *gc_work; 940 + unsigned int ratio, scanned = 0; 941 + unsigned long next_run; 945 942 946 943 gc_work = container_of(work, struct conntrack_gc_work, dwork.work); 947 944 948 - goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS); 945 + goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; 949 946 i = gc_work->last_bucket; 950 947 951 948 do { ··· 985 982 if (gc_work->exiting) 986 983 return; 987 984 985 + /* 986 + * Eviction will normally happen from the packet path, and not 987 + * from this gc worker. 988 + * 989 + * This worker is only here to reap expired entries when system went 990 + * idle after a busy period. 991 + * 992 + * The heuristics below are supposed to balance conflicting goals: 993 + * 994 + * 1. Minimize time until we notice a stale entry 995 + * 2. Maximize scan intervals to not waste cycles 996 + * 997 + * Normally, expired_count will be 0, this increases the next_run time 998 + * to priorize 2) above. 
999 + * 1000 + * As soon as a timed-out entry is found, move towards 1) and increase 1001 + * the scan frequency. 1002 + * In case we have lots of evictions next scan is done immediately. 1003 + */ 988 1004 ratio = scanned ? expired_count * 100 / scanned : 0; 989 - if (ratio >= 90 || expired_count == GC_MAX_EVICTS) 1005 + if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { 1006 + gc_work->next_gc_run = 0; 990 1007 next_run = 0; 1008 + } else if (expired_count) { 1009 + gc_work->next_gc_run /= 2U; 1010 + next_run = msecs_to_jiffies(1); 1011 + } else { 1012 + if (gc_work->next_gc_run < GC_INTERVAL_MAX) 1013 + gc_work->next_gc_run += msecs_to_jiffies(1); 1014 + 1015 + next_run = gc_work->next_gc_run; 1016 + } 991 1017 992 1018 gc_work->last_bucket = i; 993 - schedule_delayed_work(&gc_work->dwork, next_run); 1019 + queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); 994 1020 } 995 1021 996 1022 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) 997 1023 { 998 1024 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); 1025 + gc_work->next_gc_run = GC_INTERVAL_MAX; 999 1026 gc_work->exiting = false; 1000 1027 } 1001 1028 ··· 1918 1885 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); 1919 1886 1920 1887 conntrack_gc_work_init(&conntrack_gc_work); 1921 - schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL); 1888 + queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); 1922 1889 1923 1890 return 0; 1924 1891
+8 -3
net/netfilter/nf_conntrack_helper.c
··· 138 138 139 139 for (i = 0; i < nf_ct_helper_hsize; i++) { 140 140 hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { 141 - if (!strcmp(h->name, name) && 142 - h->tuple.src.l3num == l3num && 143 - h->tuple.dst.protonum == protonum) 141 + if (strcmp(h->name, name)) 142 + continue; 143 + 144 + if (h->tuple.src.l3num != NFPROTO_UNSPEC && 145 + h->tuple.src.l3num != l3num) 146 + continue; 147 + 148 + if (h->tuple.dst.protonum == protonum) 144 149 return h; 145 150 } 146 151 }
+4 -1
net/netfilter/nf_conntrack_sip.c
··· 1436 1436 handler = &sip_handlers[i]; 1437 1437 if (handler->request == NULL) 1438 1438 continue; 1439 - if (*datalen < handler->len || 1439 + if (*datalen < handler->len + 2 || 1440 1440 strncasecmp(*dptr, handler->method, handler->len)) 1441 + continue; 1442 + if ((*dptr)[handler->len] != ' ' || 1443 + !isalpha((*dptr)[handler->len+1])) 1441 1444 continue; 1442 1445 1443 1446 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
+11 -7
net/netfilter/nf_tables_api.c
··· 2956 2956 2957 2957 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); 2958 2958 if (err < 0) 2959 - goto err2; 2959 + goto err3; 2960 2960 2961 2961 list_add_tail_rcu(&set->list, &table->sets); 2962 2962 table->use++; 2963 2963 return 0; 2964 2964 2965 + err3: 2966 + ops->destroy(set); 2965 2967 err2: 2966 2968 kfree(set); 2967 2969 err1: ··· 3454 3452 return elem; 3455 3453 } 3456 3454 3457 - void nft_set_elem_destroy(const struct nft_set *set, void *elem) 3455 + void nft_set_elem_destroy(const struct nft_set *set, void *elem, 3456 + bool destroy_expr) 3458 3457 { 3459 3458 struct nft_set_ext *ext = nft_set_elem_ext(set, elem); 3460 3459 3461 3460 nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); 3462 3461 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) 3463 3462 nft_data_uninit(nft_set_ext_data(ext), set->dtype); 3464 - if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) 3463 + if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) 3465 3464 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); 3466 3465 3467 3466 kfree(elem); ··· 3568 3565 dreg = nft_type_to_reg(set->dtype); 3569 3566 list_for_each_entry(binding, &set->bindings, list) { 3570 3567 struct nft_ctx bind_ctx = { 3568 + .net = ctx->net, 3571 3569 .afi = ctx->afi, 3572 3570 .table = ctx->table, 3573 3571 .chain = (struct nft_chain *)binding->chain, ··· 3816 3812 3817 3813 gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); 3818 3814 for (i = 0; i < gcb->head.cnt; i++) 3819 - nft_set_elem_destroy(gcb->head.set, gcb->elems[i]); 3815 + nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true); 3820 3816 kfree(gcb); 3821 3817 } 3822 3818 EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); ··· 4034 4030 break; 4035 4031 case NFT_MSG_DELSETELEM: 4036 4032 nft_set_elem_destroy(nft_trans_elem_set(trans), 4037 - nft_trans_elem(trans).priv); 4033 + nft_trans_elem(trans).priv, true); 4038 4034 break; 4039 4035 } 4040 4036 kfree(trans); ··· 4175 4171 break; 4176 4172 case NFT_MSG_NEWSETELEM: 4177 4173 
nft_set_elem_destroy(nft_trans_elem_set(trans), 4178 - nft_trans_elem(trans).priv); 4174 + nft_trans_elem(trans).priv, true); 4179 4175 break; 4180 4176 } 4181 4177 kfree(trans); ··· 4425 4421 * Otherwise a 0 is returned and the attribute value is stored in the 4426 4422 * destination variable. 4427 4423 */ 4428 - unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) 4424 + int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) 4429 4425 { 4430 4426 u32 val; 4431 4427
+13 -6
net/netfilter/nft_dynset.c
··· 44 44 &regs->data[priv->sreg_key], 45 45 &regs->data[priv->sreg_data], 46 46 timeout, GFP_ATOMIC); 47 - if (elem == NULL) { 48 - if (set->size) 49 - atomic_dec(&set->nelems); 50 - return NULL; 51 - } 47 + if (elem == NULL) 48 + goto err1; 52 49 53 50 ext = nft_set_elem_ext(set, elem); 54 51 if (priv->expr != NULL && 55 52 nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) 56 - return NULL; 53 + goto err2; 57 54 58 55 return elem; 56 + 57 + err2: 58 + nft_set_elem_destroy(set, elem, false); 59 + err1: 60 + if (set->size) 61 + atomic_dec(&set->nelems); 62 + return NULL; 59 63 } 60 64 61 65 static void nft_dynset_eval(const struct nft_expr *expr, ··· 142 138 if (IS_ERR(set)) 143 139 return PTR_ERR(set); 144 140 } 141 + 142 + if (set->ops->update == NULL) 143 + return -EOPNOTSUPP; 145 144 146 145 if (set->flags & NFT_SET_CONSTANT) 147 146 return -EBUSY;
+14 -5
net/netfilter/nft_set_hash.c
··· 98 98 const struct nft_set_ext **ext) 99 99 { 100 100 struct nft_hash *priv = nft_set_priv(set); 101 - struct nft_hash_elem *he; 101 + struct nft_hash_elem *he, *prev; 102 102 struct nft_hash_cmp_arg arg = { 103 103 .genmask = NFT_GENMASK_ANY, 104 104 .set = set, ··· 112 112 he = new(set, expr, regs); 113 113 if (he == NULL) 114 114 goto err1; 115 - if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node, 116 - nft_hash_params)) 115 + 116 + prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, 117 + nft_hash_params); 118 + if (IS_ERR(prev)) 117 119 goto err2; 120 + 121 + /* Another cpu may race to insert the element with the same key */ 122 + if (prev) { 123 + nft_set_elem_destroy(set, he, true); 124 + he = prev; 125 + } 126 + 118 127 out: 119 128 *ext = &he->ext; 120 129 return true; 121 130 122 131 err2: 123 - nft_set_elem_destroy(set, he); 132 + nft_set_elem_destroy(set, he, true); 124 133 err1: 125 134 return false; 126 135 } ··· 341 332 342 333 static void nft_hash_elem_destroy(void *ptr, void *arg) 343 334 { 344 - nft_set_elem_destroy((const struct nft_set *)arg, ptr); 335 + nft_set_elem_destroy((const struct nft_set *)arg, ptr, true); 345 336 } 346 337 347 338 static void nft_hash_destroy(const struct nft_set *set)
+1 -1
net/netfilter/nft_set_rbtree.c
··· 266 266 while ((node = priv->root.rb_node) != NULL) { 267 267 rb_erase(node, &priv->root); 268 268 rbe = rb_entry(node, struct nft_rbtree_elem, node); 269 - nft_set_elem_destroy(set, rbe); 269 + nft_set_elem_destroy(set, rbe, true); 270 270 } 271 271 } 272 272
+2 -2
net/netfilter/xt_connmark.c
··· 44 44 u_int32_t newmark; 45 45 46 46 ct = nf_ct_get(skb, &ctinfo); 47 - if (ct == NULL) 47 + if (ct == NULL || nf_ct_is_untracked(ct)) 48 48 return XT_CONTINUE; 49 49 50 50 switch (info->mode) { ··· 97 97 const struct nf_conn *ct; 98 98 99 99 ct = nf_ct_get(skb, &ctinfo); 100 - if (ct == NULL) 100 + if (ct == NULL || nf_ct_is_untracked(ct)) 101 101 return false; 102 102 103 103 return ((ct->mark & info->mask) == info->mark) ^ info->invert;
+1 -4
net/netlink/diag.c
··· 178 178 } 179 179 cb->args[1] = i; 180 180 } else { 181 - if (req->sdiag_protocol >= MAX_LINKS) { 182 - read_unlock(&nl_table_lock); 183 - rcu_read_unlock(); 181 + if (req->sdiag_protocol >= MAX_LINKS) 184 182 return -ENOENT; 185 - } 186 183 187 184 err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); 188 185 }
+3 -1
net/netlink/genetlink.c
··· 404 404 405 405 err = genl_validate_assign_mc_groups(family); 406 406 if (err) 407 - goto errout_locked; 407 + goto errout_free; 408 408 409 409 list_add_tail(&family->family_list, genl_family_chain(family->id)); 410 410 genl_unlock_all(); ··· 417 417 418 418 return 0; 419 419 420 + errout_free: 421 + kfree(family->attrbuf); 420 422 errout_locked: 421 423 genl_unlock_all(); 422 424 errout:
+3 -2
net/sched/cls_api.c
··· 112 112 113 113 for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL; 114 114 it_chain = &tp->next) 115 - tfilter_notify(net, oskb, n, tp, 0, event, false); 115 + tfilter_notify(net, oskb, n, tp, n->nlmsg_flags, event, false); 116 116 } 117 117 118 118 /* Select new prio value from the range, managed by kernel. */ ··· 430 430 if (!skb) 431 431 return -ENOBUFS; 432 432 433 - if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) { 433 + if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 434 + n->nlmsg_flags, event) <= 0) { 434 435 kfree_skb(skb); 435 436 return -EINVAL; 436 437 }
+17 -18
net/sctp/input.c
··· 181 181 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB 182 182 */ 183 183 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { 184 - if (asoc) { 185 - sctp_association_put(asoc); 184 + if (transport) { 185 + sctp_transport_put(transport); 186 186 asoc = NULL; 187 + transport = NULL; 187 188 } else { 188 189 sctp_endpoint_put(ep); 189 190 ep = NULL; ··· 270 269 bh_unlock_sock(sk); 271 270 272 271 /* Release the asoc/ep ref we took in the lookup calls. */ 273 - if (asoc) 274 - sctp_association_put(asoc); 272 + if (transport) 273 + sctp_transport_put(transport); 275 274 else 276 275 sctp_endpoint_put(ep); 277 276 ··· 284 283 285 284 discard_release: 286 285 /* Release the asoc/ep ref we took in the lookup calls. */ 287 - if (asoc) 288 - sctp_association_put(asoc); 286 + if (transport) 287 + sctp_transport_put(transport); 289 288 else 290 289 sctp_endpoint_put(ep); 291 290 ··· 301 300 { 302 301 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 303 302 struct sctp_inq *inqueue = &chunk->rcvr->inqueue; 303 + struct sctp_transport *t = chunk->transport; 304 304 struct sctp_ep_common *rcvr = NULL; 305 305 int backloged = 0; 306 306 ··· 353 351 done: 354 352 /* Release the refs we took in sctp_add_backlog */ 355 353 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 356 - sctp_association_put(sctp_assoc(rcvr)); 354 + sctp_transport_put(t); 357 355 else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 358 356 sctp_endpoint_put(sctp_ep(rcvr)); 359 357 else ··· 365 363 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) 366 364 { 367 365 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 366 + struct sctp_transport *t = chunk->transport; 368 367 struct sctp_ep_common *rcvr = chunk->rcvr; 369 368 int ret; 370 369 ··· 376 373 * from us 377 374 */ 378 375 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 379 - sctp_association_hold(sctp_assoc(rcvr)); 376 + sctp_transport_hold(t); 380 377 else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 381 378 
sctp_endpoint_hold(sctp_ep(rcvr)); 382 379 else ··· 540 537 return sk; 541 538 542 539 out: 543 - sctp_association_put(asoc); 540 + sctp_transport_put(transport); 544 541 return NULL; 545 542 } 546 543 547 544 /* Common cleanup code for icmp/icmpv6 error handler. */ 548 - void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) 545 + void sctp_err_finish(struct sock *sk, struct sctp_transport *t) 549 546 { 550 547 bh_unlock_sock(sk); 551 - sctp_association_put(asoc); 548 + sctp_transport_put(t); 552 549 } 553 550 554 551 /* ··· 644 641 } 645 642 646 643 out_unlock: 647 - sctp_err_finish(sk, asoc); 644 + sctp_err_finish(sk, transport); 648 645 } 649 646 650 647 /* ··· 955 952 goto out; 956 953 957 954 asoc = t->asoc; 958 - sctp_association_hold(asoc); 959 955 *pt = t; 960 - 961 - sctp_transport_put(t); 962 956 963 957 out: 964 958 return asoc; ··· 986 986 struct sctp_transport *transport; 987 987 988 988 if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { 989 - sctp_association_put(asoc); 989 + sctp_transport_put(transport); 990 990 return 1; 991 991 } 992 992 ··· 1021 1021 struct sctphdr *sh = sctp_hdr(skb); 1022 1022 union sctp_params params; 1023 1023 sctp_init_chunk_t *init; 1024 - struct sctp_transport *transport; 1025 1024 struct sctp_af *af; 1026 1025 1027 1026 /* ··· 1051 1052 1052 1053 af->from_addr_param(paddr, params.addr, sh->source, 0); 1053 1054 1054 - asoc = __sctp_lookup_association(net, laddr, paddr, &transport); 1055 + asoc = __sctp_lookup_association(net, laddr, paddr, transportp); 1055 1056 if (asoc) 1056 1057 return asoc; 1057 1058 }
+1 -1
net/sctp/ipv6.c
··· 198 198 } 199 199 200 200 out_unlock: 201 - sctp_err_finish(sk, asoc); 201 + sctp_err_finish(sk, transport); 202 202 out: 203 203 if (likely(idev != NULL)) 204 204 in6_dev_put(idev);
+13 -14
net/sctp/socket.c
··· 1214 1214 1215 1215 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1216 1216 1217 - err = sctp_wait_for_connect(asoc, &timeo); 1218 - if ((err == 0 || err == -EINPROGRESS) && assoc_id) 1217 + if (assoc_id) 1219 1218 *assoc_id = asoc->assoc_id; 1219 + err = sctp_wait_for_connect(asoc, &timeo); 1220 + /* Note: the asoc may be freed after the return of 1221 + * sctp_wait_for_connect. 1222 + */ 1220 1223 1221 1224 /* Don't free association on exit. */ 1222 1225 asoc = NULL; ··· 4285 4282 { 4286 4283 struct net *net = sock_net(sk); 4287 4284 struct sctp_endpoint *ep; 4288 - struct sctp_association *asoc; 4289 4285 4290 4286 if (!sctp_style(sk, TCP)) 4291 4287 return; 4292 4288 4293 - if (how & SEND_SHUTDOWN) { 4289 + ep = sctp_sk(sk)->ep; 4290 + if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { 4291 + struct sctp_association *asoc; 4292 + 4294 4293 sk->sk_state = SCTP_SS_CLOSING; 4295 - ep = sctp_sk(sk)->ep; 4296 - if (!list_empty(&ep->asocs)) { 4297 - asoc = list_entry(ep->asocs.next, 4298 - struct sctp_association, asocs); 4299 - sctp_primitive_SHUTDOWN(net, asoc, NULL); 4300 - } 4294 + asoc = list_entry(ep->asocs.next, 4295 + struct sctp_association, asocs); 4296 + sctp_primitive_SHUTDOWN(net, asoc, NULL); 4301 4297 } 4302 4298 } 4303 4299 ··· 4482 4480 if (!transport || !sctp_transport_hold(transport)) 4483 4481 goto out; 4484 4482 4485 - sctp_association_hold(transport->asoc); 4486 - sctp_transport_put(transport); 4487 - 4488 4483 rcu_read_unlock(); 4489 4484 err = cb(transport, p); 4490 - sctp_association_put(transport->asoc); 4485 + sctp_transport_put(transport); 4491 4486 4492 4487 out: 4493 4488 return err;
+17
net/socket.c
··· 341 341 .get = sockfs_xattr_get, 342 342 }; 343 343 344 + static int sockfs_security_xattr_set(const struct xattr_handler *handler, 345 + struct dentry *dentry, struct inode *inode, 346 + const char *suffix, const void *value, 347 + size_t size, int flags) 348 + { 349 + /* Handled by LSM. */ 350 + return -EAGAIN; 351 + } 352 + 353 + static const struct xattr_handler sockfs_security_xattr_handler = { 354 + .prefix = XATTR_SECURITY_PREFIX, 355 + .set = sockfs_security_xattr_set, 356 + }; 357 + 344 358 static const struct xattr_handler *sockfs_xattr_handlers[] = { 345 359 &sockfs_xattr_handler, 360 + &sockfs_security_xattr_handler, 346 361 NULL 347 362 }; 348 363 ··· 2053 2038 if (err) 2054 2039 break; 2055 2040 ++datagrams; 2041 + if (msg_data_left(&msg_sys)) 2042 + break; 2056 2043 cond_resched(); 2057 2044 } 2058 2045
+5 -2
net/sunrpc/clnt.c
··· 2753 2753 2754 2754 void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) 2755 2755 { 2756 + rcu_read_lock(); 2756 2757 xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 2758 + rcu_read_unlock(); 2757 2759 } 2758 2760 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put); 2759 2761 2760 2762 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 2761 2763 { 2764 + rcu_read_lock(); 2762 2765 rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), 2763 2766 xprt); 2767 + rcu_read_unlock(); 2764 2768 } 2765 2769 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); 2766 2770 ··· 2774 2770 struct rpc_xprt_switch *xps; 2775 2771 bool ret; 2776 2772 2777 - xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 2778 - 2779 2773 rcu_read_lock(); 2774 + xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 2780 2775 ret = rpc_xprt_switch_has_addr(xps, sap); 2781 2776 rcu_read_unlock(); 2782 2777 return ret;
+1 -10
net/sunrpc/svc_xprt.c
··· 1002 1002 void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) 1003 1003 { 1004 1004 struct svc_xprt *xprt; 1005 - struct svc_sock *svsk; 1006 - struct socket *sock; 1007 1005 struct list_head *le, *next; 1008 1006 LIST_HEAD(to_be_closed); 1009 - struct linger no_linger = { 1010 - .l_onoff = 1, 1011 - .l_linger = 0, 1012 - }; 1013 1007 1014 1008 spin_lock_bh(&serv->sv_lock); 1015 1009 list_for_each_safe(le, next, &serv->sv_tempsocks) { ··· 1021 1027 list_del_init(le); 1022 1028 xprt = list_entry(le, struct svc_xprt, xpt_list); 1023 1029 dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); 1024 - svsk = container_of(xprt, struct svc_sock, sk_xprt); 1025 - sock = svsk->sk_sock; 1026 - kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, 1027 - (char *)&no_linger, sizeof(no_linger)); 1030 + xprt->xpt_ops->xpo_kill_temp_xprt(xprt); 1028 1031 svc_close_xprt(xprt); 1029 1032 } 1030 1033 }
+21
net/sunrpc/svcsock.c
··· 438 438 return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 439 439 } 440 440 441 + static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt) 442 + { 443 + struct svc_sock *svsk; 444 + struct socket *sock; 445 + struct linger no_linger = { 446 + .l_onoff = 1, 447 + .l_linger = 0, 448 + }; 449 + 450 + svsk = container_of(xprt, struct svc_sock, sk_xprt); 451 + sock = svsk->sk_sock; 452 + kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, 453 + (char *)&no_linger, sizeof(no_linger)); 454 + } 455 + 441 456 /* 442 457 * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo 443 458 */ ··· 663 648 return NULL; 664 649 } 665 650 651 + static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt) 652 + { 653 + } 654 + 666 655 static struct svc_xprt *svc_udp_create(struct svc_serv *serv, 667 656 struct net *net, 668 657 struct sockaddr *sa, int salen, ··· 686 667 .xpo_has_wspace = svc_udp_has_wspace, 687 668 .xpo_accept = svc_udp_accept, 688 669 .xpo_secure_port = svc_sock_secure_port, 670 + .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt, 689 671 }; 690 672 691 673 static struct svc_xprt_class svc_udp_class = { ··· 1262 1242 .xpo_has_wspace = svc_tcp_has_wspace, 1263 1243 .xpo_accept = svc_tcp_accept, 1264 1244 .xpo_secure_port = svc_sock_secure_port, 1245 + .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt, 1265 1246 }; 1266 1247 1267 1248 static struct svc_xprt_class svc_tcp_class = {
+22 -15
net/sunrpc/xprtrdma/frwr_ops.c
··· 44 44 * being done. 45 45 * 46 46 * When the underlying transport disconnects, MRs are left in one of 47 - * three states: 47 + * four states: 48 48 * 49 49 * INVALID: The MR was not in use before the QP entered ERROR state. 50 - * (Or, the LOCAL_INV WR has not completed or flushed yet). 51 - * 52 - * STALE: The MR was being registered or unregistered when the QP 53 - * entered ERROR state, and the pending WR was flushed. 54 50 * 55 51 * VALID: The MR was registered before the QP entered ERROR state. 56 52 * 57 - * When frwr_op_map encounters STALE and VALID MRs, they are recovered 58 - * with ib_dereg_mr and then are re-initialized. Beause MR recovery 53 + * FLUSHED_FR: The MR was being registered when the QP entered ERROR 54 + * state, and the pending WR was flushed. 55 + * 56 + * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR 57 + * state, and the pending WR was flushed. 58 + * 59 + * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered 60 + * with ib_dereg_mr and then are re-initialized. Because MR recovery 59 61 * allocates fresh resources, it is deferred to a workqueue, and the 60 62 * recovered MRs are placed back on the rb_mws list when recovery is 61 63 * complete. 
frwr_op_map allocates another MR for the current RPC while ··· 179 177 static void 180 178 frwr_op_recover_mr(struct rpcrdma_mw *mw) 181 179 { 180 + enum rpcrdma_frmr_state state = mw->frmr.fr_state; 182 181 struct rpcrdma_xprt *r_xprt = mw->mw_xprt; 183 182 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 184 183 int rc; 185 184 186 185 rc = __frwr_reset_mr(ia, mw); 187 - ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir); 186 + if (state != FRMR_FLUSHED_LI) 187 + ib_dma_unmap_sg(ia->ri_device, 188 + mw->mw_sg, mw->mw_nents, mw->mw_dir); 188 189 if (rc) 189 190 goto out_release; 190 191 ··· 267 262 } 268 263 269 264 static void 270 - __frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr, 271 - const char *wr) 265 + __frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr) 272 266 { 273 - frmr->fr_state = FRMR_IS_STALE; 274 267 if (wc->status != IB_WC_WR_FLUSH_ERR) 275 268 pr_err("rpcrdma: %s: %s (%u/0x%x)\n", 276 269 wr, ib_wc_status_msg(wc->status), ··· 291 288 if (wc->status != IB_WC_SUCCESS) { 292 289 cqe = wc->wr_cqe; 293 290 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 294 - __frwr_sendcompletion_flush(wc, frmr, "fastreg"); 291 + frmr->fr_state = FRMR_FLUSHED_FR; 292 + __frwr_sendcompletion_flush(wc, "fastreg"); 295 293 } 296 294 } 297 295 ··· 312 308 if (wc->status != IB_WC_SUCCESS) { 313 309 cqe = wc->wr_cqe; 314 310 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 315 - __frwr_sendcompletion_flush(wc, frmr, "localinv"); 311 + frmr->fr_state = FRMR_FLUSHED_LI; 312 + __frwr_sendcompletion_flush(wc, "localinv"); 316 313 } 317 314 } 318 315 ··· 333 328 /* WARNING: Only wr_cqe and status are reliable at this point */ 334 329 cqe = wc->wr_cqe; 335 330 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 336 - if (wc->status != IB_WC_SUCCESS) 337 - __frwr_sendcompletion_flush(wc, frmr, "localinv"); 331 + if (wc->status != IB_WC_SUCCESS) { 332 + frmr->fr_state = FRMR_FLUSHED_LI; 333 + __frwr_sendcompletion_flush(wc, 
"localinv"); 334 + } 338 335 complete(&frmr->fr_linv_done); 339 336 } 340 337
+6
net/sunrpc/xprtrdma/svc_rdma_transport.c
··· 67 67 static void svc_rdma_free(struct svc_xprt *xprt); 68 68 static int svc_rdma_has_wspace(struct svc_xprt *xprt); 69 69 static int svc_rdma_secure_port(struct svc_rqst *); 70 + static void svc_rdma_kill_temp_xprt(struct svc_xprt *); 70 71 71 72 static struct svc_xprt_ops svc_rdma_ops = { 72 73 .xpo_create = svc_rdma_create, ··· 80 79 .xpo_has_wspace = svc_rdma_has_wspace, 81 80 .xpo_accept = svc_rdma_accept, 82 81 .xpo_secure_port = svc_rdma_secure_port, 82 + .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt, 83 83 }; 84 84 85 85 struct svc_xprt_class svc_rdma_class = { ··· 1317 1315 static int svc_rdma_secure_port(struct svc_rqst *rqstp) 1318 1316 { 1319 1317 return 1; 1318 + } 1319 + 1320 + static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt) 1321 + { 1320 1322 } 1321 1323 1322 1324 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
+2 -1
net/sunrpc/xprtrdma/xprt_rdma.h
··· 216 216 enum rpcrdma_frmr_state { 217 217 FRMR_IS_INVALID, /* ready to be used */ 218 218 FRMR_IS_VALID, /* in use */ 219 - FRMR_IS_STALE, /* failed completion */ 219 + FRMR_FLUSHED_FR, /* flushed FASTREG WR */ 220 + FRMR_FLUSHED_LI, /* flushed LOCALINV WR */ 220 221 }; 221 222 222 223 struct rpcrdma_frmr {
+1 -47
net/tipc/socket.c
··· 1 1 /* 2 2 * net/tipc/socket.c: TIPC socket API 3 3 * 4 - * Copyright (c) 2001-2007, 2012-2015, Ericsson AB 4 + * Copyright (c) 2001-2007, 2012-2016, Ericsson AB 5 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 6 6 * All rights reserved. 7 7 * ··· 129 129 static const struct proto_ops stream_ops; 130 130 static const struct proto_ops msg_ops; 131 131 static struct proto tipc_proto; 132 - 133 132 static const struct rhashtable_params tsk_rht_params; 134 - 135 - /* 136 - * Revised TIPC socket locking policy: 137 - * 138 - * Most socket operations take the standard socket lock when they start 139 - * and hold it until they finish (or until they need to sleep). Acquiring 140 - * this lock grants the owner exclusive access to the fields of the socket 141 - * data structures, with the exception of the backlog queue. A few socket 142 - * operations can be done without taking the socket lock because they only 143 - * read socket information that never changes during the life of the socket. 144 - * 145 - * Socket operations may acquire the lock for the associated TIPC port if they 146 - * need to perform an operation on the port. If any routine needs to acquire 147 - * both the socket lock and the port lock it must take the socket lock first 148 - * to avoid the risk of deadlock. 149 - * 150 - * The dispatcher handling incoming messages cannot grab the socket lock in 151 - * the standard fashion, since invoked it runs at the BH level and cannot block. 152 - * Instead, it checks to see if the socket lock is currently owned by someone, 153 - * and either handles the message itself or adds it to the socket's backlog 154 - * queue; in the latter case the queued message is processed once the process 155 - * owning the socket lock releases it. 156 - * 157 - * NOTE: Releasing the socket lock while an operation is sleeping overcomes 158 - * the problem of a blocked socket operation preventing any other operations 159 - * from occurring. 
However, applications must be careful if they have 160 - * multiple threads trying to send (or receive) on the same socket, as these 161 - * operations might interfere with each other. For example, doing a connect 162 - * and a receive at the same time might allow the receive to consume the 163 - * ACK message meant for the connect. While additional work could be done 164 - * to try and overcome this, it doesn't seem to be worthwhile at the present. 165 - * 166 - * NOTE: Releasing the socket lock while an operation is sleeping also ensures 167 - * that another operation that must be performed in a non-blocking manner is 168 - * not delayed for very long because the lock has already been taken. 169 - * 170 - * NOTE: This code assumes that certain fields of a port/socket pair are 171 - * constant over its lifetime; such fields can be examined without taking 172 - * the socket lock and/or port lock, and do not need to be re-read even 173 - * after resuming processing after waiting. These fields include: 174 - * - socket type 175 - * - pointer to socket sk structure (aka tipc_sock structure) 176 - * - pointer to port structure 177 - * - port reference 178 - */ 179 133 180 134 static u32 tsk_own_node(struct tipc_sock *tsk) 181 135 {
+13 -7
net/unix/af_unix.c
··· 2199 2199 * Sleep until more data has arrived. But check for races.. 2200 2200 */ 2201 2201 static long unix_stream_data_wait(struct sock *sk, long timeo, 2202 - struct sk_buff *last, unsigned int last_len) 2202 + struct sk_buff *last, unsigned int last_len, 2203 + bool freezable) 2203 2204 { 2204 2205 struct sk_buff *tail; 2205 2206 DEFINE_WAIT(wait); ··· 2221 2220 2222 2221 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2223 2222 unix_state_unlock(sk); 2224 - timeo = freezable_schedule_timeout(timeo); 2223 + if (freezable) 2224 + timeo = freezable_schedule_timeout(timeo); 2225 + else 2226 + timeo = schedule_timeout(timeo); 2225 2227 unix_state_lock(sk); 2226 2228 2227 2229 if (sock_flag(sk, SOCK_DEAD)) ··· 2254 2250 unsigned int splice_flags; 2255 2251 }; 2256 2252 2257 - static int unix_stream_read_generic(struct unix_stream_read_state *state) 2253 + static int unix_stream_read_generic(struct unix_stream_read_state *state, 2254 + bool freezable) 2258 2255 { 2259 2256 struct scm_cookie scm; 2260 2257 struct socket *sock = state->socket; ··· 2335 2330 mutex_unlock(&u->iolock); 2336 2331 2337 2332 timeo = unix_stream_data_wait(sk, timeo, last, 2338 - last_len); 2333 + last_len, freezable); 2339 2334 2340 2335 if (signal_pending(current)) { 2341 2336 err = sock_intr_errno(timeo); ··· 2477 2472 .flags = flags 2478 2473 }; 2479 2474 2480 - return unix_stream_read_generic(&state); 2475 + return unix_stream_read_generic(&state, true); 2481 2476 } 2482 2477 2483 2478 static int unix_stream_splice_actor(struct sk_buff *skb, ··· 2508 2503 flags & SPLICE_F_NONBLOCK) 2509 2504 state.flags = MSG_DONTWAIT; 2510 2505 2511 - return unix_stream_read_generic(&state); 2506 + return unix_stream_read_generic(&state, false); 2512 2507 } 2513 2508 2514 2509 static int unix_shutdown(struct socket *sock, int mode) ··· 2817 2812 i++; 2818 2813 } 2819 2814 for ( ; i < len; i++) 2820 - seq_putc(seq, u->addr->name->sun_path[i]); 2815 + seq_putc(seq, u->addr->name->sun_path[i] ?: 2816 + '@'); 
2821 2817 } 2822 2818 unix_state_unlock(s); 2823 2819 seq_putc(seq, '\n');
+1
net/wireless/core.h
··· 71 71 struct list_head bss_list; 72 72 struct rb_root bss_tree; 73 73 u32 bss_generation; 74 + u32 bss_entries; 74 75 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 75 76 struct sk_buff *scan_msg; 76 77 struct cfg80211_sched_scan_request __rcu *sched_scan_req;
+69
net/wireless/scan.c
··· 57 57 * also linked into the probe response struct. 58 58 */ 59 59 60 + /* 61 + * Limit the number of BSS entries stored in mac80211. Each one is 62 + * a bit over 4k at most, so this limits to roughly 4-5M of memory. 63 + * If somebody wants to really attack this though, they'd likely 64 + * use small beacons, and only one type of frame, limiting each of 65 + * the entries to a much smaller size (in order to generate more 66 + * entries in total, so overhead is bigger.) 67 + */ 68 + static int bss_entries_limit = 1000; 69 + module_param(bss_entries_limit, int, 0644); 70 + MODULE_PARM_DESC(bss_entries_limit, 71 + "limit to number of scan BSS entries (per wiphy, default 1000)"); 72 + 60 73 #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) 61 74 62 75 static void bss_free(struct cfg80211_internal_bss *bss) ··· 150 137 151 138 list_del_init(&bss->list); 152 139 rb_erase(&bss->rbn, &rdev->bss_tree); 140 + rdev->bss_entries--; 141 + WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), 142 + "rdev bss entries[%d]/list[empty:%d] corruption\n", 143 + rdev->bss_entries, list_empty(&rdev->bss_list)); 153 144 bss_ref_put(rdev, bss); 154 145 return true; 155 146 } ··· 178 161 179 162 if (expired) 180 163 rdev->bss_generation++; 164 + } 165 + 166 + static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) 167 + { 168 + struct cfg80211_internal_bss *bss, *oldest = NULL; 169 + bool ret; 170 + 171 + lockdep_assert_held(&rdev->bss_lock); 172 + 173 + list_for_each_entry(bss, &rdev->bss_list, list) { 174 + if (atomic_read(&bss->hold)) 175 + continue; 176 + 177 + if (!list_empty(&bss->hidden_list) && 178 + !bss->pub.hidden_beacon_bss) 179 + continue; 180 + 181 + if (oldest && time_before(oldest->ts, bss->ts)) 182 + continue; 183 + oldest = bss; 184 + } 185 + 186 + if (WARN_ON(!oldest)) 187 + return false; 188 + 189 + /* 190 + * The callers make sure to increase rdev->bss_generation if anything 191 + * gets removed (and a new entry added), so there's 
no need to also do 192 + * it here. 193 + */ 194 + 195 + ret = __cfg80211_unlink_bss(rdev, oldest); 196 + WARN_ON(!ret); 197 + return ret; 181 198 } 182 199 183 200 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, ··· 740 689 const u8 *ie; 741 690 int i, ssidlen; 742 691 u8 fold = 0; 692 + u32 n_entries = 0; 743 693 744 694 ies = rcu_access_pointer(new->pub.beacon_ies); 745 695 if (WARN_ON(!ies)) ··· 764 712 /* This is the bad part ... */ 765 713 766 714 list_for_each_entry(bss, &rdev->bss_list, list) { 715 + /* 716 + * we're iterating all the entries anyway, so take the 717 + * opportunity to validate the list length accounting 718 + */ 719 + n_entries++; 720 + 767 721 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) 768 722 continue; 769 723 if (bss->pub.channel != new->pub.channel) ··· 797 739 rcu_assign_pointer(bss->pub.beacon_ies, 798 740 new->pub.beacon_ies); 799 741 } 742 + 743 + WARN_ONCE(n_entries != rdev->bss_entries, 744 + "rdev bss entries[%d]/list[len:%d] corruption\n", 745 + rdev->bss_entries, n_entries); 800 746 801 747 return true; 802 748 } ··· 956 894 } 957 895 } 958 896 897 + if (rdev->bss_entries >= bss_entries_limit && 898 + !cfg80211_bss_expire_oldest(rdev)) { 899 + kfree(new); 900 + goto drop; 901 + } 902 + 959 903 list_add_tail(&new->list, &rdev->bss_list); 904 + rdev->bss_entries++; 960 905 rb_insert_bss(rdev, new); 961 906 found = new; 962 907 }
+2 -1
net/wireless/util.c
··· 1158 1158 58500000, 1159 1159 65000000, 1160 1160 78000000, 1161 - 0, 1161 + /* not in the spec, but some devices use this: */ 1162 + 86500000, 1162 1163 }, 1163 1164 { 13500000, 1164 1165 27000000,
+4
samples/bpf/Makefile
··· 27 27 hostprogs-y += test_current_task_under_cgroup 28 28 hostprogs-y += trace_event 29 29 hostprogs-y += sampleip 30 + hostprogs-y += tc_l2_redirect 30 31 31 32 test_verifier-objs := test_verifier.o libbpf.o 32 33 test_maps-objs := test_maps.o libbpf.o ··· 57 56 test_current_task_under_cgroup_user.o 58 57 trace_event-objs := bpf_load.o libbpf.o trace_event_user.o 59 58 sampleip-objs := bpf_load.o libbpf.o sampleip_user.o 59 + tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o 60 60 61 61 # Tell kbuild to always build the programs 62 62 always := $(hostprogs-y) ··· 74 72 always += trace_output_kern.o 75 73 always += tcbpf1_kern.o 76 74 always += tcbpf2_kern.o 75 + always += tc_l2_redirect_kern.o 77 76 always += lathist_kern.o 78 77 always += offwaketime_kern.o 79 78 always += spintest_kern.o ··· 114 111 HOSTLOADLIBES_test_current_task_under_cgroup += -lelf 115 112 HOSTLOADLIBES_trace_event += -lelf 116 113 HOSTLOADLIBES_sampleip += -lelf 114 + HOSTLOADLIBES_tc_l2_redirect += -l elf 117 115 118 116 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: 119 117 # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+173
samples/bpf/tc_l2_redirect.sh
··· 1 + #!/bin/bash 2 + 3 + [[ -z $TC ]] && TC='tc' 4 + [[ -z $IP ]] && IP='ip' 5 + 6 + REDIRECT_USER='./tc_l2_redirect' 7 + REDIRECT_BPF='./tc_l2_redirect_kern.o' 8 + 9 + RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter) 10 + IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding) 11 + 12 + function config_common { 13 + local tun_type=$1 14 + 15 + $IP netns add ns1 16 + $IP netns add ns2 17 + $IP link add ve1 type veth peer name vens1 18 + $IP link add ve2 type veth peer name vens2 19 + $IP link set dev ve1 up 20 + $IP link set dev ve2 up 21 + $IP link set dev ve1 mtu 1500 22 + $IP link set dev ve2 mtu 1500 23 + $IP link set dev vens1 netns ns1 24 + $IP link set dev vens2 netns ns2 25 + 26 + $IP -n ns1 link set dev lo up 27 + $IP -n ns1 link set dev vens1 up 28 + $IP -n ns1 addr add 10.1.1.101/24 dev vens1 29 + $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad 30 + $IP -n ns1 route add default via 10.1.1.1 dev vens1 31 + $IP -n ns1 route add default via 2401:db01::1 dev vens1 32 + 33 + $IP -n ns2 link set dev lo up 34 + $IP -n ns2 link set dev vens2 up 35 + $IP -n ns2 addr add 10.2.1.102/24 dev vens2 36 + $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad 37 + $IP -n ns2 addr add 10.10.1.102 dev lo 38 + $IP -n ns2 addr add 2401:face::66/64 dev lo nodad 39 + $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1 40 + $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1 41 + $IP -n ns2 link set dev ipt2 up 42 + $IP -n ns2 link set dev ip6t2 up 43 + $IP netns exec ns2 $TC qdisc add dev vens2 clsact 44 + $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip 45 + if [[ $tun_type == "ipip" ]]; then 46 + $IP -n ns2 route add 10.1.1.0/24 dev ipt2 47 + $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 48 + $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0 49 + else 50 + $IP -n ns2 route add 10.1.1.0/24 dev ip6t2 51 + $IP -n ns2 route add 
2401:db01::/64 dev ip6t2 52 + $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 53 + $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0 54 + fi 55 + 56 + $IP addr add 10.1.1.1/24 dev ve1 57 + $IP addr add 2401:db01::1/64 dev ve1 nodad 58 + $IP addr add 10.2.1.1/24 dev ve2 59 + $IP addr add 2401:db02::1/64 dev ve2 nodad 60 + 61 + $TC qdisc add dev ve2 clsact 62 + $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward 63 + 64 + sysctl -q -w net.ipv4.conf.all.rp_filter=0 65 + sysctl -q -w net.ipv6.conf.all.forwarding=1 66 + } 67 + 68 + function cleanup { 69 + set +e 70 + [[ -z $DEBUG ]] || set +x 71 + $IP netns delete ns1 >& /dev/null 72 + $IP netns delete ns2 >& /dev/null 73 + $IP link del ve1 >& /dev/null 74 + $IP link del ve2 >& /dev/null 75 + $IP link del ipt >& /dev/null 76 + $IP link del ip6t >& /dev/null 77 + sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER 78 + sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING 79 + rm -f /sys/fs/bpf/tc/globals/tun_iface 80 + [[ -z $DEBUG ]] || set -x 81 + set -e 82 + } 83 + 84 + function l2_to_ipip { 85 + echo -n "l2_to_ipip $1: " 86 + 87 + local dir=$1 88 + 89 + config_common ipip 90 + 91 + $IP link add ipt type ipip external 92 + $IP link set dev ipt up 93 + sysctl -q -w net.ipv4.conf.ipt.rp_filter=0 94 + sysctl -q -w net.ipv4.conf.ipt.forwarding=1 95 + 96 + if [[ $dir == "egress" ]]; then 97 + $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 98 + $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect 99 + sysctl -q -w net.ipv4.conf.ve1.forwarding=1 100 + else 101 + $TC qdisc add dev ve1 clsact 102 + $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect 103 + fi 104 + 105 + $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex) 106 + 107 + $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null 108 + 109 + if [[ $dir == "egress" ]]; then 110 + # test direct 
egress to ve2 (i.e. not forwarding from 111 + # ve1 to ve2). 112 + ping -c1 10.10.1.102 >& /dev/null 113 + fi 114 + 115 + cleanup 116 + 117 + echo "OK" 118 + } 119 + 120 + function l2_to_ip6tnl { 121 + echo -n "l2_to_ip6tnl $1: " 122 + 123 + local dir=$1 124 + 125 + config_common ip6tnl 126 + 127 + $IP link add ip6t type ip6tnl mode any external 128 + $IP link set dev ip6t up 129 + sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0 130 + sysctl -q -w net.ipv4.conf.ip6t.forwarding=1 131 + 132 + if [[ $dir == "egress" ]]; then 133 + $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 134 + $IP route add 2401:face::/64 via 2401:db02::66 dev ve2 135 + $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect 136 + sysctl -q -w net.ipv4.conf.ve1.forwarding=1 137 + else 138 + $TC qdisc add dev ve1 clsact 139 + $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect 140 + fi 141 + 142 + $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex) 143 + 144 + $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null 145 + $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null 146 + 147 + if [[ $dir == "egress" ]]; then 148 + # test direct egress to ve2 (i.e. not forwarding from 149 + # ve1 to ve2). 150 + ping -c1 10.10.1.102 >& /dev/null 151 + ping -6 -c1 2401:face::66 >& /dev/null 152 + fi 153 + 154 + cleanup 155 + 156 + echo "OK" 157 + } 158 + 159 + cleanup 160 + test_names="l2_to_ipip l2_to_ip6tnl" 161 + test_dirs="ingress egress" 162 + if [[ $# -ge 2 ]]; then 163 + test_names=$1 164 + test_dirs=$2 165 + elif [[ $# -ge 1 ]]; then 166 + test_names=$1 167 + fi 168 + 169 + for t in $test_names; do 170 + for d in $test_dirs; do 171 + $t $d 172 + done 173 + done
+236
samples/bpf/tc_l2_redirect_kern.c
··· 1 + /* Copyright (c) 2016 Facebook 2 + * 3 + * This program is free software; you can redistribute it and/or 4 + * modify it under the terms of version 2 of the GNU General Public 5 + * License as published by the Free Software Foundation. 6 + */ 7 + #include <uapi/linux/bpf.h> 8 + #include <uapi/linux/if_ether.h> 9 + #include <uapi/linux/if_packet.h> 10 + #include <uapi/linux/ip.h> 11 + #include <uapi/linux/ipv6.h> 12 + #include <uapi/linux/in.h> 13 + #include <uapi/linux/tcp.h> 14 + #include <uapi/linux/filter.h> 15 + #include <uapi/linux/pkt_cls.h> 16 + #include <net/ipv6.h> 17 + #include "bpf_helpers.h" 18 + 19 + #define _htonl __builtin_bswap32 20 + 21 + #define PIN_GLOBAL_NS 2 22 + struct bpf_elf_map { 23 + __u32 type; 24 + __u32 size_key; 25 + __u32 size_value; 26 + __u32 max_elem; 27 + __u32 flags; 28 + __u32 id; 29 + __u32 pinning; 30 + }; 31 + 32 + /* copy of 'struct ethhdr' without __packed */ 33 + struct eth_hdr { 34 + unsigned char h_dest[ETH_ALEN]; 35 + unsigned char h_source[ETH_ALEN]; 36 + unsigned short h_proto; 37 + }; 38 + 39 + struct bpf_elf_map SEC("maps") tun_iface = { 40 + .type = BPF_MAP_TYPE_ARRAY, 41 + .size_key = sizeof(int), 42 + .size_value = sizeof(int), 43 + .pinning = PIN_GLOBAL_NS, 44 + .max_elem = 1, 45 + }; 46 + 47 + static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr) 48 + { 49 + if (eth_proto == htons(ETH_P_IP)) 50 + return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100); 51 + else if (eth_proto == htons(ETH_P_IPV6)) 52 + return (daddr == _htonl(0x2401face)); 53 + 54 + return false; 55 + } 56 + 57 + SEC("l2_to_iptun_ingress_forward") 58 + int _l2_to_iptun_ingress_forward(struct __sk_buff *skb) 59 + { 60 + struct bpf_tunnel_key tkey = {}; 61 + void *data = (void *)(long)skb->data; 62 + struct eth_hdr *eth = data; 63 + void *data_end = (void *)(long)skb->data_end; 64 + int key = 0, *ifindex; 65 + 66 + int ret; 67 + 68 + if (data + sizeof(*eth) > data_end) 69 + return TC_ACT_OK; 70 + 71 + ifindex = 
bpf_map_lookup_elem(&tun_iface, &key); 72 + if (!ifindex) 73 + return TC_ACT_OK; 74 + 75 + if (eth->h_proto == htons(ETH_P_IP)) { 76 + char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n"; 77 + struct iphdr *iph = data + sizeof(*eth); 78 + 79 + if (data + sizeof(*eth) + sizeof(*iph) > data_end) 80 + return TC_ACT_OK; 81 + 82 + if (iph->protocol != IPPROTO_IPIP) 83 + return TC_ACT_OK; 84 + 85 + bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex, 86 + _htonl(iph->daddr)); 87 + return bpf_redirect(*ifindex, BPF_F_INGRESS); 88 + } else if (eth->h_proto == htons(ETH_P_IPV6)) { 89 + char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n"; 90 + struct ipv6hdr *ip6h = data + sizeof(*eth); 91 + 92 + if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) 93 + return TC_ACT_OK; 94 + 95 + if (ip6h->nexthdr != IPPROTO_IPIP && 96 + ip6h->nexthdr != IPPROTO_IPV6) 97 + return TC_ACT_OK; 98 + 99 + bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex, 100 + _htonl(ip6h->daddr.s6_addr32[0]), 101 + _htonl(ip6h->daddr.s6_addr32[3])); 102 + return bpf_redirect(*ifindex, BPF_F_INGRESS); 103 + } 104 + 105 + return TC_ACT_OK; 106 + } 107 + 108 + SEC("l2_to_iptun_ingress_redirect") 109 + int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb) 110 + { 111 + struct bpf_tunnel_key tkey = {}; 112 + void *data = (void *)(long)skb->data; 113 + struct eth_hdr *eth = data; 114 + void *data_end = (void *)(long)skb->data_end; 115 + int key = 0, *ifindex; 116 + 117 + int ret; 118 + 119 + if (data + sizeof(*eth) > data_end) 120 + return TC_ACT_OK; 121 + 122 + ifindex = bpf_map_lookup_elem(&tun_iface, &key); 123 + if (!ifindex) 124 + return TC_ACT_OK; 125 + 126 + if (eth->h_proto == htons(ETH_P_IP)) { 127 + char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; 128 + struct iphdr *iph = data + sizeof(*eth); 129 + __be32 daddr = iph->daddr; 130 + 131 + if (data + sizeof(*eth) + sizeof(*iph) > data_end) 132 + return TC_ACT_OK; 133 + 134 + if (!is_vip_addr(eth->h_proto, daddr)) 135 + return TC_ACT_OK; 
136 + 137 + bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex); 138 + } else { 139 + return TC_ACT_OK; 140 + } 141 + 142 + tkey.tunnel_id = 10000; 143 + tkey.tunnel_ttl = 64; 144 + tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */ 145 + bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0); 146 + return bpf_redirect(*ifindex, 0); 147 + } 148 + 149 + SEC("l2_to_ip6tun_ingress_redirect") 150 + int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb) 151 + { 152 + struct bpf_tunnel_key tkey = {}; 153 + void *data = (void *)(long)skb->data; 154 + struct eth_hdr *eth = data; 155 + void *data_end = (void *)(long)skb->data_end; 156 + int key = 0, *ifindex; 157 + 158 + if (data + sizeof(*eth) > data_end) 159 + return TC_ACT_OK; 160 + 161 + ifindex = bpf_map_lookup_elem(&tun_iface, &key); 162 + if (!ifindex) 163 + return TC_ACT_OK; 164 + 165 + if (eth->h_proto == htons(ETH_P_IP)) { 166 + char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; 167 + struct iphdr *iph = data + sizeof(*eth); 168 + 169 + if (data + sizeof(*eth) + sizeof(*iph) > data_end) 170 + return TC_ACT_OK; 171 + 172 + if (!is_vip_addr(eth->h_proto, iph->daddr)) 173 + return TC_ACT_OK; 174 + 175 + bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr), 176 + *ifindex); 177 + } else if (eth->h_proto == htons(ETH_P_IPV6)) { 178 + char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n"; 179 + struct ipv6hdr *ip6h = data + sizeof(*eth); 180 + 181 + if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) 182 + return TC_ACT_OK; 183 + 184 + if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) 185 + return TC_ACT_OK; 186 + 187 + bpf_trace_printk(fmt6, sizeof(fmt6), 188 + _htonl(ip6h->daddr.s6_addr32[0]), *ifindex); 189 + } else { 190 + return TC_ACT_OK; 191 + } 192 + 193 + tkey.tunnel_id = 10000; 194 + tkey.tunnel_ttl = 64; 195 + /* 2401:db02:0:0:0:0:0:66 */ 196 + tkey.remote_ipv6[0] = _htonl(0x2401db02); 197 + tkey.remote_ipv6[1] = 0; 198 + tkey.remote_ipv6[2] = 0; 199 + tkey.remote_ipv6[3] = 
_htonl(0x00000066); 200 + bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6); 201 + return bpf_redirect(*ifindex, 0); 202 + } 203 + 204 + SEC("drop_non_tun_vip") 205 + int _drop_non_tun_vip(struct __sk_buff *skb) 206 + { 207 + struct bpf_tunnel_key tkey = {}; 208 + void *data = (void *)(long)skb->data; 209 + struct eth_hdr *eth = data; 210 + void *data_end = (void *)(long)skb->data_end; 211 + 212 + if (data + sizeof(*eth) > data_end) 213 + return TC_ACT_OK; 214 + 215 + if (eth->h_proto == htons(ETH_P_IP)) { 216 + struct iphdr *iph = data + sizeof(*eth); 217 + 218 + if (data + sizeof(*eth) + sizeof(*iph) > data_end) 219 + return TC_ACT_OK; 220 + 221 + if (is_vip_addr(eth->h_proto, iph->daddr)) 222 + return TC_ACT_SHOT; 223 + } else if (eth->h_proto == htons(ETH_P_IPV6)) { 224 + struct ipv6hdr *ip6h = data + sizeof(*eth); 225 + 226 + if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) 227 + return TC_ACT_OK; 228 + 229 + if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) 230 + return TC_ACT_SHOT; 231 + } 232 + 233 + return TC_ACT_OK; 234 + } 235 + 236 + char _license[] SEC("license") = "GPL";
+73
samples/bpf/tc_l2_redirect_user.c
··· 1 + /* Copyright (c) 2016 Facebook 2 + * 3 + * This program is free software; you can redistribute it and/or 4 + * modify it under the terms of version 2 of the GNU General Public 5 + * License as published by the Free Software Foundation. 6 + */ 7 + #include <linux/unistd.h> 8 + #include <linux/bpf.h> 9 + 10 + #include <stdlib.h> 11 + #include <stdio.h> 12 + #include <unistd.h> 13 + #include <string.h> 14 + #include <errno.h> 15 + 16 + #include "libbpf.h" 17 + 18 + static void usage(void) 19 + { 20 + printf("Usage: tc_l2_ipip_redirect [...]\n"); 21 + printf(" -U <file> Update an already pinned BPF array\n"); 22 + printf(" -i <ifindex> Interface index\n"); 23 + printf(" -h Display this help\n"); 24 + } 25 + 26 + int main(int argc, char **argv) 27 + { 28 + const char *pinned_file = NULL; 29 + int ifindex = -1; 30 + int array_key = 0; 31 + int array_fd = -1; 32 + int ret = -1; 33 + int opt; 34 + 35 + while ((opt = getopt(argc, argv, "F:U:i:")) != -1) { 36 + switch (opt) { 37 + /* General args */ 38 + case 'U': 39 + pinned_file = optarg; 40 + break; 41 + case 'i': 42 + ifindex = atoi(optarg); 43 + break; 44 + default: 45 + usage(); 46 + goto out; 47 + } 48 + } 49 + 50 + if (ifindex < 0 || !pinned_file) { 51 + usage(); 52 + goto out; 53 + } 54 + 55 + array_fd = bpf_obj_get(pinned_file); 56 + if (array_fd < 0) { 57 + fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n", 58 + pinned_file, strerror(errno), errno); 59 + goto out; 60 + } 61 + 62 + /* bpf_tunnel_key.remote_ipv4 expects host byte orders */ 63 + ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0); 64 + if (ret) { 65 + perror("bpf_update_elem"); 66 + goto out; 67 + } 68 + 69 + out: 70 + if (array_fd != -1) 71 + close(array_fd); 72 + return ret; 73 + }
+75 -6
scripts/Makefile.build
··· 159 159 $(obj)/%.i: $(src)/%.c FORCE 160 160 $(call if_changed_dep,cpp_i_c) 161 161 162 - cmd_gensymtypes = \ 162 + # These mirror gensymtypes_S and co below, keep them in synch. 163 + cmd_gensymtypes_c = \ 163 164 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ 164 165 $(GENKSYMS) $(if $(1), -T $(2)) \ 165 166 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ ··· 170 169 quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ 171 170 cmd_cc_symtypes_c = \ 172 171 set -e; \ 173 - $(call cmd_gensymtypes,true,$@) >/dev/null; \ 172 + $(call cmd_gensymtypes_c,true,$@) >/dev/null; \ 174 173 test -s $@ || rm -f $@ 175 174 176 175 $(obj)/%.symtypes : $(src)/%.c FORCE ··· 199 198 # the actual value of the checksum generated by genksyms 200 199 201 200 cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< 202 - cmd_modversions = \ 201 + 202 + cmd_modversions_c = \ 203 203 if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ 204 - $(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ 204 + $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ 205 205 > $(@D)/.tmp_$(@F:.o=.ver); \ 206 206 \ 207 207 $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ ··· 270 268 define rule_cc_o_c 271 269 $(call echo-cmd,checksrc) $(cmd_checksrc) \ 272 270 $(call cmd_and_fixdep,cc_o_c) \ 273 - $(cmd_modversions) \ 271 + $(cmd_modversions_c) \ 274 272 $(cmd_objtool) \ 275 273 $(call echo-cmd,record_mcount) $(cmd_record_mcount) 276 274 endef 277 275 278 276 define rule_as_o_S 279 277 $(call cmd_and_fixdep,as_o_S) \ 278 + $(cmd_modversions_S) \ 280 279 $(cmd_objtool) 281 280 endef 282 281 ··· 317 314 $(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) 318 315 $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) 319 316 317 + # .S file exports must have their C prototypes defined in asm/asm-prototypes.h 318 + # or a file that it includes, in order to get versioned symbols. 
We build a 319 + # dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from 320 + # the .S file (with trailing ';'), and run genksyms on that, to extract vers. 321 + # 322 + # This is convoluted. The .S file must first be preprocessed to run guards and 323 + # expand names, then the resulting exports must be constructed into plain 324 + # EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed 325 + # to make the genksyms input. 326 + # 327 + # These mirror gensymtypes_c and co above, keep them in synch. 328 + cmd_gensymtypes_S = \ 329 + (echo "\#include <linux/kernel.h>" ; \ 330 + echo "\#include <asm/asm-prototypes.h>" ; \ 331 + $(CPP) $(a_flags) $< | \ 332 + grep "\<___EXPORT_SYMBOL\>" | \ 333 + sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \ 334 + $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ 335 + $(GENKSYMS) $(if $(1), -T $(2)) \ 336 + $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 337 + $(if $(KBUILD_PRESERVE),-p) \ 338 + -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) 339 + 340 + quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@ 341 + cmd_cc_symtypes_S = \ 342 + set -e; \ 343 + $(call cmd_gensymtypes_S,true,$@) >/dev/null; \ 344 + test -s $@ || rm -f $@ 345 + 346 + $(obj)/%.symtypes : $(src)/%.S FORCE 347 + $(call cmd,cc_symtypes_S) 348 + 349 + 320 350 quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@ 321 351 cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $< 322 352 ··· 357 321 $(call if_changed_dep,cpp_s_S) 358 322 359 323 quiet_cmd_as_o_S = AS $(quiet_modtag) $@ 360 - cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< 324 + 325 + ifndef CONFIG_MODVERSIONS 326 + cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< 327 + 328 + else 329 + 330 + ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h) 331 + 332 + ifeq ($(ASM_PROTOTYPES),) 333 + cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< 334 + 335 + else 336 + 337 + # versioning matches the C process 
described above, with difference that 338 + # we parse asm-prototypes.h C header to get function definitions. 339 + 340 + cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $< 341 + 342 + cmd_modversions_S = \ 343 + if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ 344 + $(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ 345 + > $(@D)/.tmp_$(@F:.o=.ver); \ 346 + \ 347 + $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ 348 + -T $(@D)/.tmp_$(@F:.o=.ver); \ 349 + rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \ 350 + else \ 351 + mv -f $(@D)/.tmp_$(@F) $@; \ 352 + fi; 353 + endif 354 + endif 361 355 362 356 $(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE 363 357 $(call if_changed_rule,as_o_S) ··· 496 430 497 431 $(obj)/lib-ksyms.o: $(lib-target) FORCE 498 432 $(call if_changed,export_list) 433 + 434 + targets += $(obj)/lib-ksyms.o 435 + 499 436 endif 500 437 501 438 #
+1
scripts/Makefile.extrawarn
··· 36 36 warning-2 += $(call cc-option, -Wlogical-op) 37 37 warning-2 += $(call cc-option, -Wmissing-field-initializers) 38 38 warning-2 += $(call cc-option, -Wsign-compare) 39 + warning-2 += $(call cc-option, -Wmaybe-uninitialized) 39 40 40 41 warning-3 := -Wbad-function-cast 41 42 warning-3 += -Wcast-qual
+4
scripts/Makefile.ubsan
··· 17 17 ifdef CONFIG_UBSAN_NULL 18 18 CFLAGS_UBSAN += $(call cc-option, -fsanitize=null) 19 19 endif 20 + 21 + # -fsanitize=* options makes GCC less smart than usual and 22 + # increase number of 'maybe-uninitialized false-positives 23 + CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized) 20 24 endif
+3
scripts/bloat-o-meter
··· 8 8 # of the GNU General Public License, incorporated herein by reference. 9 9 10 10 import sys, os, re 11 + from signal import signal, SIGPIPE, SIG_DFL 12 + 13 + signal(SIGPIPE, SIG_DFL) 11 14 12 15 if len(sys.argv) != 3: 13 16 sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0])
+1 -1
scripts/gcc-x86_64-has-stack-protector.sh
··· 1 1 #!/bin/sh 2 2 3 - echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs" 3 + echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" 4 4 if [ "$?" -eq "0" ] ; then 5 5 echo y 6 6 else
+4 -2
security/apparmor/domain.c
··· 621 621 /* released below */ 622 622 cred = get_current_cred(); 623 623 cxt = cred_cxt(cred); 624 - profile = aa_cred_profile(cred); 625 - previous_profile = cxt->previous; 624 + profile = aa_get_newest_profile(aa_cred_profile(cred)); 625 + previous_profile = aa_get_newest_profile(cxt->previous); 626 626 627 627 if (unconfined(profile)) { 628 628 info = "unconfined"; ··· 718 718 out: 719 719 aa_put_profile(hat); 720 720 kfree(name); 721 + aa_put_profile(profile); 722 + aa_put_profile(previous_profile); 721 723 put_cred(cred); 722 724 723 725 return error;
-2
sound/pci/hda/patch_realtek.c
··· 6907 6907 .v.pins = (const struct hda_pintbl[]) { 6908 6908 { 0x15, 0x40f000f0 }, /* disabled */ 6909 6909 { 0x16, 0x40f000f0 }, /* disabled */ 6910 - { 0x18, 0x01014011 }, /* LO */ 6911 - { 0x1a, 0x01014012 }, /* LO */ 6912 6910 { } 6913 6911 } 6914 6912 },
+2 -1
sound/pci/hda/thinkpad_helper.c
··· 13 13 static bool is_thinkpad(struct hda_codec *codec) 14 14 { 15 15 return (codec->core.subsystem_id >> 16 == 0x17aa) && 16 - (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068")); 16 + (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") || 17 + acpi_dev_found("IBM0068")); 17 18 } 18 19 19 20 static void update_tpacpi_mute_led(void *private_data, int enabled)
+1
sound/soc/qcom/lpass-platform.c
··· 75 75 data->i2s_port = cpu_dai->driver->id; 76 76 runtime->private_data = data; 77 77 78 + dma_ch = 0; 78 79 if (v->alloc_dma_channel) 79 80 dma_ch = v->alloc_dma_channel(drvdata, dir); 80 81 if (dma_ch < 0)
+2 -1
sound/usb/card.c
··· 315 315 snd_usb_endpoint_free(ep); 316 316 317 317 mutex_destroy(&chip->mutex); 318 - dev_set_drvdata(&chip->dev->dev, NULL); 318 + if (!atomic_read(&chip->shutdown)) 319 + dev_set_drvdata(&chip->dev->dev, NULL); 319 320 kfree(chip); 320 321 return 0; 321 322 }
+37 -11
tools/perf/ui/browsers/hists.c
··· 1337 1337 } 1338 1338 1339 1339 if (first) { 1340 - ui_browser__printf(&browser->b, "%c", folded_sign); 1341 - width--; 1340 + ui_browser__printf(&browser->b, "%c ", folded_sign); 1341 + width -= 2; 1342 1342 first = false; 1343 1343 } else { 1344 1344 ui_browser__printf(&browser->b, " "); ··· 1361 1361 width -= hpp.buf - s; 1362 1362 } 1363 1363 1364 - ui_browser__write_nstring(&browser->b, "", hierarchy_indent); 1365 - width -= hierarchy_indent; 1364 + if (!first) { 1365 + ui_browser__write_nstring(&browser->b, "", hierarchy_indent); 1366 + width -= hierarchy_indent; 1367 + } 1366 1368 1367 1369 if (column >= browser->b.horiz_scroll) { 1368 1370 char s[2048]; ··· 1383 1381 } 1384 1382 1385 1383 perf_hpp_list__for_each_format(entry->hpp_list, fmt) { 1386 - ui_browser__write_nstring(&browser->b, "", 2); 1384 + if (first) { 1385 + ui_browser__printf(&browser->b, "%c ", folded_sign); 1386 + first = false; 1387 + } else { 1388 + ui_browser__write_nstring(&browser->b, "", 2); 1389 + } 1390 + 1387 1391 width -= 2; 1388 1392 1389 1393 /* ··· 1563 1555 int indent = hists->nr_hpp_node - 2; 1564 1556 bool first_node, first_col; 1565 1557 1566 - ret = scnprintf(buf, size, " "); 1558 + ret = scnprintf(buf, size, " "); 1567 1559 if (advance_hpp_check(&dummy_hpp, ret)) 1568 1560 return ret; 1569 1561 1562 + first_node = true; 1570 1563 /* the first hpp_list_node is for overhead columns */ 1571 1564 fmt_node = list_first_entry(&hists->hpp_formats, 1572 1565 struct perf_hpp_list_node, list); ··· 1582 1573 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " "); 1583 1574 if (advance_hpp_check(&dummy_hpp, ret)) 1584 1575 break; 1576 + 1577 + first_node = false; 1585 1578 } 1586 1579 1587 - ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", 1588 - indent * HIERARCHY_INDENT, ""); 1589 - if (advance_hpp_check(&dummy_hpp, ret)) 1590 - return ret; 1580 + if (!first_node) { 1581 + ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", 1582 + indent * HIERARCHY_INDENT, ""); 1583 + if 
(advance_hpp_check(&dummy_hpp, ret)) 1584 + return ret; 1585 + } 1591 1586 1592 1587 first_node = true; 1593 1588 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) { ··· 2089 2076 browser->b.use_navkeypressed = true; 2090 2077 browser->show_headers = symbol_conf.show_hist_headers; 2091 2078 2092 - hists__for_each_format(hists, fmt) 2079 + if (symbol_conf.report_hierarchy) { 2080 + struct perf_hpp_list_node *fmt_node; 2081 + 2082 + /* count overhead columns (in the first node) */ 2083 + fmt_node = list_first_entry(&hists->hpp_formats, 2084 + struct perf_hpp_list_node, list); 2085 + perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) 2086 + ++browser->b.columns; 2087 + 2088 + /* add a single column for whole hierarchy sort keys*/ 2093 2089 ++browser->b.columns; 2090 + } else { 2091 + hists__for_each_format(hists, fmt) 2092 + ++browser->b.columns; 2093 + } 2094 2094 2095 2095 hists__reset_column_width(hists); 2096 2096 }
+6 -6
tools/perf/util/hist.c
··· 1600 1600 if (prog) 1601 1601 ui_progress__update(prog, 1); 1602 1602 1603 + hists->nr_entries++; 1604 + if (!he->filtered) { 1605 + hists->nr_non_filtered_entries++; 1606 + hists__calc_col_len(hists, he); 1607 + } 1608 + 1603 1609 if (!he->leaf) { 1604 1610 hists__hierarchy_output_resort(hists, prog, 1605 1611 &he->hroot_in, 1606 1612 &he->hroot_out, 1607 1613 min_callchain_hits, 1608 1614 use_callchain); 1609 - hists->nr_entries++; 1610 - if (!he->filtered) { 1611 - hists->nr_non_filtered_entries++; 1612 - hists__calc_col_len(hists, he); 1613 - } 1614 - 1615 1615 continue; 1616 1616 } 1617 1617
+12 -11
tools/power/acpi/Makefile.config
··· 8 8 # as published by the Free Software Foundation; version 2 9 9 # of the License. 10 10 11 - include ../../../../scripts/Makefile.include 11 + ifeq ($(srctree),) 12 + srctree := $(patsubst %/,%,$(dir $(shell pwd))) 13 + srctree := $(patsubst %/,%,$(dir $(srctree))) 14 + #$(info Determined 'srctree' to be $(srctree)) 15 + endif 12 16 13 - OUTPUT=./ 17 + include $(srctree)/../../scripts/Makefile.include 18 + 19 + OUTPUT=$(srctree)/ 14 20 ifeq ("$(origin O)", "command line") 15 - OUTPUT := $(O)/ 21 + OUTPUT := $(O)/power/acpi/ 16 22 endif 17 - 18 - ifneq ($(OUTPUT),) 19 - # check that the output directory actually exists 20 - OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) 21 - $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) 22 - endif 23 + #$(info Determined 'OUTPUT' to be $(OUTPUT)) 23 24 24 25 # --- CONFIGURATION BEGIN --- 25 26 ··· 71 70 WARNINGS += $(call cc-supports,-Wstrict-prototypes) 72 71 WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) 73 72 74 - KERNEL_INCLUDE := ../../../include 75 - ACPICA_INCLUDE := ../../../drivers/acpi/acpica 73 + KERNEL_INCLUDE := $(OUTPUT)include 74 + ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica 76 75 CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) 77 76 CFLAGS += $(WARNINGS) 78 77
+27 -13
tools/power/acpi/Makefile.rules
··· 8 8 # as published by the Free Software Foundation; version 2 9 9 # of the License. 10 10 11 - $(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE 12 - $(ECHO) " LD " $@ 13 - $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@ 11 + objdir := $(OUTPUT)tools/$(TOOL)/ 12 + toolobjs := $(addprefix $(objdir),$(TOOL_OBJS)) 13 + $(OUTPUT)$(TOOL): $(toolobjs) FORCE 14 + $(ECHO) " LD " $(subst $(OUTPUT),,$@) 15 + $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(toolobjs) -L$(OUTPUT) -o $@ 16 + $(ECHO) " STRIP " $(subst $(OUTPUT),,$@) 14 17 $(QUIET) $(STRIPCMD) $@ 15 18 16 - $(OUTPUT)%.o: %.c 17 - $(ECHO) " CC " $@ 19 + $(KERNEL_INCLUDE): 20 + $(ECHO) " MKDIR " $(subst $(OUTPUT),,$@) 21 + $(QUIET) mkdir -p $(KERNEL_INCLUDE) 22 + $(ECHO) " CP " $(subst $(OUTPUT),,$@) 23 + $(QUIET) cp -rf $(srctree)/../../../include/acpi $(KERNEL_INCLUDE)/ 24 + 25 + $(objdir)%.o: %.c $(KERNEL_INCLUDE) 26 + $(ECHO) " CC " $(subst $(OUTPUT),,$@) 18 27 $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< 19 28 20 29 all: $(OUTPUT)$(TOOL) 21 30 clean: 22 - -find $(OUTPUT) \( -not -type d \) \ 23 - -and \( -name '*~' -o -name '*.[oas]' \) \ 24 - -type f -print \ 25 - | xargs rm -f 26 - -rm -f $(OUTPUT)$(TOOL) 31 + $(ECHO) " RMOBJ " $(subst $(OUTPUT),,$(objdir)) 32 + $(QUIET) find $(objdir) \( -not -type d \)\ 33 + -and \( -name '*~' -o -name '*.[oas]' \)\ 34 + -type f -print | xargs rm -f 35 + $(ECHO) " RM " $(TOOL) 36 + $(QUIET) rm -f $(OUTPUT)$(TOOL) 37 + $(ECHO) " RMINC " $(subst $(OUTPUT),,$(KERNEL_INCLUDE)) 38 + $(QUIET) rm -rf $(KERNEL_INCLUDE) 27 39 28 40 install-tools: 29 - $(INSTALL) -d $(DESTDIR)${sbindir} 30 - $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir} 41 + $(ECHO) " INST " $(TOOL) 42 + $(QUIET) $(INSTALL) -d $(DESTDIR)$(sbindir) 43 + $(QUIET) $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)$(sbindir) 31 44 uninstall-tools: 32 - - rm -f $(DESTDIR)${sbindir}/$(TOOL) 45 + $(ECHO) " UNINST " $(TOOL) 46 + $(QUIET) rm -f $(DESTDIR)$(sbindir)/$(TOOL) 33 47 34 48 install: all install-tools 
$(EXTRA_INSTALL) 35 49 uninstall: uninstall-tools $(EXTRA_UNINSTALL)
+1 -3
tools/power/acpi/tools/acpidbg/Makefile
··· 17 17 ../../os_specific/service_layers\ 18 18 . 19 19 CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\ 20 - -I.\ 21 - -I../../../../../drivers/acpi/acpica\ 22 - -I../../../../../include 20 + -I. 23 21 LDFLAGS += -lpthread 24 22 TOOL_OBJS = \ 25 23 acpidbg.o
+7 -1
tools/power/acpi/tools/acpidbg/acpidbg.c
··· 12 12 #include <acpi/acpi.h> 13 13 14 14 /* Headers not included by include/acpi/platform/aclinux.h */ 15 + #include <unistd.h> 16 + #include <stdio.h> 17 + #include <stdlib.h> 18 + #include <string.h> 19 + #include <error.h> 15 20 #include <stdbool.h> 16 21 #include <fcntl.h> 17 22 #include <assert.h> 18 - #include <linux/circ_buf.h> 23 + #include <sys/select.h> 24 + #include "../../../../../include/linux/circ_buf.h" 19 25 20 26 #define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg" 21 27 #define ACPI_AML_SEC_TICK 1
+6 -6
tools/power/acpi/tools/acpidump/Makefile
··· 19 19 ./\ 20 20 ../../common\ 21 21 ../../os_specific/service_layers 22 - CFLAGS += -DACPI_DUMP_APP -I.\ 23 - -I../../../../../drivers/acpi/acpica\ 24 - -I../../../../../include 22 + CFLAGS += -DACPI_DUMP_APP -I. 25 23 TOOL_OBJS = \ 26 24 apdump.o\ 27 25 apfiles.o\ ··· 47 49 48 50 include ../../Makefile.rules 49 51 50 - install-man: ../../man/acpidump.8 51 - $(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8 52 + install-man: $(srctree)/man/acpidump.8 53 + $(ECHO) " INST " acpidump.8 54 + $(QUIET) $(INSTALL_DATA) -D $< $(DESTDIR)$(mandir)/man8/acpidump.8 52 55 uninstall-man: 53 - - rm -f $(DESTDIR)${mandir}/man8/acpidump.8 56 + $(ECHO) " UNINST " acpidump.8 57 + $(QUIET) rm -f $(DESTDIR)$(mandir)/man8/acpidump.8
+2 -5
tools/power/cpupower/utils/cpufreq-set.c
··· 296 296 struct cpufreq_affected_cpus *cpus; 297 297 298 298 if (!bitmask_isbitset(cpus_chosen, cpu) || 299 - cpupower_is_cpu_online(cpu)) 299 + cpupower_is_cpu_online(cpu) != 1) 300 300 continue; 301 301 302 302 cpus = cpufreq_get_related_cpus(cpu); ··· 316 316 cpu <= bitmask_last(cpus_chosen); cpu++) { 317 317 318 318 if (!bitmask_isbitset(cpus_chosen, cpu) || 319 - cpupower_is_cpu_online(cpu)) 320 - continue; 321 - 322 - if (cpupower_is_cpu_online(cpu) != 1) 319 + cpupower_is_cpu_online(cpu) != 1) 323 320 continue; 324 321 325 322 printf(_("Setting cpu: %d\n"), cpu);
+5 -3
virt/kvm/arm/pmu.c
··· 305 305 continue; 306 306 type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) 307 307 & ARMV8_PMU_EVTYPE_EVENT; 308 - if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) 308 + if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR) 309 309 && (enable & BIT(i))) { 310 310 reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; 311 311 reg = lower_32_bits(reg); ··· 379 379 eventsel = data & ARMV8_PMU_EVTYPE_EVENT; 380 380 381 381 /* Software increment event does't need to be backed by a perf event */ 382 - if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) 382 + if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR && 383 + select_idx != ARMV8_PMU_CYCLE_IDX) 383 384 return; 384 385 385 386 memset(&attr, 0, sizeof(struct perf_event_attr)); ··· 392 391 attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0; 393 392 attr.exclude_hv = 1; /* Don't count EL2 events */ 394 393 attr.exclude_host = 1; /* Don't count host events */ 395 - attr.config = eventsel; 394 + attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ? 395 + ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel; 396 396 397 397 counter = kvm_pmu_get_counter_value(vcpu, select_idx); 398 398 /* The initial sample period (overflow count) of an event. */
+27 -14
virt/kvm/arm/vgic/vgic-mmio.c
··· 453 453 return container_of(dev, struct vgic_io_device, dev); 454 454 } 455 455 456 - static bool check_region(const struct vgic_register_region *region, 456 + static bool check_region(const struct kvm *kvm, 457 + const struct vgic_register_region *region, 457 458 gpa_t addr, int len) 458 459 { 459 - if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1) 460 - return true; 461 - if ((region->access_flags & VGIC_ACCESS_32bit) && 462 - len == sizeof(u32) && !(addr & 3)) 463 - return true; 464 - if ((region->access_flags & VGIC_ACCESS_64bit) && 465 - len == sizeof(u64) && !(addr & 7)) 466 - return true; 460 + int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; 461 + 462 + switch (len) { 463 + case sizeof(u8): 464 + flags = VGIC_ACCESS_8bit; 465 + break; 466 + case sizeof(u32): 467 + flags = VGIC_ACCESS_32bit; 468 + break; 469 + case sizeof(u64): 470 + flags = VGIC_ACCESS_64bit; 471 + break; 472 + default: 473 + return false; 474 + } 475 + 476 + if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) { 477 + if (!region->bits_per_irq) 478 + return true; 479 + 480 + /* Do we access a non-allocated IRQ? */ 481 + return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs; 482 + } 467 483 468 484 return false; 469 485 } ··· 493 477 494 478 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, 495 479 addr - iodev->base_addr); 496 - if (!region || !check_region(region, addr, len)) { 480 + if (!region || !check_region(vcpu->kvm, region, addr, len)) { 497 481 memset(val, 0, len); 498 482 return 0; 499 483 } ··· 526 510 527 511 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, 528 512 addr - iodev->base_addr); 529 - if (!region) 530 - return 0; 531 - 532 - if (!check_region(region, addr, len)) 513 + if (!region || !check_region(vcpu->kvm, region, addr, len)) 533 514 return 0; 534 515 535 516 switch (iodev->iodev_type) {
+7 -7
virt/kvm/arm/vgic/vgic-mmio.h
··· 50 50 #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1) 51 51 52 52 /* 53 - * (addr & mask) gives us the byte offset for the INT ID, so we want to 54 - * divide this with 'bytes per irq' to get the INT ID, which is given 55 - * by '(bits) / 8'. But we do this with fixed-point-arithmetic and 56 - * take advantage of the fact that division by a fraction equals 57 - * multiplication with the inverted fraction, and scale up both the 58 - * numerator and denominator with 8 to support at most 64 bits per IRQ: 53 + * (addr & mask) gives us the _byte_ offset for the INT ID. 54 + * We multiply this by 8 the get the _bit_ offset, then divide this by 55 + * the number of bits to learn the actual INT ID. 56 + * But instead of a division (which requires a "long long div" implementation), 57 + * we shift by the binary logarithm of <bits>. 58 + * This assumes that <bits> is a power of two. 59 59 */ 60 60 #define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \ 61 - 64 / (bits) / 8) 61 + 8 >> ilog2(bits)) 62 62 63 63 /* 64 64 * Some VGIC registers store per-IRQ information, with a different number
+12
virt/kvm/arm/vgic/vgic.c
··· 273 273 * no more work for us to do. 274 274 */ 275 275 spin_unlock(&irq->irq_lock); 276 + 277 + /* 278 + * We have to kick the VCPU here, because we could be 279 + * queueing an edge-triggered interrupt for which we 280 + * get no EOI maintenance interrupt. In that case, 281 + * while the IRQ is already on the VCPU's AP list, the 282 + * VCPU could have EOI'ed the original interrupt and 283 + * won't see this one until it exits for some other 284 + * reason. 285 + */ 286 + if (vcpu) 287 + kvm_vcpu_kick(vcpu); 276 288 return false; 277 289 } 278 290
+12 -1
virt/kvm/async_pf.c
··· 91 91 92 92 spin_lock(&vcpu->async_pf.lock); 93 93 list_add_tail(&apf->link, &vcpu->async_pf.done); 94 + apf->vcpu = NULL; 94 95 spin_unlock(&vcpu->async_pf.lock); 95 96 96 97 /* ··· 114 113 115 114 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) 116 115 { 116 + spin_lock(&vcpu->async_pf.lock); 117 + 117 118 /* cancel outstanding work queue item */ 118 119 while (!list_empty(&vcpu->async_pf.queue)) { 119 120 struct kvm_async_pf *work = ··· 123 120 typeof(*work), queue); 124 121 list_del(&work->queue); 125 122 123 + /* 124 + * We know it's present in vcpu->async_pf.done, do 125 + * nothing here. 126 + */ 127 + if (!work->vcpu) 128 + continue; 129 + 130 + spin_unlock(&vcpu->async_pf.lock); 126 131 #ifdef CONFIG_KVM_ASYNC_PF_SYNC 127 132 flush_work(&work->work); 128 133 #else ··· 140 129 kmem_cache_free(async_pf_cache, work); 141 130 } 142 131 #endif 132 + spin_lock(&vcpu->async_pf.lock); 143 133 } 144 134 145 - spin_lock(&vcpu->async_pf.lock); 146 135 while (!list_empty(&vcpu->async_pf.done)) { 147 136 struct kvm_async_pf *work = 148 137 list_first_entry(&vcpu->async_pf.done,