Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

+3190 -2042
+6
Documentation/admin-guide/kernel-parameters.txt
··· 3523 3523 ramdisk_size= [RAM] Sizes of RAM disks in kilobytes 3524 3524 See Documentation/blockdev/ramdisk.txt. 3525 3525 3526 + random.trust_cpu={on,off} 3527 + [KNL] Enable or disable trusting the use of the 3528 + CPU's random number generator (if available) to 3529 + fully seed the kernel's CRNG. Default is controlled 3530 + by CONFIG_RANDOM_TRUST_CPU. 3531 + 3526 3532 ras=option[,option,...] [KNL] RAS-specific options 3527 3533 3528 3534 cec_disable [X86]
+1 -2
Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
··· 3 3 Required properties: 4 4 - compatible : 5 5 - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc 6 - - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc 7 6 - reg : address and length of the lpi2c master registers 8 7 - interrupts : lpi2c interrupt 9 8 - clocks : lpi2c clock specifier ··· 10 11 Examples: 11 12 12 13 lpi2c7: lpi2c7@40a50000 { 13 - compatible = "fsl,imx8dv-lpi2c"; 14 + compatible = "fsl,imx7ulp-lpi2c"; 14 15 reg = <0x40A50000 0x10000>; 15 16 interrupt-parent = <&intc>; 16 17 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+1 -1
Documentation/process/changes.rst
··· 86 86 87 87 The build system, as of 4.18, requires pkg-config to check for installed 88 88 kconfig tools and to determine flags settings for use in 89 - 'make {menu,n,g,x}config'. Previously pkg-config was being used but not 89 + 'make {g,x}config'. Previously pkg-config was being used but not 90 90 verified or documented. 91 91 92 92 Flex
+5
Documentation/scsi/scsi-parameters.txt
··· 97 97 allowing boot to proceed. none ignores them, expecting 98 98 user space to do the scan. 99 99 100 + scsi_mod.use_blk_mq= 101 + [SCSI] use blk-mq I/O path by default 102 + See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig. 103 + Format: <y/n> 104 + 100 105 sim710= [SCSI,HW] 101 106 See header of drivers/scsi/sim710.c. 102 107
+1
MAINTAINERS
··· 2311 2311 F: drivers/i2c/busses/i2c-cadence.c 2312 2312 F: drivers/mmc/host/sdhci-of-arasan.c 2313 2313 F: drivers/edac/synopsys_edac.c 2314 + F: drivers/i2c/busses/i2c-xiic.c 2314 2315 2315 2316 ARM64 PORT (AARCH64 ARCHITECTURE) 2316 2317 M: Catalin Marinas <catalin.marinas@arm.com>
+1 -1
Makefile
··· 2 2 VERSION = 4 3 3 PATCHLEVEL = 19 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc2 5 + EXTRAVERSION = -rc3 6 6 NAME = Merciless Moray 7 7 8 8 # *DOCUMENTATION*
+5 -5
arch/arc/Kconfig
··· 9 9 config ARC 10 10 def_bool y 11 11 select ARC_TIMERS 12 + select ARCH_HAS_PTE_SPECIAL 12 13 select ARCH_HAS_SYNC_DMA_FOR_CPU 13 14 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 14 15 select ARCH_HAS_SG_CHAIN ··· 29 28 select GENERIC_SMP_IDLE_THREAD 30 29 select HAVE_ARCH_KGDB 31 30 select HAVE_ARCH_TRACEHOOK 31 + select HAVE_DEBUG_STACKOVERFLOW 32 32 select HAVE_FUTEX_CMPXCHG if FUTEX 33 + select HAVE_GENERIC_DMA_COHERENT 33 34 select HAVE_IOREMAP_PROT 35 + select HAVE_KERNEL_GZIP 36 + select HAVE_KERNEL_LZMA 34 37 select HAVE_KPROBES 35 38 select HAVE_KRETPROBES 36 39 select HAVE_MEMBLOCK ··· 49 44 select OF_EARLY_FLATTREE 50 45 select OF_RESERVED_MEM 51 46 select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING 52 - select HAVE_DEBUG_STACKOVERFLOW 53 - select HAVE_GENERIC_DMA_COHERENT 54 - select HAVE_KERNEL_GZIP 55 - select HAVE_KERNEL_LZMA 56 - select ARCH_HAS_PTE_SPECIAL 57 47 58 48 config ARCH_HAS_CACHE_LINE_SIZE 59 49 def_bool y
+1 -9
arch/arc/Makefile
··· 43 43 LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h 44 44 endif 45 45 46 - upto_gcc44 := $(call cc-ifversion, -le, 0404, y) 47 - atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y) 48 - 49 - cflags-$(atleast_gcc44) += -fsection-anchors 46 + cflags-y += -fsection-anchors 50 47 51 48 cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock 52 49 cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape ··· 78 81 79 82 cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian 80 83 ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB 81 - 82 - # STAR 9000518362: (fixed with binutils shipping with gcc 4.8) 83 - # arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept 84 - # --build-id w/o "-marclinux". Default arc-elf32-ld is OK 85 - ldflags-$(upto_gcc44) += -marclinux 86 84 87 85 LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) 88 86
+26
arch/arc/boot/dts/axc003.dtsi
··· 94 94 }; 95 95 96 96 /* 97 + * Mark DMA peripherals connected via IOC port as dma-coherent. We do 98 + * it via overlay because peripherals defined in axs10x_mb.dtsi are 99 + * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so 100 + * only AXS103 board has HW-coherent DMA peripherals) 101 + * We don't need to mark pgu@17000 as dma-coherent because it uses 102 + * external DMA buffer located outside of IOC aperture. 103 + */ 104 + axs10x_mb { 105 + ethernet@0x18000 { 106 + dma-coherent; 107 + }; 108 + 109 + ehci@0x40000 { 110 + dma-coherent; 111 + }; 112 + 113 + ohci@0x60000 { 114 + dma-coherent; 115 + }; 116 + 117 + mmc@0x15000 { 118 + dma-coherent; 119 + }; 120 + }; 121 + 122 + /* 97 123 * The DW APB ICTL intc on MB is connected to CPU intc via a 98 124 * DT "invisible" DW APB GPIO block, configured to simply pass thru 99 125 * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
+26
arch/arc/boot/dts/axc003_idu.dtsi
··· 101 101 }; 102 102 103 103 /* 104 + * Mark DMA peripherals connected via IOC port as dma-coherent. We do 105 + * it via overlay because peripherals defined in axs10x_mb.dtsi are 106 + * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so 107 + * only AXS103 board has HW-coherent DMA peripherals) 108 + * We don't need to mark pgu@17000 as dma-coherent because it uses 109 + * external DMA buffer located outside of IOC aperture. 110 + */ 111 + axs10x_mb { 112 + ethernet@0x18000 { 113 + dma-coherent; 114 + }; 115 + 116 + ehci@0x40000 { 117 + dma-coherent; 118 + }; 119 + 120 + ohci@0x60000 { 121 + dma-coherent; 122 + }; 123 + 124 + mmc@0x15000 { 125 + dma-coherent; 126 + }; 127 + }; 128 + 129 + /* 104 130 * This INTC is actually connected to DW APB GPIO 105 131 * which acts as a wire between MB INTC and CPU INTC. 106 132 * GPIO INTC is configured in platform init code
+6 -1
arch/arc/boot/dts/axs10x_mb.dtsi
··· 9 9 */ 10 10 11 11 / { 12 + aliases { 13 + ethernet = &gmac; 14 + }; 15 + 12 16 axs10x_mb { 13 17 compatible = "simple-bus"; 14 18 #address-cells = <1>; ··· 72 68 }; 73 69 }; 74 70 75 - ethernet@0x18000 { 71 + gmac: ethernet@0x18000 { 76 72 #interrupt-cells = <1>; 77 73 compatible = "snps,dwmac"; 78 74 reg = < 0x18000 0x2000 >; ··· 85 81 max-speed = <100>; 86 82 resets = <&creg_rst 5>; 87 83 reset-names = "stmmaceth"; 84 + mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ 88 85 }; 89 86 90 87 ehci@0x40000 {
+10 -1
arch/arc/boot/dts/hsdk.dts
··· 25 25 bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1"; 26 26 }; 27 27 28 + aliases { 29 + ethernet = &gmac; 30 + }; 31 + 28 32 cpus { 29 33 #address-cells = <1>; 30 34 #size-cells = <0>; ··· 167 163 #clock-cells = <0>; 168 164 }; 169 165 170 - ethernet@8000 { 166 + gmac: ethernet@8000 { 171 167 #interrupt-cells = <1>; 172 168 compatible = "snps,dwmac"; 173 169 reg = <0x8000 0x2000>; ··· 180 176 phy-handle = <&phy0>; 181 177 resets = <&cgu_rst HSDK_ETH_RESET>; 182 178 reset-names = "stmmaceth"; 179 + mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ 180 + dma-coherent; 183 181 184 182 mdio { 185 183 #address-cells = <1>; ··· 200 194 compatible = "snps,hsdk-v1.0-ohci", "generic-ohci"; 201 195 reg = <0x60000 0x100>; 202 196 interrupts = <15>; 197 + dma-coherent; 203 198 }; 204 199 205 200 ehci@40000 { 206 201 compatible = "snps,hsdk-v1.0-ehci", "generic-ehci"; 207 202 reg = <0x40000 0x100>; 208 203 interrupts = <15>; 204 + dma-coherent; 209 205 }; 210 206 211 207 mmc@a000 { ··· 220 212 clock-names = "biu", "ciu"; 221 213 interrupts = <12>; 222 214 bus-width = <4>; 215 + dma-coherent; 223 216 }; 224 217 }; 225 218
-3
arch/arc/configs/axs101_defconfig
··· 1 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2 - # CONFIG_SWAP is not set 3 1 CONFIG_SYSVIPC=y 4 2 CONFIG_POSIX_MQUEUE=y 5 3 # CONFIG_CROSS_MEMORY_ATTACH is not set ··· 61 63 CONFIG_MOUSE_SERIAL=y 62 64 CONFIG_MOUSE_SYNAPTICS_USB=y 63 65 # CONFIG_LEGACY_PTYS is not set 64 - # CONFIG_DEVKMEM is not set 65 66 CONFIG_SERIAL_8250=y 66 67 CONFIG_SERIAL_8250_CONSOLE=y 67 68 CONFIG_SERIAL_8250_DW=y
-3
arch/arc/configs/axs103_defconfig
··· 1 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2 - # CONFIG_SWAP is not set 3 1 CONFIG_SYSVIPC=y 4 2 CONFIG_POSIX_MQUEUE=y 5 3 # CONFIG_CROSS_MEMORY_ATTACH is not set ··· 62 64 CONFIG_MOUSE_SERIAL=y 63 65 CONFIG_MOUSE_SYNAPTICS_USB=y 64 66 # CONFIG_LEGACY_PTYS is not set 65 - # CONFIG_DEVKMEM is not set 66 67 CONFIG_SERIAL_8250=y 67 68 CONFIG_SERIAL_8250_CONSOLE=y 68 69 CONFIG_SERIAL_8250_DW=y
-3
arch/arc/configs/axs103_smp_defconfig
··· 1 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2 - # CONFIG_SWAP is not set 3 1 CONFIG_SYSVIPC=y 4 2 CONFIG_POSIX_MQUEUE=y 5 3 # CONFIG_CROSS_MEMORY_ATTACH is not set ··· 63 65 CONFIG_MOUSE_SERIAL=y 64 66 CONFIG_MOUSE_SYNAPTICS_USB=y 65 67 # CONFIG_LEGACY_PTYS is not set 66 - # CONFIG_DEVKMEM is not set 67 68 CONFIG_SERIAL_8250=y 68 69 CONFIG_SERIAL_8250_CONSOLE=y 69 70 CONFIG_SERIAL_8250_DW=y
-2
arch/arc/configs/haps_hs_defconfig
··· 1 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2 1 # CONFIG_SWAP is not set 3 2 CONFIG_SYSVIPC=y 4 3 CONFIG_POSIX_MQUEUE=y ··· 56 57 # CONFIG_SERIO_SERPORT is not set 57 58 CONFIG_SERIO_ARC_PS2=y 58 59 # CONFIG_LEGACY_PTYS is not set 59 - # CONFIG_DEVKMEM is not set 60 60 CONFIG_SERIAL_8250=y 61 61 CONFIG_SERIAL_8250_CONSOLE=y 62 62 CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/haps_hs_smp_defconfig
··· 1 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2 1 # CONFIG_SWAP is not set 3 2 CONFIG_SYSVIPC=y 4 3 CONFIG_POSIX_MQUEUE=y ··· 59 60 # CONFIG_SERIO_SERPORT is not set 60 61 CONFIG_SERIO_ARC_PS2=y 61 62 # CONFIG_LEGACY_PTYS is not set 62 - # CONFIG_DEVKMEM is not set 63 63 CONFIG_SERIAL_8250=y 64 64 CONFIG_SERIAL_8250_CONSOLE=y 65 65 CONFIG_SERIAL_8250_NR_UARTS=1
-1
arch/arc/configs/hsdk_defconfig
··· 1 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2 1 CONFIG_SYSVIPC=y 3 2 # CONFIG_CROSS_MEMORY_ATTACH is not set 4 3 CONFIG_NO_HZ_IDLE=y
-1
arch/arc/configs/nps_defconfig
··· 59 59 # CONFIG_INPUT_MOUSE is not set 60 60 # CONFIG_SERIO is not set 61 61 # CONFIG_LEGACY_PTYS is not set 62 - # CONFIG_DEVKMEM is not set 63 62 CONFIG_SERIAL_8250=y 64 63 CONFIG_SERIAL_8250_CONSOLE=y 65 64 CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/nsim_700_defconfig
··· 1 1 # CONFIG_LOCALVERSION_AUTO is not set 2 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3 2 # CONFIG_SWAP is not set 4 3 CONFIG_SYSVIPC=y 5 4 CONFIG_POSIX_MQUEUE=y ··· 43 44 # CONFIG_INPUT_MOUSE is not set 44 45 # CONFIG_SERIO is not set 45 46 # CONFIG_LEGACY_PTYS is not set 46 - # CONFIG_DEVKMEM is not set 47 47 CONFIG_SERIAL_ARC=y 48 48 CONFIG_SERIAL_ARC_CONSOLE=y 49 49 # CONFIG_HW_RANDOM is not set
-2
arch/arc/configs/nsim_hs_defconfig
··· 1 1 # CONFIG_LOCALVERSION_AUTO is not set 2 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3 2 # CONFIG_SWAP is not set 4 3 CONFIG_SYSVIPC=y 5 4 CONFIG_POSIX_MQUEUE=y ··· 44 45 # CONFIG_INPUT_MOUSE is not set 45 46 # CONFIG_SERIO is not set 46 47 # CONFIG_LEGACY_PTYS is not set 47 - # CONFIG_DEVKMEM is not set 48 48 CONFIG_SERIAL_ARC=y 49 49 CONFIG_SERIAL_ARC_CONSOLE=y 50 50 # CONFIG_HW_RANDOM is not set
-2
arch/arc/configs/nsim_hs_smp_defconfig
··· 1 1 # CONFIG_LOCALVERSION_AUTO is not set 2 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3 2 # CONFIG_SWAP is not set 4 3 # CONFIG_CROSS_MEMORY_ATTACH is not set 5 4 CONFIG_HIGH_RES_TIMERS=y ··· 43 44 # CONFIG_INPUT_MOUSE is not set 44 45 # CONFIG_SERIO is not set 45 46 # CONFIG_LEGACY_PTYS is not set 46 - # CONFIG_DEVKMEM is not set 47 47 CONFIG_SERIAL_ARC=y 48 48 CONFIG_SERIAL_ARC_CONSOLE=y 49 49 # CONFIG_HW_RANDOM is not set
-2
arch/arc/configs/nsimosci_defconfig
··· 1 1 # CONFIG_LOCALVERSION_AUTO is not set 2 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3 2 # CONFIG_SWAP is not set 4 3 CONFIG_SYSVIPC=y 5 4 # CONFIG_CROSS_MEMORY_ATTACH is not set ··· 47 48 # CONFIG_SERIO_SERPORT is not set 48 49 CONFIG_SERIO_ARC_PS2=y 49 50 # CONFIG_LEGACY_PTYS is not set 50 - # CONFIG_DEVKMEM is not set 51 51 CONFIG_SERIAL_8250=y 52 52 CONFIG_SERIAL_8250_CONSOLE=y 53 53 CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/nsimosci_hs_defconfig
··· 1 1 # CONFIG_LOCALVERSION_AUTO is not set 2 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3 2 # CONFIG_SWAP is not set 4 3 CONFIG_SYSVIPC=y 5 4 # CONFIG_CROSS_MEMORY_ATTACH is not set ··· 46 47 # CONFIG_SERIO_SERPORT is not set 47 48 CONFIG_SERIO_ARC_PS2=y 48 49 # CONFIG_LEGACY_PTYS is not set 49 - # CONFIG_DEVKMEM is not set 50 50 CONFIG_SERIAL_8250=y 51 51 CONFIG_SERIAL_8250_CONSOLE=y 52 52 CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/nsimosci_hs_smp_defconfig
··· 1 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2 1 # CONFIG_SWAP is not set 3 2 CONFIG_SYSVIPC=y 4 3 # CONFIG_CROSS_MEMORY_ATTACH is not set ··· 57 58 # CONFIG_SERIO_SERPORT is not set 58 59 CONFIG_SERIO_ARC_PS2=y 59 60 # CONFIG_LEGACY_PTYS is not set 60 - # CONFIG_DEVKMEM is not set 61 61 CONFIG_SERIAL_8250=y 62 62 CONFIG_SERIAL_8250_CONSOLE=y 63 63 CONFIG_SERIAL_8250_NR_UARTS=1
-1
arch/arc/configs/tb10x_defconfig
··· 57 57 # CONFIG_SERIO is not set 58 58 # CONFIG_VT is not set 59 59 # CONFIG_LEGACY_PTYS is not set 60 - # CONFIG_DEVKMEM is not set 61 60 CONFIG_SERIAL_8250=y 62 61 CONFIG_SERIAL_8250_CONSOLE=y 63 62 CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/vdk_hs38_defconfig
··· 1 1 # CONFIG_LOCALVERSION_AUTO is not set 2 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3 2 # CONFIG_CROSS_MEMORY_ATTACH is not set 4 3 CONFIG_HIGH_RES_TIMERS=y 5 4 CONFIG_IKCONFIG=y ··· 52 53 CONFIG_MOUSE_PS2_TOUCHKIT=y 53 54 CONFIG_SERIO_ARC_PS2=y 54 55 # CONFIG_LEGACY_PTYS is not set 55 - # CONFIG_DEVKMEM is not set 56 56 CONFIG_SERIAL_8250=y 57 57 CONFIG_SERIAL_8250_CONSOLE=y 58 58 CONFIG_SERIAL_8250_DW=y
-1
arch/arc/configs/vdk_hs38_smp_defconfig
··· 1 1 # CONFIG_LOCALVERSION_AUTO is not set 2 - CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3 2 # CONFIG_CROSS_MEMORY_ATTACH is not set 4 3 CONFIG_HIGH_RES_TIMERS=y 5 4 CONFIG_IKCONFIG=y
+1 -1
arch/arc/include/asm/atomic.h
··· 84 84 "1: llock %[orig], [%[ctr]] \n" \ 85 85 " " #asm_op " %[val], %[orig], %[i] \n" \ 86 86 " scond %[val], [%[ctr]] \n" \ 87 - " \n" \ 87 + " bnz 1b \n" \ 88 88 : [val] "=&r" (val), \ 89 89 [orig] "=&r" (orig) \ 90 90 : [ctr] "r" (&v->counter), \
+13
arch/arc/include/asm/dma-mapping.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // (C) 2018 Synopsys, Inc. (www.synopsys.com) 3 + 4 + #ifndef ASM_ARC_DMA_MAPPING_H 5 + #define ASM_ARC_DMA_MAPPING_H 6 + 7 + #include <asm-generic/dma-mapping.h> 8 + 9 + void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 10 + const struct iommu_ops *iommu, bool coherent); 11 + #define arch_setup_dma_ops arch_setup_dma_ops 12 + 13 + #endif
+4 -9
arch/arc/kernel/troubleshoot.c
··· 83 83 static void show_faulting_vma(unsigned long address, char *buf) 84 84 { 85 85 struct vm_area_struct *vma; 86 - struct inode *inode; 87 - unsigned long ino = 0; 88 - dev_t dev = 0; 89 86 char *nm = buf; 90 87 struct mm_struct *active_mm = current->active_mm; 91 88 ··· 96 99 * if the container VMA is not found 97 100 */ 98 101 if (vma && (vma->vm_start <= address)) { 99 - struct file *file = vma->vm_file; 100 - if (file) { 101 - nm = file_path(file, buf, PAGE_SIZE - 1); 102 - inode = file_inode(vma->vm_file); 103 - dev = inode->i_sb->s_dev; 104 - ino = inode->i_ino; 102 + if (vma->vm_file) { 103 + nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); 104 + if (IS_ERR(nm)) 105 + nm = "?"; 105 106 } 106 107 pr_info(" @off 0x%lx in [%s]\n" 107 108 " VMA: 0x%08lx to 0x%08lx\n",
+21 -15
arch/arc/mm/cache.c
··· 65 65 66 66 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", 67 67 perip_base, 68 - IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency ")); 68 + IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) ")); 69 69 70 70 return buf; 71 71 } ··· 897 897 } 898 898 899 899 /* 900 - * DMA ops for systems with IOC 901 - * IOC hardware snoops all DMA traffic keeping the caches consistent with 902 - * memory - eliding need for any explicit cache maintenance of DMA buffers 903 - */ 904 - static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {} 905 - static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {} 906 - static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {} 907 - 908 - /* 909 900 * Exported DMA API 910 901 */ 911 902 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz) ··· 1144 1153 { 1145 1154 unsigned int ioc_base, mem_sz; 1146 1155 1156 + /* 1157 + * As for today we don't support both IOC and ZONE_HIGHMEM enabled 1158 + * simultaneously. This happens because as of today IOC aperture covers 1159 + * only ZONE_NORMAL (low mem) and any dma transactions outside this 1160 + * region won't be HW coherent. 1161 + * If we want to use both IOC and ZONE_HIGHMEM we can use 1162 + * bounce_buffer to handle dma transactions to HIGHMEM. 1163 + * Also it is possible to modify dma_direct cache ops or increase IOC 1164 + * aperture size if we are planning to use HIGHMEM without PAE. 
1165 + */ 1166 + if (IS_ENABLED(CONFIG_HIGHMEM)) 1167 + panic("IOC and HIGHMEM can't be used simultaneously"); 1168 + 1147 1169 /* Flush + invalidate + disable L1 dcache */ 1148 1170 __dc_disable(); 1149 1171 ··· 1268 1264 if (is_isa_arcv2() && ioc_enable) 1269 1265 arc_ioc_setup(); 1270 1266 1271 - if (is_isa_arcv2() && ioc_enable) { 1272 - __dma_cache_wback_inv = __dma_cache_wback_inv_ioc; 1273 - __dma_cache_inv = __dma_cache_inv_ioc; 1274 - __dma_cache_wback = __dma_cache_wback_ioc; 1275 - } else if (is_isa_arcv2() && l2_line_sz && slc_enable) { 1267 + if (is_isa_arcv2() && l2_line_sz && slc_enable) { 1276 1268 __dma_cache_wback_inv = __dma_cache_wback_inv_slc; 1277 1269 __dma_cache_inv = __dma_cache_inv_slc; 1278 1270 __dma_cache_wback = __dma_cache_wback_slc; ··· 1277 1277 __dma_cache_inv = __dma_cache_inv_l1; 1278 1278 __dma_cache_wback = __dma_cache_wback_l1; 1279 1279 } 1280 + /* 1281 + * In case of IOC (say IOC+SLC case), pointers above could still be set 1282 + * but end up not being relevant as the first function in chain is not 1283 + * called at all for @dma_direct_ops 1284 + * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() 1285 + */ 1280 1286 } 1281 1287 1282 1288 void __ref arc_cache_init(void)
+41 -41
arch/arc/mm/dma.c
··· 6 6 * published by the Free Software Foundation. 7 7 */ 8 8 9 - /* 10 - * DMA Coherent API Notes 11 - * 12 - * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is 13 - * implemented by accessing it using a kernel virtual address, with 14 - * Cache bit off in the TLB entry. 15 - * 16 - * The default DMA address == Phy address which is 0x8000_0000 based. 17 - */ 18 - 19 9 #include <linux/dma-noncoherent.h> 20 10 #include <asm/cache.h> 21 11 #include <asm/cacheflush.h> 22 12 13 + /* 14 + * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c) 15 + * - hardware IOC not available (or "dma-coherent" not set for device in DT) 16 + * - But still handle both coherent and non-coherent requests from caller 17 + * 18 + * For DMA coherent hardware (IOC) generic code suffices 19 + */ 23 20 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 24 21 gfp_t gfp, unsigned long attrs) 25 22 { ··· 24 27 struct page *page; 25 28 phys_addr_t paddr; 26 29 void *kvaddr; 27 - int need_coh = 1, need_kvaddr = 0; 30 + bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT); 31 + 32 + /* 33 + * __GFP_HIGHMEM flag is cleared by upper layer functions 34 + * (in include/linux/dma-mapping.h) so we should never get a 35 + * __GFP_HIGHMEM here. 36 + */ 37 + BUG_ON(gfp & __GFP_HIGHMEM); 28 38 29 39 page = alloc_pages(gfp, order); 30 40 if (!page) 31 41 return NULL; 32 - 33 - /* 34 - * IOC relies on all data (even coherent DMA data) being in cache 35 - * Thus allocate normal cached memory 36 - * 37 - * The gains with IOC are two pronged: 38 - * -For streaming data, elides need for cache maintenance, saving 39 - * cycles in flush code, and bus bandwidth as all the lines of a 40 - * buffer need to be flushed out to memory 41 - * -For coherent data, Read/Write to buffers terminate early in cache 42 - * (vs. 
always going to memory - thus are faster) 43 - */ 44 - if ((is_isa_arcv2() && ioc_enable) || 45 - (attrs & DMA_ATTR_NON_CONSISTENT)) 46 - need_coh = 0; 47 - 48 - /* 49 - * - A coherent buffer needs MMU mapping to enforce non-cachability 50 - * - A highmem page needs a virtual handle (hence MMU mapping) 51 - * independent of cachability 52 - */ 53 - if (PageHighMem(page) || need_coh) 54 - need_kvaddr = 1; 55 42 56 43 /* This is linear addr (0x8000_0000 based) */ 57 44 paddr = page_to_phys(page); 58 45 59 46 *dma_handle = paddr; 60 47 61 - /* This is kernel Virtual address (0x7000_0000 based) */ 62 - if (need_kvaddr) { 48 + /* 49 + * A coherent buffer needs MMU mapping to enforce non-cachability. 50 + * kvaddr is kernel Virtual address (0x7000_0000 based). 51 + */ 52 + if (need_coh) { 63 53 kvaddr = ioremap_nocache(paddr, size); 64 54 if (kvaddr == NULL) { 65 55 __free_pages(page, order); ··· 77 93 { 78 94 phys_addr_t paddr = dma_handle; 79 95 struct page *page = virt_to_page(paddr); 80 - int is_non_coh = 1; 81 96 82 - is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || 83 - (is_isa_arcv2() && ioc_enable); 84 - 85 - if (PageHighMem(page) || !is_non_coh) 97 + if (!(attrs & DMA_ATTR_NON_CONSISTENT)) 86 98 iounmap((void __force __iomem *)vaddr); 87 99 88 100 __free_pages(page, get_order(size)); ··· 163 183 164 184 default: 165 185 break; 186 + } 187 + } 188 + 189 + /* 190 + * Plug in coherent or noncoherent dma ops 191 + */ 192 + void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 193 + const struct iommu_ops *iommu, bool coherent) 194 + { 195 + /* 196 + * IOC hardware snoops all DMA traffic keeping the caches consistent 197 + * with memory - eliding need for any explicit cache maintenance of 198 + * DMA buffers - so we can use dma_direct cache ops. 
199 + */ 200 + if (is_isa_arcv2() && ioc_enable && coherent) { 201 + set_dma_ops(dev, &dma_direct_ops); 202 + dev_info(dev, "use dma_direct_ops cache ops\n"); 203 + } else { 204 + set_dma_ops(dev, &dma_noncoherent_ops); 205 + dev_info(dev, "use dma_noncoherent_ops cache ops\n"); 166 206 } 167 207 }
-1
arch/arm/include/asm/kvm_host.h
··· 223 223 struct kvm_vcpu_events *events); 224 224 225 225 #define KVM_ARCH_WANT_MMU_NOTIFIER 226 - int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 227 226 int kvm_unmap_hva_range(struct kvm *kvm, 228 227 unsigned long start, unsigned long end); 229 228 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+2
arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
··· 46 46 pinctrl-0 = <&mmc0_pins>; 47 47 vmmc-supply = <&reg_cldo1>; 48 48 cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; 49 + bus-width = <4>; 49 50 status = "okay"; 50 51 }; 51 52 ··· 57 56 vqmmc-supply = <&reg_bldo2>; 58 57 non-removable; 59 58 cap-mmc-hw-reset; 59 + bus-width = <8>; 60 60 status = "okay"; 61 61 }; 62 62
+1 -3
arch/arm64/include/asm/kvm_host.h
··· 61 61 u64 vmid_gen; 62 62 u32 vmid; 63 63 64 - /* 1-level 2nd stage table and lock */ 65 - spinlock_t pgd_lock; 64 + /* 1-level 2nd stage table, protected by kvm->mmu_lock */ 66 65 pgd_t *pgd; 67 66 68 67 /* VTTBR value associated with above pgd and vmid */ ··· 356 357 struct kvm_vcpu_events *events); 357 358 358 359 #define KVM_ARCH_WANT_MMU_NOTIFIER 359 - int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 360 360 int kvm_unmap_hva_range(struct kvm *kvm, 361 361 unsigned long start, unsigned long end); 362 362 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+6 -3
arch/arm64/kvm/hyp/switch.c
··· 98 98 val = read_sysreg(cpacr_el1); 99 99 val |= CPACR_EL1_TTA; 100 100 val &= ~CPACR_EL1_ZEN; 101 - if (!update_fp_enabled(vcpu)) 101 + if (!update_fp_enabled(vcpu)) { 102 102 val &= ~CPACR_EL1_FPEN; 103 + __activate_traps_fpsimd32(vcpu); 104 + } 103 105 104 106 write_sysreg(val, cpacr_el1); 105 107 ··· 116 114 117 115 val = CPTR_EL2_DEFAULT; 118 116 val |= CPTR_EL2_TTA | CPTR_EL2_TZ; 119 - if (!update_fp_enabled(vcpu)) 117 + if (!update_fp_enabled(vcpu)) { 120 118 val |= CPTR_EL2_TFP; 119 + __activate_traps_fpsimd32(vcpu); 120 + } 121 121 122 122 write_sysreg(val, cptr_el2); 123 123 } ··· 133 129 if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) 134 130 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); 135 131 136 - __activate_traps_fpsimd32(vcpu); 137 132 if (has_vhe()) 138 133 activate_traps_vhe(vcpu); 139 134 else
+6 -4
arch/arm64/mm/mmu.c
··· 985 985 986 986 pmd = READ_ONCE(*pmdp); 987 987 988 - /* No-op for empty entry and WARN_ON for valid entry */ 989 - if (!pmd_present(pmd) || !pmd_table(pmd)) { 988 + if (!pmd_present(pmd)) 989 + return 1; 990 + if (!pmd_table(pmd)) { 990 991 VM_WARN_ON(!pmd_table(pmd)); 991 992 return 1; 992 993 } ··· 1008 1007 1009 1008 pud = READ_ONCE(*pudp); 1010 1009 1011 - /* No-op for empty entry and WARN_ON for valid entry */ 1012 - if (!pud_present(pud) || !pud_table(pud)) { 1010 + if (!pud_present(pud)) 1011 + return 1; 1012 + if (!pud_table(pud)) { 1013 1013 VM_WARN_ON(!pud_table(pud)); 1014 1014 return 1; 1015 1015 }
+1 -1
arch/m68k/mm/mcfmmu.c
··· 172 172 high_memory = (void *)_ramend; 173 173 174 174 /* Reserve kernel text/data/bss */ 175 - memblock_reserve(memstart, memstart - _rambase); 175 + memblock_reserve(_rambase, memstart - _rambase); 176 176 177 177 m68k_virt_to_node_shift = fls(_ramend - 1) - 6; 178 178 module_fixup(NULL, __start_fixup, __stop_fixup);
-1
arch/mips/include/asm/kvm_host.h
··· 931 931 bool write); 932 932 933 933 #define KVM_ARCH_WANT_MMU_NOTIFIER 934 - int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 935 934 int kvm_unmap_hva_range(struct kvm *kvm, 936 935 unsigned long start, unsigned long end); 937 936 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+1
arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
··· 40 40 int desc; /* the current descriptor */ 41 41 struct ltq_dma_desc *desc_base; /* the descriptor base */ 42 42 int phys; /* physical addr */ 43 + struct device *dev; 43 44 }; 44 45 45 46 enum {
+20
arch/mips/kernel/vdso.c
··· 13 13 #include <linux/err.h> 14 14 #include <linux/init.h> 15 15 #include <linux/ioport.h> 16 + #include <linux/kernel.h> 16 17 #include <linux/mm.h> 17 18 #include <linux/sched.h> 18 19 #include <linux/slab.h> ··· 21 20 22 21 #include <asm/abi.h> 23 22 #include <asm/mips-cps.h> 23 + #include <asm/page.h> 24 24 #include <asm/vdso.h> 25 25 26 26 /* Kernel-provided data used by the VDSO. */ ··· 130 128 vvar_size = gic_size + PAGE_SIZE; 131 129 size = vvar_size + image->size; 132 130 131 + /* 132 + * Find a region that's large enough for us to perform the 133 + * colour-matching alignment below. 134 + */ 135 + if (cpu_has_dc_aliases) 136 + size += shm_align_mask + 1; 137 + 133 138 base = get_unmapped_area(NULL, 0, size, 0, 0); 134 139 if (IS_ERR_VALUE(base)) { 135 140 ret = base; 136 141 goto out; 142 + } 143 + 144 + /* 145 + * If we suffer from dcache aliasing, ensure that the VDSO data page 146 + * mapping is coloured the same as the kernel's mapping of that memory. 147 + * This ensures that when the kernel updates the VDSO data userland 148 + * will observe it without requiring cache invalidations. 149 + */ 150 + if (cpu_has_dc_aliases) { 151 + base = __ALIGN_MASK(base, shm_align_mask); 152 + base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask; 137 153 } 138 154 139 155 data_addr = base + gic_size;
-10
arch/mips/kvm/mmu.c
··· 512 512 return 1; 513 513 } 514 514 515 - int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 516 - { 517 - unsigned long end = hva + PAGE_SIZE; 518 - 519 - handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); 520 - 521 - kvm_mips_callbacks->flush_shadow_all(kvm); 522 - return 0; 523 - } 524 - 525 515 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 526 516 { 527 517 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+2 -2
arch/mips/lantiq/xway/dma.c
··· 130 130 unsigned long flags; 131 131 132 132 ch->desc = 0; 133 - ch->desc_base = dma_zalloc_coherent(NULL, 133 + ch->desc_base = dma_zalloc_coherent(ch->dev, 134 134 LTQ_DESC_NUM * LTQ_DESC_SIZE, 135 135 &ch->phys, GFP_ATOMIC); 136 136 ··· 182 182 if (!ch->desc_base) 183 183 return; 184 184 ltq_dma_close(ch); 185 - dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, 185 + dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE, 186 186 ch->desc_base, ch->phys); 187 187 } 188 188 EXPORT_SYMBOL_GPL(ltq_dma_free);
+4
arch/nds32/Kconfig
··· 40 40 select NO_IOPORT_MAP 41 41 select RTC_LIB 42 42 select THREAD_INFO_IN_TASK 43 + select HAVE_FUNCTION_TRACER 44 + select HAVE_FUNCTION_GRAPH_TRACER 45 + select HAVE_FTRACE_MCOUNT_RECORD 46 + select HAVE_DYNAMIC_FTRACE 43 47 help 44 48 Andes(nds32) Linux support. 45 49
+4
arch/nds32/Makefile
··· 5 5 6 6 comma = , 7 7 8 + ifdef CONFIG_FUNCTION_TRACER 9 + arch-y += -malways-save-lp -mno-relax 10 + endif 11 + 8 12 KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog) 9 13 KBUILD_CFLAGS += -mcmodel=large 10 14
+2 -2
arch/nds32/include/asm/elf.h
··· 121 121 */ 122 122 #define ELF_CLASS ELFCLASS32 123 123 #ifdef __NDS32_EB__ 124 - #define ELF_DATA ELFDATA2MSB; 124 + #define ELF_DATA ELFDATA2MSB 125 125 #else 126 - #define ELF_DATA ELFDATA2LSB; 126 + #define ELF_DATA ELFDATA2LSB 127 127 #endif 128 128 #define ELF_ARCH EM_NDS32 129 129 #define USE_ELF_CORE_DUMP
+46
arch/nds32/include/asm/ftrace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_NDS32_FTRACE_H 4 + #define __ASM_NDS32_FTRACE_H 5 + 6 + #ifdef CONFIG_FUNCTION_TRACER 7 + 8 + #define HAVE_FUNCTION_GRAPH_FP_TEST 9 + 10 + #define MCOUNT_ADDR ((unsigned long)(_mcount)) 11 + /* mcount call is composed of three instructions: 12 + * sethi + ori + jral 13 + */ 14 + #define MCOUNT_INSN_SIZE 12 15 + 16 + extern void _mcount(unsigned long parent_ip); 17 + 18 + #ifdef CONFIG_DYNAMIC_FTRACE 19 + 20 + #define FTRACE_ADDR ((unsigned long)_ftrace_caller) 21 + 22 + #ifdef __NDS32_EL__ 23 + #define INSN_NOP 0x09000040 24 + #define INSN_SIZE(insn) (((insn & 0x00000080) == 0) ? 4 : 2) 25 + #define IS_SETHI(insn) ((insn & 0x000000fe) == 0x00000046) 26 + #define ENDIAN_CONVERT(insn) be32_to_cpu(insn) 27 + #else /* __NDS32_EB__ */ 28 + #define INSN_NOP 0x40000009 29 + #define INSN_SIZE(insn) (((insn & 0x80000000) == 0) ? 4 : 2) 30 + #define IS_SETHI(insn) ((insn & 0xfe000000) == 0x46000000) 31 + #define ENDIAN_CONVERT(insn) (insn) 32 + #endif 33 + 34 + extern void _ftrace_caller(unsigned long parent_ip); 35 + static inline unsigned long ftrace_call_adjust(unsigned long addr) 36 + { 37 + return addr; 38 + } 39 + struct dyn_arch_ftrace { 40 + }; 41 + 42 + #endif /* CONFIG_DYNAMIC_FTRACE */ 43 + 44 + #endif /* CONFIG_FUNCTION_TRACER */ 45 + 46 + #endif /* __ASM_NDS32_FTRACE_H */
+1
arch/nds32/include/asm/nds32.h
··· 17 17 #else 18 18 #define FP_OFFSET (-2) 19 19 #endif 20 + #define LP_OFFSET (-1) 20 21 21 22 extern void __init early_trap_init(void); 22 23 static inline void GIE_ENABLE(void)
+119 -110
arch/nds32/include/asm/uaccess.h
··· 38 38 extern int fixup_exception(struct pt_regs *regs); 39 39 40 40 #define KERNEL_DS ((mm_segment_t) { ~0UL }) 41 - #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) 41 + #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) 42 42 43 43 #define get_ds() (KERNEL_DS) 44 44 #define get_fs() (current_thread_info()->addr_limit) ··· 49 49 current_thread_info()->addr_limit = fs; 50 50 } 51 51 52 - #define segment_eq(a, b) ((a) == (b)) 52 + #define segment_eq(a, b) ((a) == (b)) 53 53 54 54 #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) 55 55 56 - #define access_ok(type, addr, size) \ 56 + #define access_ok(type, addr, size) \ 57 57 __range_ok((unsigned long)addr, (unsigned long)size) 58 58 /* 59 59 * Single-value transfer routines. They automatically use the right ··· 75 75 * versions are void (ie, don't return a value as such). 76 76 */ 77 77 78 - #define get_user(x,p) \ 79 - ({ \ 80 - long __e = -EFAULT; \ 81 - if(likely(access_ok(VERIFY_READ, p, sizeof(*p)))) { \ 82 - __e = __get_user(x,p); \ 83 - } else \ 84 - x = 0; \ 85 - __e; \ 86 - }) 87 - #define __get_user(x,ptr) \ 78 + #define get_user __get_user \ 79 + 80 + #define __get_user(x, ptr) \ 88 81 ({ \ 89 82 long __gu_err = 0; \ 90 - __get_user_err((x),(ptr),__gu_err); \ 83 + __get_user_check((x), (ptr), __gu_err); \ 91 84 __gu_err; \ 92 85 }) 93 86 94 - #define __get_user_error(x,ptr,err) \ 87 + #define __get_user_error(x, ptr, err) \ 95 88 ({ \ 96 - __get_user_err((x),(ptr),err); \ 97 - (void) 0; \ 89 + __get_user_check((x), (ptr), (err)); \ 90 + (void)0; \ 98 91 }) 99 92 100 - #define __get_user_err(x,ptr,err) \ 93 + #define __get_user_check(x, ptr, err) \ 94 + ({ \ 95 + const __typeof__(*(ptr)) __user *__p = (ptr); \ 96 + might_fault(); \ 97 + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ 98 + __get_user_err((x), __p, (err)); \ 99 + } else { \ 100 + (x) = 0; (err) = -EFAULT; \ 101 + } \ 102 + }) 103 + 104 + #define __get_user_err(x, ptr, err) \ 101 105 do { \ 102 - unsigned long 
__gu_addr = (unsigned long)(ptr); \ 103 106 unsigned long __gu_val; \ 104 107 __chk_user_ptr(ptr); \ 105 108 switch (sizeof(*(ptr))) { \ 106 109 case 1: \ 107 - __get_user_asm("lbi",__gu_val,__gu_addr,err); \ 110 + __get_user_asm("lbi", __gu_val, (ptr), (err)); \ 108 111 break; \ 109 112 case 2: \ 110 - __get_user_asm("lhi",__gu_val,__gu_addr,err); \ 113 + __get_user_asm("lhi", __gu_val, (ptr), (err)); \ 111 114 break; \ 112 115 case 4: \ 113 - __get_user_asm("lwi",__gu_val,__gu_addr,err); \ 116 + __get_user_asm("lwi", __gu_val, (ptr), (err)); \ 114 117 break; \ 115 118 case 8: \ 116 - __get_user_asm_dword(__gu_val,__gu_addr,err); \ 119 + __get_user_asm_dword(__gu_val, (ptr), (err)); \ 117 120 break; \ 118 121 default: \ 119 122 BUILD_BUG(); \ 120 123 break; \ 121 124 } \ 122 - (x) = (__typeof__(*(ptr)))__gu_val; \ 125 + (x) = (__force __typeof__(*(ptr)))__gu_val; \ 123 126 } while (0) 124 127 125 - #define __get_user_asm(inst,x,addr,err) \ 126 - asm volatile( \ 127 - "1: "inst" %1,[%2]\n" \ 128 - "2:\n" \ 129 - " .section .fixup,\"ax\"\n" \ 130 - " .align 2\n" \ 131 - "3: move %0, %3\n" \ 132 - " move %1, #0\n" \ 133 - " b 2b\n" \ 134 - " .previous\n" \ 135 - " .section __ex_table,\"a\"\n" \ 136 - " .align 3\n" \ 137 - " .long 1b, 3b\n" \ 138 - " .previous" \ 139 - : "+r" (err), "=&r" (x) \ 140 - : "r" (addr), "i" (-EFAULT) \ 141 - : "cc") 128 + #define __get_user_asm(inst, x, addr, err) \ 129 + __asm__ __volatile__ ( \ 130 + "1: "inst" %1,[%2]\n" \ 131 + "2:\n" \ 132 + " .section .fixup,\"ax\"\n" \ 133 + " .align 2\n" \ 134 + "3: move %0, %3\n" \ 135 + " move %1, #0\n" \ 136 + " b 2b\n" \ 137 + " .previous\n" \ 138 + " .section __ex_table,\"a\"\n" \ 139 + " .align 3\n" \ 140 + " .long 1b, 3b\n" \ 141 + " .previous" \ 142 + : "+r" (err), "=&r" (x) \ 143 + : "r" (addr), "i" (-EFAULT) \ 144 + : "cc") 142 145 143 146 #ifdef __NDS32_EB__ 144 147 #define __gu_reg_oper0 "%H1" ··· 152 149 #endif 153 150 154 151 #define __get_user_asm_dword(x, addr, err) \ 155 - asm 
volatile( \ 156 - "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ 157 - "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ 158 - "3:\n" \ 159 - " .section .fixup,\"ax\"\n" \ 160 - " .align 2\n" \ 161 - "4: move %0, %3\n" \ 162 - " b 3b\n" \ 163 - " .previous\n" \ 164 - " .section __ex_table,\"a\"\n" \ 165 - " .align 3\n" \ 166 - " .long 1b, 4b\n" \ 167 - " .long 2b, 4b\n" \ 168 - " .previous" \ 169 - : "+r"(err), "=&r"(x) \ 170 - : "r"(addr), "i"(-EFAULT) \ 171 - : "cc") 172 - #define put_user(x,p) \ 173 - ({ \ 174 - long __e = -EFAULT; \ 175 - if(likely(access_ok(VERIFY_WRITE, p, sizeof(*p)))) { \ 176 - __e = __put_user(x,p); \ 177 - } \ 178 - __e; \ 179 - }) 180 - #define __put_user(x,ptr) \ 152 + __asm__ __volatile__ ( \ 153 + "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ 154 + "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ 155 + "3:\n" \ 156 + " .section .fixup,\"ax\"\n" \ 157 + " .align 2\n" \ 158 + "4: move %0, %3\n" \ 159 + " b 3b\n" \ 160 + " .previous\n" \ 161 + " .section __ex_table,\"a\"\n" \ 162 + " .align 3\n" \ 163 + " .long 1b, 4b\n" \ 164 + " .long 2b, 4b\n" \ 165 + " .previous" \ 166 + : "+r"(err), "=&r"(x) \ 167 + : "r"(addr), "i"(-EFAULT) \ 168 + : "cc") 169 + 170 + #define put_user __put_user \ 171 + 172 + #define __put_user(x, ptr) \ 181 173 ({ \ 182 174 long __pu_err = 0; \ 183 - __put_user_err((x),(ptr),__pu_err); \ 175 + __put_user_err((x), (ptr), __pu_err); \ 184 176 __pu_err; \ 185 177 }) 186 178 187 - #define __put_user_error(x,ptr,err) \ 179 + #define __put_user_error(x, ptr, err) \ 188 180 ({ \ 189 - __put_user_err((x),(ptr),err); \ 190 - (void) 0; \ 181 + __put_user_err((x), (ptr), (err)); \ 182 + (void)0; \ 191 183 }) 192 184 193 - #define __put_user_err(x,ptr,err) \ 185 + #define __put_user_check(x, ptr, err) \ 186 + ({ \ 187 + __typeof__(*(ptr)) __user *__p = (ptr); \ 188 + might_fault(); \ 189 + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ 190 + __put_user_err((x), __p, (err)); \ 191 + } else { \ 192 + (err) = -EFAULT; \ 193 + } \ 194 + }) 195 + 196 + 
#define __put_user_err(x, ptr, err) \ 194 197 do { \ 195 - unsigned long __pu_addr = (unsigned long)(ptr); \ 196 198 __typeof__(*(ptr)) __pu_val = (x); \ 197 199 __chk_user_ptr(ptr); \ 198 200 switch (sizeof(*(ptr))) { \ 199 201 case 1: \ 200 - __put_user_asm("sbi",__pu_val,__pu_addr,err); \ 202 + __put_user_asm("sbi", __pu_val, (ptr), (err)); \ 201 203 break; \ 202 204 case 2: \ 203 - __put_user_asm("shi",__pu_val,__pu_addr,err); \ 205 + __put_user_asm("shi", __pu_val, (ptr), (err)); \ 204 206 break; \ 205 207 case 4: \ 206 - __put_user_asm("swi",__pu_val,__pu_addr,err); \ 208 + __put_user_asm("swi", __pu_val, (ptr), (err)); \ 207 209 break; \ 208 210 case 8: \ 209 - __put_user_asm_dword(__pu_val,__pu_addr,err); \ 211 + __put_user_asm_dword(__pu_val, (ptr), (err)); \ 210 212 break; \ 211 213 default: \ 212 214 BUILD_BUG(); \ ··· 219 211 } \ 220 212 } while (0) 221 213 222 - #define __put_user_asm(inst,x,addr,err) \ 223 - asm volatile( \ 224 - "1: "inst" %1,[%2]\n" \ 225 - "2:\n" \ 226 - " .section .fixup,\"ax\"\n" \ 227 - " .align 2\n" \ 228 - "3: move %0, %3\n" \ 229 - " b 2b\n" \ 230 - " .previous\n" \ 231 - " .section __ex_table,\"a\"\n" \ 232 - " .align 3\n" \ 233 - " .long 1b, 3b\n" \ 234 - " .previous" \ 235 - : "+r" (err) \ 236 - : "r" (x), "r" (addr), "i" (-EFAULT) \ 237 - : "cc") 214 + #define __put_user_asm(inst, x, addr, err) \ 215 + __asm__ __volatile__ ( \ 216 + "1: "inst" %1,[%2]\n" \ 217 + "2:\n" \ 218 + " .section .fixup,\"ax\"\n" \ 219 + " .align 2\n" \ 220 + "3: move %0, %3\n" \ 221 + " b 2b\n" \ 222 + " .previous\n" \ 223 + " .section __ex_table,\"a\"\n" \ 224 + " .align 3\n" \ 225 + " .long 1b, 3b\n" \ 226 + " .previous" \ 227 + : "+r" (err) \ 228 + : "r" (x), "r" (addr), "i" (-EFAULT) \ 229 + : "cc") 238 230 239 231 #ifdef __NDS32_EB__ 240 232 #define __pu_reg_oper0 "%H2" ··· 245 237 #endif 246 238 247 239 #define __put_user_asm_dword(x, addr, err) \ 248 - asm volatile( \ 249 - "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ 250 - "\n2:\tswi " 
__pu_reg_oper1 ",[%1+4]\n" \ 251 - "3:\n" \ 252 - " .section .fixup,\"ax\"\n" \ 253 - " .align 2\n" \ 254 - "4: move %0, %3\n" \ 255 - " b 3b\n" \ 256 - " .previous\n" \ 257 - " .section __ex_table,\"a\"\n" \ 258 - " .align 3\n" \ 259 - " .long 1b, 4b\n" \ 260 - " .long 2b, 4b\n" \ 261 - " .previous" \ 262 - : "+r"(err) \ 263 - : "r"(addr), "r"(x), "i"(-EFAULT) \ 264 - : "cc") 240 + __asm__ __volatile__ ( \ 241 + "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ 242 + "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ 243 + "3:\n" \ 244 + " .section .fixup,\"ax\"\n" \ 245 + " .align 2\n" \ 246 + "4: move %0, %3\n" \ 247 + " b 3b\n" \ 248 + " .previous\n" \ 249 + " .section __ex_table,\"a\"\n" \ 250 + " .align 3\n" \ 251 + " .long 1b, 4b\n" \ 252 + " .long 2b, 4b\n" \ 253 + " .previous" \ 254 + : "+r"(err) \ 255 + : "r"(addr), "r"(x), "i"(-EFAULT) \ 256 + : "cc") 257 + 265 258 extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); 266 259 extern long strncpy_from_user(char *dest, const char __user * src, long count); 267 260 extern __must_check long strlen_user(const char __user * str);
+6
arch/nds32/kernel/Makefile
··· 21 21 22 22 23 23 obj-y += vdso/ 24 + 25 + obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o 26 + 27 + ifdef CONFIG_FUNCTION_TRACER 28 + CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) 29 + endif
+2 -1
arch/nds32/kernel/atl2c.c
··· 9 9 10 10 void __iomem *atl2c_base; 11 11 static const struct of_device_id atl2c_ids[] __initconst = { 12 - {.compatible = "andestech,atl2c",} 12 + {.compatible = "andestech,atl2c",}, 13 + {} 13 14 }; 14 15 15 16 static int __init atl2c_of_init(void)
+1 -1
arch/nds32/kernel/ex-entry.S
··· 118 118 /* interrupt */ 119 119 2: 120 120 #ifdef CONFIG_TRACE_IRQFLAGS 121 - jal trace_hardirqs_off 121 + jal __trace_hardirqs_off 122 122 #endif 123 123 move $r0, $sp 124 124 sethi $lp, hi20(ret_from_intr)
+2 -2
arch/nds32/kernel/ex-exit.S
··· 138 138 #ifdef CONFIG_TRACE_IRQFLAGS 139 139 lwi $p0, [$sp+(#IPSW_OFFSET)] 140 140 andi $p0, $p0, #0x1 141 - la $r10, trace_hardirqs_off 142 - la $r9, trace_hardirqs_on 141 + la $r10, __trace_hardirqs_off 142 + la $r9, __trace_hardirqs_on 143 143 cmovz $r9, $p0, $r10 144 144 jral $r9 145 145 #endif
+309
arch/nds32/kernel/ftrace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/ftrace.h> 4 + #include <linux/uaccess.h> 5 + #include <asm/cacheflush.h> 6 + 7 + #ifndef CONFIG_DYNAMIC_FTRACE 8 + extern void (*ftrace_trace_function)(unsigned long, unsigned long, 9 + struct ftrace_ops*, struct pt_regs*); 10 + extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); 11 + extern void ftrace_graph_caller(void); 12 + 13 + noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, 14 + struct ftrace_ops *op, struct pt_regs *regs) 15 + { 16 + __asm__ (""); /* avoid to optimize as pure function */ 17 + } 18 + 19 + noinline void _mcount(unsigned long parent_ip) 20 + { 21 + /* save all state by the compiler prologue */ 22 + 23 + unsigned long ip = (unsigned long)__builtin_return_address(0); 24 + 25 + if (ftrace_trace_function != ftrace_stub) 26 + ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip, 27 + NULL, NULL); 28 + 29 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 30 + if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub 31 + || ftrace_graph_entry != ftrace_graph_entry_stub) 32 + ftrace_graph_caller(); 33 + #endif 34 + 35 + /* restore all state by the compiler epilogue */ 36 + } 37 + EXPORT_SYMBOL(_mcount); 38 + 39 + #else /* CONFIG_DYNAMIC_FTRACE */ 40 + 41 + noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, 42 + struct ftrace_ops *op, struct pt_regs *regs) 43 + { 44 + __asm__ (""); /* avoid to optimize as pure function */ 45 + } 46 + 47 + noinline void __naked _mcount(unsigned long parent_ip) 48 + { 49 + __asm__ (""); /* avoid to optimize as pure function */ 50 + } 51 + EXPORT_SYMBOL(_mcount); 52 + 53 + #define XSTR(s) STR(s) 54 + #define STR(s) #s 55 + void _ftrace_caller(unsigned long parent_ip) 56 + { 57 + /* save all state needed by the compiler prologue */ 58 + 59 + /* 60 + * prepare arguments for real tracing function 61 + * first arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE 62 + * second arg : parent_ip 63 
+ */ 64 + __asm__ __volatile__ ( 65 + "move $r1, %0 \n\t" 66 + "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t" 67 + : 68 + : "r" (parent_ip), "r" (__builtin_return_address(0))); 69 + 70 + /* a placeholder for the call to a real tracing function */ 71 + __asm__ __volatile__ ( 72 + "ftrace_call: \n\t" 73 + "nop \n\t" 74 + "nop \n\t" 75 + "nop \n\t"); 76 + 77 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 78 + /* a placeholder for the call to ftrace_graph_caller */ 79 + __asm__ __volatile__ ( 80 + "ftrace_graph_call: \n\t" 81 + "nop \n\t" 82 + "nop \n\t" 83 + "nop \n\t"); 84 + #endif 85 + /* restore all state needed by the compiler epilogue */ 86 + } 87 + 88 + int __init ftrace_dyn_arch_init(void) 89 + { 90 + return 0; 91 + } 92 + 93 + int ftrace_arch_code_modify_prepare(void) 94 + { 95 + set_all_modules_text_rw(); 96 + return 0; 97 + } 98 + 99 + int ftrace_arch_code_modify_post_process(void) 100 + { 101 + set_all_modules_text_ro(); 102 + return 0; 103 + } 104 + 105 + static unsigned long gen_sethi_insn(unsigned long addr) 106 + { 107 + unsigned long opcode = 0x46000000; 108 + unsigned long imm = addr >> 12; 109 + unsigned long rt_num = 0xf << 20; 110 + 111 + return ENDIAN_CONVERT(opcode | rt_num | imm); 112 + } 113 + 114 + static unsigned long gen_ori_insn(unsigned long addr) 115 + { 116 + unsigned long opcode = 0x58000000; 117 + unsigned long imm = addr & 0x0000fff; 118 + unsigned long rt_num = 0xf << 20; 119 + unsigned long ra_num = 0xf << 15; 120 + 121 + return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm); 122 + } 123 + 124 + static unsigned long gen_jral_insn(unsigned long addr) 125 + { 126 + unsigned long opcode = 0x4a000001; 127 + unsigned long rt_num = 0x1e << 20; 128 + unsigned long rb_num = 0xf << 10; 129 + 130 + return ENDIAN_CONVERT(opcode | rt_num | rb_num); 131 + } 132 + 133 + static void ftrace_gen_call_insn(unsigned long *call_insns, 134 + unsigned long addr) 135 + { 136 + call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u */ 137 + call_insns[1] = 
gen_ori_insn(addr); /* ori $r15, $r15, imm15u */ 138 + call_insns[2] = gen_jral_insn(addr); /* jral $lp, $r15 */ 139 + } 140 + 141 + static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn, 142 + unsigned long *new_insn, bool validate) 143 + { 144 + unsigned long orig_insn[3]; 145 + 146 + if (validate) { 147 + if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE)) 148 + return -EFAULT; 149 + if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE)) 150 + return -EINVAL; 151 + } 152 + 153 + if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE)) 154 + return -EPERM; 155 + 156 + return 0; 157 + } 158 + 159 + static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn, 160 + unsigned long *new_insn, bool validate) 161 + { 162 + int ret; 163 + 164 + ret = __ftrace_modify_code(pc, old_insn, new_insn, validate); 165 + if (ret) 166 + return ret; 167 + 168 + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); 169 + 170 + return ret; 171 + } 172 + 173 + int ftrace_update_ftrace_func(ftrace_func_t func) 174 + { 175 + unsigned long pc = (unsigned long)&ftrace_call; 176 + unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 177 + unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 178 + 179 + if (func != ftrace_stub) 180 + ftrace_gen_call_insn(new_insn, (unsigned long)func); 181 + 182 + return ftrace_modify_code(pc, old_insn, new_insn, false); 183 + } 184 + 185 + int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 186 + { 187 + unsigned long pc = rec->ip; 188 + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 189 + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 190 + 191 + ftrace_gen_call_insn(call_insn, addr); 192 + 193 + return ftrace_modify_code(pc, nop_insn, call_insn, true); 194 + } 195 + 196 + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 197 + unsigned long addr) 198 + { 199 + unsigned long pc = rec->ip; 200 + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, 
INSN_NOP}; 201 + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 202 + 203 + ftrace_gen_call_insn(call_insn, addr); 204 + 205 + return ftrace_modify_code(pc, call_insn, nop_insn, true); 206 + } 207 + #endif /* CONFIG_DYNAMIC_FTRACE */ 208 + 209 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 210 + void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, 211 + unsigned long frame_pointer) 212 + { 213 + unsigned long return_hooker = (unsigned long)&return_to_handler; 214 + struct ftrace_graph_ent trace; 215 + unsigned long old; 216 + int err; 217 + 218 + if (unlikely(atomic_read(&current->tracing_graph_pause))) 219 + return; 220 + 221 + old = *parent; 222 + 223 + trace.func = self_addr; 224 + trace.depth = current->curr_ret_stack + 1; 225 + 226 + /* Only trace if the calling function expects to */ 227 + if (!ftrace_graph_entry(&trace)) 228 + return; 229 + 230 + err = ftrace_push_return_trace(old, self_addr, &trace.depth, 231 + frame_pointer, NULL); 232 + 233 + if (err == -EBUSY) 234 + return; 235 + 236 + *parent = return_hooker; 237 + } 238 + 239 + noinline void ftrace_graph_caller(void) 240 + { 241 + unsigned long *parent_ip = 242 + (unsigned long *)(__builtin_frame_address(2) - 4); 243 + 244 + unsigned long selfpc = 245 + (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE); 246 + 247 + unsigned long frame_pointer = 248 + (unsigned long)__builtin_frame_address(3); 249 + 250 + prepare_ftrace_return(parent_ip, selfpc, frame_pointer); 251 + } 252 + 253 + extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer); 254 + void __naked return_to_handler(void) 255 + { 256 + __asm__ __volatile__ ( 257 + /* save state needed by the ABI */ 258 + "smw.adm $r0,[$sp],$r1,#0x0 \n\t" 259 + 260 + /* get original return address */ 261 + "move $r0, $fp \n\t" 262 + "bal ftrace_return_to_handler\n\t" 263 + "move $lp, $r0 \n\t" 264 + 265 + /* restore state nedded by the ABI */ 266 + "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); 267 + } 268 + 269 + 
#ifdef CONFIG_DYNAMIC_FTRACE 270 + extern unsigned long ftrace_graph_call; 271 + 272 + static int ftrace_modify_graph_caller(bool enable) 273 + { 274 + unsigned long pc = (unsigned long)&ftrace_graph_call; 275 + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 276 + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 277 + 278 + ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller); 279 + 280 + if (enable) 281 + return ftrace_modify_code(pc, nop_insn, call_insn, true); 282 + else 283 + return ftrace_modify_code(pc, call_insn, nop_insn, true); 284 + } 285 + 286 + int ftrace_enable_ftrace_graph_caller(void) 287 + { 288 + return ftrace_modify_graph_caller(true); 289 + } 290 + 291 + int ftrace_disable_ftrace_graph_caller(void) 292 + { 293 + return ftrace_modify_graph_caller(false); 294 + } 295 + #endif /* CONFIG_DYNAMIC_FTRACE */ 296 + 297 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 298 + 299 + 300 + #ifdef CONFIG_TRACE_IRQFLAGS 301 + noinline void __trace_hardirqs_off(void) 302 + { 303 + trace_hardirqs_off(); 304 + } 305 + noinline void __trace_hardirqs_on(void) 306 + { 307 + trace_hardirqs_on(); 308 + } 309 + #endif /* CONFIG_TRACE_IRQFLAGS */
+2 -2
arch/nds32/kernel/module.c
··· 40 40 41 41 tmp2 = tmp & loc_mask; 42 42 if (partial_in_place) { 43 - tmp &= (!loc_mask); 43 + tmp &= (~loc_mask); 44 44 tmp = 45 45 tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); 46 46 } else { ··· 70 70 71 71 tmp2 = tmp & loc_mask; 72 72 if (partial_in_place) { 73 - tmp &= (!loc_mask); 73 + tmp &= (~loc_mask); 74 74 tmp = 75 75 tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); 76 76 } else {
+5 -1
arch/nds32/kernel/stacktrace.c
··· 4 4 #include <linux/sched/debug.h> 5 5 #include <linux/sched/task_stack.h> 6 6 #include <linux/stacktrace.h> 7 + #include <linux/ftrace.h> 7 8 8 9 void save_stack_trace(struct stack_trace *trace) 9 10 { ··· 17 16 unsigned long *fpn; 18 17 int skip = trace->skip; 19 18 int savesched; 19 + int graph_idx = 0; 20 20 21 21 if (tsk == current) { 22 22 __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); ··· 31 29 && (fpn >= (unsigned long *)TASK_SIZE)) { 32 30 unsigned long lpp, fpp; 33 31 34 - lpp = fpn[-1]; 32 + lpp = fpn[LP_OFFSET]; 35 33 fpp = fpn[FP_OFFSET]; 36 34 if (!__kernel_text_address(lpp)) 37 35 break; 36 + else 37 + lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL); 38 38 39 39 if (savesched || !in_sched_functions(lpp)) { 40 40 if (skip) {
+9 -33
arch/nds32/kernel/traps.c
··· 8 8 #include <linux/kdebug.h> 9 9 #include <linux/sched/task_stack.h> 10 10 #include <linux/uaccess.h> 11 + #include <linux/ftrace.h> 11 12 12 13 #include <asm/proc-fns.h> 13 14 #include <asm/unistd.h> ··· 95 94 set_fs(fs); 96 95 } 97 96 98 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 99 - #include <linux/ftrace.h> 100 - static void 101 - get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) 102 - { 103 - if (*addr == (unsigned long)return_to_handler) { 104 - int index = tsk->curr_ret_stack; 105 - 106 - if (tsk->ret_stack && index >= *graph) { 107 - index -= *graph; 108 - *addr = tsk->ret_stack[index].ret; 109 - (*graph)++; 110 - } 111 - } 112 - } 113 - #else 114 - static inline void 115 - get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) 116 - { 117 - } 118 - #endif 119 - 120 97 #define LOOP_TIMES (100) 121 98 static void __dump(struct task_struct *tsk, unsigned long *base_reg) 122 99 { ··· 105 126 while (!kstack_end(base_reg)) { 106 127 ret_addr = *base_reg++; 107 128 if (__kernel_text_address(ret_addr)) { 108 - get_real_ret_addr(&ret_addr, tsk, &graph); 129 + ret_addr = ftrace_graph_ret_addr( 130 + tsk, &graph, ret_addr, NULL); 109 131 print_ip_sym(ret_addr); 110 132 } 111 133 if (--cnt < 0) ··· 117 137 !((unsigned long)base_reg & 0x3) && 118 138 ((unsigned long)base_reg >= TASK_SIZE)) { 119 139 unsigned long next_fp; 120 - #if !defined(NDS32_ABI_2) 121 - ret_addr = base_reg[0]; 122 - next_fp = base_reg[1]; 123 - #else 124 - ret_addr = base_reg[-1]; 140 + ret_addr = base_reg[LP_OFFSET]; 125 141 next_fp = base_reg[FP_OFFSET]; 126 - #endif 127 142 if (__kernel_text_address(ret_addr)) { 128 - get_real_ret_addr(&ret_addr, tsk, &graph); 143 + 144 + ret_addr = ftrace_graph_ret_addr( 145 + tsk, &graph, ret_addr, NULL); 129 146 print_ip_sym(ret_addr); 130 147 } 131 148 if (--cnt < 0) ··· 173 196 pr_emerg("CPU: %i\n", smp_processor_id()); 174 197 show_regs(regs); 175 198 pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n", 
176 - tsk->comm, tsk->pid, task_thread_info(tsk) + 1); 199 + tsk->comm, tsk->pid, end_of_stack(tsk)); 177 200 178 201 if (!user_mode(regs) || in_interrupt()) { 179 - dump_mem("Stack: ", regs->sp, 180 - THREAD_SIZE + (unsigned long)task_thread_info(tsk)); 202 + dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK); 181 203 dump_instr(regs); 182 204 dump_stack(); 183 205 }
+12
arch/nds32/kernel/vmlinux.lds.S
··· 13 13 ENTRY(_stext_lma) 14 14 jiffies = jiffies_64; 15 15 16 + #if defined(CONFIG_GCOV_KERNEL) 17 + #define NDS32_EXIT_KEEP(x) x 18 + #else 19 + #define NDS32_EXIT_KEEP(x) 20 + #endif 21 + 16 22 SECTIONS 17 23 { 18 24 _stext_lma = TEXTADDR - LOAD_OFFSET; 19 25 . = TEXTADDR; 20 26 __init_begin = .; 21 27 HEAD_TEXT_SECTION 28 + .exit.text : { 29 + NDS32_EXIT_KEEP(EXIT_TEXT) 30 + } 22 31 INIT_TEXT_SECTION(PAGE_SIZE) 23 32 INIT_DATA_SECTION(16) 33 + .exit.data : { 34 + NDS32_EXIT_KEEP(EXIT_DATA) 35 + } 24 36 PERCPU_SECTION(L1_CACHE_BYTES) 25 37 __init_end = .; 26 38
+1 -1
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 358 358 unsigned long pp, key; 359 359 unsigned long v, orig_v, gr; 360 360 __be64 *hptep; 361 - int index; 361 + long int index; 362 362 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); 363 363 364 364 if (kvm_is_radix(vcpu->kvm))
+3 -3
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 725 725 gpa, shift); 726 726 kvmppc_radix_tlbie_page(kvm, gpa, shift); 727 727 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { 728 - unsigned long npages = 1; 728 + unsigned long psize = PAGE_SIZE; 729 729 if (shift) 730 - npages = 1ul << (shift - PAGE_SHIFT); 731 - kvmppc_update_dirty_map(memslot, gfn, npages); 730 + psize = 1ul << shift; 731 + kvmppc_update_dirty_map(memslot, gfn, psize); 732 732 } 733 733 } 734 734 return 0;
-7
arch/riscv/kernel/setup.c
··· 85 85 #ifdef CONFIG_BLK_DEV_INITRD 86 86 static void __init setup_initrd(void) 87 87 { 88 - extern char __initramfs_start[]; 89 - extern unsigned long __initramfs_size; 90 88 unsigned long size; 91 - 92 - if (__initramfs_size > 0) { 93 - initrd_start = (unsigned long)(&__initramfs_start); 94 - initrd_end = initrd_start + __initramfs_size; 95 - } 96 89 97 90 if (initrd_start >= initrd_end) { 98 91 printk(KERN_INFO "initrd not found or empty");
+7 -1
arch/s390/include/asm/mmu.h
··· 16 16 unsigned long asce; 17 17 unsigned long asce_limit; 18 18 unsigned long vdso_base; 19 - /* The mmu context allocates 4K page tables. */ 19 + /* 20 + * The following bitfields need a down_write on the mm 21 + * semaphore when they are written to. As they are only 22 + * written once, they can be read without a lock. 23 + * 24 + * The mmu context allocates 4K page tables. 25 + */ 20 26 unsigned int alloc_pgste:1; 21 27 /* The mmu context uses extended page tables. */ 22 28 unsigned int has_pgste:1;
+2
arch/s390/kvm/kvm-s390.c
··· 695 695 r = -EINVAL; 696 696 else { 697 697 r = 0; 698 + down_write(&kvm->mm->mmap_sem); 698 699 kvm->mm->context.allow_gmap_hpage_1m = 1; 700 + up_write(&kvm->mm->mmap_sem); 699 701 /* 700 702 * We might have to create fake 4k page 701 703 * tables. To avoid that the hardware works on
+18 -12
arch/s390/kvm/priv.c
··· 280 280 goto retry; 281 281 } 282 282 } 283 - if (rc) 284 - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 285 283 up_read(&current->mm->mmap_sem); 284 + if (rc == -EFAULT) 285 + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 286 + if (rc < 0) 287 + return rc; 286 288 vcpu->run->s.regs.gprs[reg1] &= ~0xff; 287 289 vcpu->run->s.regs.gprs[reg1] |= key; 288 290 return 0; ··· 326 324 goto retry; 327 325 } 328 326 } 329 - if (rc < 0) 330 - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 331 327 up_read(&current->mm->mmap_sem); 328 + if (rc == -EFAULT) 329 + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 330 + if (rc < 0) 331 + return rc; 332 332 kvm_s390_set_psw_cc(vcpu, rc); 333 333 return 0; 334 334 } ··· 394 390 FAULT_FLAG_WRITE, &unlocked); 395 391 rc = !rc ? -EAGAIN : rc; 396 392 } 393 + up_read(&current->mm->mmap_sem); 397 394 if (rc == -EFAULT) 398 395 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 399 - 400 - up_read(&current->mm->mmap_sem); 401 - if (rc >= 0) 402 - start += PAGE_SIZE; 396 + if (rc < 0) 397 + return rc; 398 + start += PAGE_SIZE; 403 399 } 404 400 405 401 if (m3 & (SSKE_MC | SSKE_MR)) { ··· 1006 1002 FAULT_FLAG_WRITE, &unlocked); 1007 1003 rc = !rc ? -EAGAIN : rc; 1008 1004 } 1005 + up_read(&current->mm->mmap_sem); 1009 1006 if (rc == -EFAULT) 1010 1007 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 1011 - 1012 - up_read(&current->mm->mmap_sem); 1013 - if (rc >= 0) 1014 - start += PAGE_SIZE; 1008 + if (rc == -EAGAIN) 1009 + continue; 1010 + if (rc < 0) 1011 + return rc; 1015 1012 } 1013 + start += PAGE_SIZE; 1016 1014 } 1017 1015 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 1018 1016 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
+2 -1
arch/s390/kvm/vsie.c
··· 173 173 return set_validity_icpt(scb_s, 0x0039U); 174 174 175 175 /* copy only the wrapping keys */ 176 - if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56)) 176 + if (read_guest_real(vcpu, crycb_addr + 72, 177 + vsie_page->crycb.dea_wrapping_key_mask, 56)) 177 178 return set_validity_icpt(scb_s, 0x0035U); 178 179 179 180 scb_s->ecb3 |= ecb3_flags;
+6 -6
arch/x86/include/asm/atomic.h
··· 80 80 * true if the result is zero, or false for all 81 81 * other cases. 82 82 */ 83 - #define arch_atomic_sub_and_test arch_atomic_sub_and_test 84 83 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) 85 84 { 86 85 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); 87 86 } 87 + #define arch_atomic_sub_and_test arch_atomic_sub_and_test 88 88 89 89 /** 90 90 * arch_atomic_inc - increment atomic variable ··· 92 92 * 93 93 * Atomically increments @v by 1. 94 94 */ 95 - #define arch_atomic_inc arch_atomic_inc 96 95 static __always_inline void arch_atomic_inc(atomic_t *v) 97 96 { 98 97 asm volatile(LOCK_PREFIX "incl %0" 99 98 : "+m" (v->counter)); 100 99 } 100 + #define arch_atomic_inc arch_atomic_inc 101 101 102 102 /** 103 103 * arch_atomic_dec - decrement atomic variable ··· 105 105 * 106 106 * Atomically decrements @v by 1. 107 107 */ 108 - #define arch_atomic_dec arch_atomic_dec 109 108 static __always_inline void arch_atomic_dec(atomic_t *v) 110 109 { 111 110 asm volatile(LOCK_PREFIX "decl %0" 112 111 : "+m" (v->counter)); 113 112 } 113 + #define arch_atomic_dec arch_atomic_dec 114 114 115 115 /** 116 116 * arch_atomic_dec_and_test - decrement and test ··· 120 120 * returns true if the result is 0, or false for all other 121 121 * cases. 122 122 */ 123 - #define arch_atomic_dec_and_test arch_atomic_dec_and_test 124 123 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) 125 124 { 126 125 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); 127 126 } 127 + #define arch_atomic_dec_and_test arch_atomic_dec_and_test 128 128 129 129 /** 130 130 * arch_atomic_inc_and_test - increment and test ··· 134 134 * and returns true if the result is zero, or false for all 135 135 * other cases. 
136 136 */ 137 - #define arch_atomic_inc_and_test arch_atomic_inc_and_test 138 137 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) 139 138 { 140 139 GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); 141 140 } 141 + #define arch_atomic_inc_and_test arch_atomic_inc_and_test 142 142 143 143 /** 144 144 * arch_atomic_add_negative - add and test if negative ··· 149 149 * if the result is negative, or false when 150 150 * result is greater than or equal to zero. 151 151 */ 152 - #define arch_atomic_add_negative arch_atomic_add_negative 153 152 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) 154 153 { 155 154 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); 156 155 } 156 + #define arch_atomic_add_negative arch_atomic_add_negative 157 157 158 158 /** 159 159 * arch_atomic_add_return - add integer and return
+4 -4
arch/x86/include/asm/atomic64_32.h
··· 205 205 * 206 206 * Atomically increments @v by 1. 207 207 */ 208 - #define arch_atomic64_inc arch_atomic64_inc 209 208 static inline void arch_atomic64_inc(atomic64_t *v) 210 209 { 211 210 __alternative_atomic64(inc, inc_return, /* no output */, 212 211 "S" (v) : "memory", "eax", "ecx", "edx"); 213 212 } 213 + #define arch_atomic64_inc arch_atomic64_inc 214 214 215 215 /** 216 216 * arch_atomic64_dec - decrement atomic64 variable ··· 218 218 * 219 219 * Atomically decrements @v by 1. 220 220 */ 221 - #define arch_atomic64_dec arch_atomic64_dec 222 221 static inline void arch_atomic64_dec(atomic64_t *v) 223 222 { 224 223 __alternative_atomic64(dec, dec_return, /* no output */, 225 224 "S" (v) : "memory", "eax", "ecx", "edx"); 226 225 } 226 + #define arch_atomic64_dec arch_atomic64_dec 227 227 228 228 /** 229 229 * arch_atomic64_add_unless - add unless the number is a given value ··· 245 245 return (int)a; 246 246 } 247 247 248 - #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero 249 248 static inline int arch_atomic64_inc_not_zero(atomic64_t *v) 250 249 { 251 250 int r; ··· 252 253 "S" (v) : "ecx", "edx", "memory"); 253 254 return r; 254 255 } 256 + #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero 255 257 256 - #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive 257 258 static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) 258 259 { 259 260 long long r; ··· 261 262 "S" (v) : "ecx", "memory"); 262 263 return r; 263 264 } 265 + #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive 264 266 265 267 #undef alternative_atomic64 266 268 #undef __alternative_atomic64
+6 -6
arch/x86/include/asm/atomic64_64.h
··· 71 71 * true if the result is zero, or false for all 72 72 * other cases. 73 73 */ 74 - #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test 75 74 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) 76 75 { 77 76 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); 78 77 } 78 + #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test 79 79 80 80 /** 81 81 * arch_atomic64_inc - increment atomic64 variable ··· 83 83 * 84 84 * Atomically increments @v by 1. 85 85 */ 86 - #define arch_atomic64_inc arch_atomic64_inc 87 86 static __always_inline void arch_atomic64_inc(atomic64_t *v) 88 87 { 89 88 asm volatile(LOCK_PREFIX "incq %0" 90 89 : "=m" (v->counter) 91 90 : "m" (v->counter)); 92 91 } 92 + #define arch_atomic64_inc arch_atomic64_inc 93 93 94 94 /** 95 95 * arch_atomic64_dec - decrement atomic64 variable ··· 97 97 * 98 98 * Atomically decrements @v by 1. 99 99 */ 100 - #define arch_atomic64_dec arch_atomic64_dec 101 100 static __always_inline void arch_atomic64_dec(atomic64_t *v) 102 101 { 103 102 asm volatile(LOCK_PREFIX "decq %0" 104 103 : "=m" (v->counter) 105 104 : "m" (v->counter)); 106 105 } 106 + #define arch_atomic64_dec arch_atomic64_dec 107 107 108 108 /** 109 109 * arch_atomic64_dec_and_test - decrement and test ··· 113 113 * returns true if the result is 0, or false for all other 114 114 * cases. 115 115 */ 116 - #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test 117 116 static inline bool arch_atomic64_dec_and_test(atomic64_t *v) 118 117 { 119 118 GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); 120 119 } 120 + #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test 121 121 122 122 /** 123 123 * arch_atomic64_inc_and_test - increment and test ··· 127 127 * and returns true if the result is zero, or false for all 128 128 * other cases. 
129 129 */ 130 - #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test 131 130 static inline bool arch_atomic64_inc_and_test(atomic64_t *v) 132 131 { 133 132 GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); 134 133 } 134 + #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test 135 135 136 136 /** 137 137 * arch_atomic64_add_negative - add and test if negative ··· 142 142 * if the result is negative, or false when 143 143 * result is greater than or equal to zero. 144 144 */ 145 - #define arch_atomic64_add_negative arch_atomic64_add_negative 146 145 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) 147 146 { 148 147 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); 149 148 } 149 + #define arch_atomic64_add_negative arch_atomic64_add_negative 150 150 151 151 /** 152 152 * arch_atomic64_add_return - add and return
+11 -1
arch/x86/include/asm/kdebug.h
··· 22 22 DIE_NMIUNKNOWN, 23 23 }; 24 24 25 + enum show_regs_mode { 26 + SHOW_REGS_SHORT, 27 + /* 28 + * For when userspace crashed, but we don't think it's our fault, and 29 + * therefore don't print kernel registers. 30 + */ 31 + SHOW_REGS_USER, 32 + SHOW_REGS_ALL 33 + }; 34 + 25 35 extern void die(const char *, struct pt_regs *,long); 26 36 extern int __must_check __die(const char *, struct pt_regs *, long); 27 37 extern void show_stack_regs(struct pt_regs *regs); 28 - extern void __show_regs(struct pt_regs *regs, int all); 38 + extern void __show_regs(struct pt_regs *regs, enum show_regs_mode); 29 39 extern void show_iret_regs(struct pt_regs *regs); 30 40 extern unsigned long oops_begin(void); 31 41 extern void oops_end(unsigned long, struct pt_regs *, int signr);
+7 -15
arch/x86/include/asm/kvm_host.h
··· 1237 1237 #define EMULTYPE_NO_DECODE (1 << 0) 1238 1238 #define EMULTYPE_TRAP_UD (1 << 1) 1239 1239 #define EMULTYPE_SKIP (1 << 2) 1240 - #define EMULTYPE_RETRY (1 << 3) 1241 - #define EMULTYPE_NO_REEXECUTE (1 << 4) 1242 - #define EMULTYPE_NO_UD_ON_FAIL (1 << 5) 1243 - #define EMULTYPE_VMWARE (1 << 6) 1244 - int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, 1245 - int emulation_type, void *insn, int insn_len); 1246 - 1247 - static inline int emulate_instruction(struct kvm_vcpu *vcpu, 1248 - int emulation_type) 1249 - { 1250 - return x86_emulate_instruction(vcpu, 0, 1251 - emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); 1252 - } 1240 + #define EMULTYPE_ALLOW_RETRY (1 << 3) 1241 + #define EMULTYPE_NO_UD_ON_FAIL (1 << 4) 1242 + #define EMULTYPE_VMWARE (1 << 5) 1243 + int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); 1244 + int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 1245 + void *insn, int insn_len); 1253 1246 1254 1247 void kvm_enable_efer_bits(u64); 1255 1248 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); ··· 1443 1450 ____kvm_handle_fault_on_reboot(insn, "") 1444 1451 1445 1452 #define KVM_ARCH_WANT_MMU_NOTIFIER 1446 - int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 1447 1453 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); 1448 1454 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); 1449 1455 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); ··· 1455 1463 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); 1456 1464 1457 1465 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, 1458 - unsigned long ipi_bitmap_high, int min, 1466 + unsigned long ipi_bitmap_high, u32 min, 1459 1467 unsigned long icr, int op_64_bit); 1460 1468 1461 1469 u64 kvm_get_arch_capabilities(void);
+1 -1
arch/x86/include/asm/pgtable.h
··· 1195 1195 return xchg(pmdp, pmd); 1196 1196 } else { 1197 1197 pmd_t old = *pmdp; 1198 - *pmdp = pmd; 1198 + WRITE_ONCE(*pmdp, pmd); 1199 1199 return old; 1200 1200 } 1201 1201 }
+11 -11
arch/x86/include/asm/pgtable_64.h
··· 55 55 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte); 56 56 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); 57 57 58 + static inline void native_set_pte(pte_t *ptep, pte_t pte) 59 + { 60 + WRITE_ONCE(*ptep, pte); 61 + } 62 + 58 63 static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, 59 64 pte_t *ptep) 60 65 { 61 - *ptep = native_make_pte(0); 62 - } 63 - 64 - static inline void native_set_pte(pte_t *ptep, pte_t pte) 65 - { 66 - *ptep = pte; 66 + native_set_pte(ptep, native_make_pte(0)); 67 67 } 68 68 69 69 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) ··· 73 73 74 74 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) 75 75 { 76 - *pmdp = pmd; 76 + WRITE_ONCE(*pmdp, pmd); 77 77 } 78 78 79 79 static inline void native_pmd_clear(pmd_t *pmd) ··· 109 109 110 110 static inline void native_set_pud(pud_t *pudp, pud_t pud) 111 111 { 112 - *pudp = pud; 112 + WRITE_ONCE(*pudp, pud); 113 113 } 114 114 115 115 static inline void native_pud_clear(pud_t *pud) ··· 137 137 pgd_t pgd; 138 138 139 139 if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { 140 - *p4dp = p4d; 140 + WRITE_ONCE(*p4dp, p4d); 141 141 return; 142 142 } 143 143 144 144 pgd = native_make_pgd(native_p4d_val(p4d)); 145 145 pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd); 146 - *p4dp = native_make_p4d(native_pgd_val(pgd)); 146 + WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd))); 147 147 } 148 148 149 149 static inline void native_p4d_clear(p4d_t *p4d) ··· 153 153 154 154 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) 155 155 { 156 - *pgdp = pti_set_user_pgtbl(pgdp, pgd); 156 + WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd)); 157 157 } 158 158 159 159 static inline void native_pgd_clear(pgd_t *pgd)
+1 -1
arch/x86/kernel/apic/vector.c
··· 413 413 if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) { 414 414 /* Something in the core code broke! Survive gracefully */ 415 415 pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq); 416 - return EINVAL; 416 + return -EINVAL; 417 417 } 418 418 419 419 ret = assign_managed_vector(irqd, vector_searchmask);
+16 -8
arch/x86/kernel/cpu/microcode/amd.c
··· 504 504 struct microcode_amd *mc_amd; 505 505 struct ucode_cpu_info *uci; 506 506 struct ucode_patch *p; 507 + enum ucode_state ret; 507 508 u32 rev, dummy; 508 509 509 510 BUG_ON(raw_smp_processor_id() != cpu); ··· 522 521 523 522 /* need to apply patch? */ 524 523 if (rev >= mc_amd->hdr.patch_id) { 525 - c->microcode = rev; 526 - uci->cpu_sig.rev = rev; 527 - return UCODE_OK; 524 + ret = UCODE_OK; 525 + goto out; 528 526 } 529 527 530 528 if (__apply_microcode_amd(mc_amd)) { ··· 531 531 cpu, mc_amd->hdr.patch_id); 532 532 return UCODE_ERROR; 533 533 } 534 - pr_info("CPU%d: new patch_level=0x%08x\n", cpu, 535 - mc_amd->hdr.patch_id); 536 534 537 - uci->cpu_sig.rev = mc_amd->hdr.patch_id; 538 - c->microcode = mc_amd->hdr.patch_id; 535 + rev = mc_amd->hdr.patch_id; 536 + ret = UCODE_UPDATED; 539 537 540 - return UCODE_UPDATED; 538 + pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); 539 + 540 + out: 541 + uci->cpu_sig.rev = rev; 542 + c->microcode = rev; 543 + 544 + /* Update boot_cpu_data's revision too, if we're on the BSP: */ 545 + if (c->cpu_index == boot_cpu_data.cpu_index) 546 + boot_cpu_data.microcode = rev; 547 + 548 + return ret; 541 549 } 542 550 543 551 static int install_equiv_cpu_table(const u8 *buf)
+13 -6
arch/x86/kernel/cpu/microcode/intel.c
··· 795 795 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 796 796 struct cpuinfo_x86 *c = &cpu_data(cpu); 797 797 struct microcode_intel *mc; 798 + enum ucode_state ret; 798 799 static int prev_rev; 799 800 u32 rev; 800 801 ··· 818 817 */ 819 818 rev = intel_get_microcode_revision(); 820 819 if (rev >= mc->hdr.rev) { 821 - uci->cpu_sig.rev = rev; 822 - c->microcode = rev; 823 - return UCODE_OK; 820 + ret = UCODE_OK; 821 + goto out; 824 822 } 825 823 826 824 /* ··· 848 848 prev_rev = rev; 849 849 } 850 850 851 - uci->cpu_sig.rev = rev; 852 - c->microcode = rev; 851 + ret = UCODE_UPDATED; 853 852 854 - return UCODE_UPDATED; 853 + out: 854 + uci->cpu_sig.rev = rev; 855 + c->microcode = rev; 856 + 857 + /* Update boot_cpu_data's revision too, if we're on the BSP: */ 858 + if (c->cpu_index == boot_cpu_data.cpu_index) 859 + boot_cpu_data.microcode = rev; 860 + 861 + return ret; 855 862 } 856 863 857 864 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+3 -8
arch/x86/kernel/dumpstack.c
··· 146 146 * they can be printed in the right context. 147 147 */ 148 148 if (!partial && on_stack(info, regs, sizeof(*regs))) { 149 - __show_regs(regs, 0); 149 + __show_regs(regs, SHOW_REGS_SHORT); 150 150 151 151 } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, 152 152 IRET_FRAME_SIZE)) { ··· 344 344 oops_exit(); 345 345 346 346 /* Executive summary in case the oops scrolled away */ 347 - __show_regs(&exec_summary_regs, true); 347 + __show_regs(&exec_summary_regs, SHOW_REGS_ALL); 348 348 349 349 if (!signr) 350 350 return; ··· 407 407 408 408 void show_regs(struct pt_regs *regs) 409 409 { 410 - bool all = true; 411 - 412 410 show_regs_print_info(KERN_DEFAULT); 413 411 414 - if (IS_ENABLED(CONFIG_X86_32)) 415 - all = !user_mode(regs); 416 - 417 - __show_regs(regs, all); 412 + __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL); 418 413 419 414 /* 420 415 * When in-kernel, we also print out the stack at the time of the fault..
+2 -2
arch/x86/kernel/process_32.c
··· 59 59 #include <asm/intel_rdt_sched.h> 60 60 #include <asm/proto.h> 61 61 62 - void __show_regs(struct pt_regs *regs, int all) 62 + void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) 63 63 { 64 64 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 65 65 unsigned long d0, d1, d2, d3, d6, d7; ··· 85 85 printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n", 86 86 (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags); 87 87 88 - if (!all) 88 + if (mode != SHOW_REGS_ALL) 89 89 return; 90 90 91 91 cr0 = read_cr0();
+10 -2
arch/x86/kernel/process_64.c
··· 62 62 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch); 63 63 64 64 /* Prints also some state that isn't saved in the pt_regs */ 65 - void __show_regs(struct pt_regs *regs, int all) 65 + void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) 66 66 { 67 67 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; 68 68 unsigned long d0, d1, d2, d3, d6, d7; ··· 87 87 printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", 88 88 regs->r13, regs->r14, regs->r15); 89 89 90 - if (!all) 90 + if (mode == SHOW_REGS_SHORT) 91 91 return; 92 + 93 + if (mode == SHOW_REGS_USER) { 94 + rdmsrl(MSR_FS_BASE, fs); 95 + rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 96 + printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n", 97 + fs, shadowgs); 98 + return; 99 + } 92 100 93 101 asm("movl %%ds,%0" : "=r" (ds)); 94 102 asm("movl %%cs,%0" : "=r" (cs));
+1 -1
arch/x86/kernel/tsc.c
··· 1415 1415 1416 1416 static unsigned long __init get_loops_per_jiffy(void) 1417 1417 { 1418 - unsigned long lpj = tsc_khz * KHZ; 1418 + u64 lpj = (u64)tsc_khz * KHZ; 1419 1419 1420 1420 do_div(lpj, HZ); 1421 1421 return lpj;
+20 -7
arch/x86/kvm/lapic.c
··· 548 548 } 549 549 550 550 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, 551 - unsigned long ipi_bitmap_high, int min, 551 + unsigned long ipi_bitmap_high, u32 min, 552 552 unsigned long icr, int op_64_bit) 553 553 { 554 554 int i; ··· 571 571 rcu_read_lock(); 572 572 map = rcu_dereference(kvm->arch.apic_map); 573 573 574 + if (min > map->max_apic_id) 575 + goto out; 574 576 /* Bits above cluster_size are masked in the caller. */ 575 - for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) { 576 - vcpu = map->phys_map[min + i]->vcpu; 577 - count += kvm_apic_set_irq(vcpu, &irq, NULL); 577 + for_each_set_bit(i, &ipi_bitmap_low, 578 + min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { 579 + if (map->phys_map[min + i]) { 580 + vcpu = map->phys_map[min + i]->vcpu; 581 + count += kvm_apic_set_irq(vcpu, &irq, NULL); 582 + } 578 583 } 579 584 580 585 min += cluster_size; 581 - for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) { 582 - vcpu = map->phys_map[min + i]->vcpu; 583 - count += kvm_apic_set_irq(vcpu, &irq, NULL); 586 + 587 + if (min > map->max_apic_id) 588 + goto out; 589 + 590 + for_each_set_bit(i, &ipi_bitmap_high, 591 + min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { 592 + if (map->phys_map[min + i]) { 593 + vcpu = map->phys_map[min + i]->vcpu; 594 + count += kvm_apic_set_irq(vcpu, &irq, NULL); 595 + } 584 596 } 585 597 598 + out: 586 599 rcu_read_unlock(); 587 600 return count; 588 601 }
+15 -11
arch/x86/kvm/mmu.c
··· 1853 1853 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); 1854 1854 } 1855 1855 1856 - int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 1857 - { 1858 - return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); 1859 - } 1860 - 1861 1856 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 1862 1857 { 1863 1858 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); ··· 5212 5217 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, 5213 5218 void *insn, int insn_len) 5214 5219 { 5215 - int r, emulation_type = EMULTYPE_RETRY; 5220 + int r, emulation_type = 0; 5216 5221 enum emulation_result er; 5217 5222 bool direct = vcpu->arch.mmu.direct_map; 5218 5223 ··· 5225 5230 r = RET_PF_INVALID; 5226 5231 if (unlikely(error_code & PFERR_RSVD_MASK)) { 5227 5232 r = handle_mmio_page_fault(vcpu, cr2, direct); 5228 - if (r == RET_PF_EMULATE) { 5229 - emulation_type = 0; 5233 + if (r == RET_PF_EMULATE) 5230 5234 goto emulate; 5231 - } 5232 5235 } 5233 5236 5234 5237 if (r == RET_PF_INVALID) { ··· 5253 5260 return 1; 5254 5261 } 5255 5262 5256 - if (mmio_info_in_cache(vcpu, cr2, direct)) 5257 - emulation_type = 0; 5263 + /* 5264 + * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still 5265 + * optimistically try to just unprotect the page and let the processor 5266 + * re-execute the instruction that caused the page fault. Do not allow 5267 + * retrying MMIO emulation, as it's not only pointless but could also 5268 + * cause us to enter an infinite loop because the processor will keep 5269 + * faulting on the non-existent MMIO address. Retrying an instruction 5270 + * from a nested guest is also pointless and dangerous as we are only 5271 + * explicitly shadowing L1's page tables, i.e. unprotecting something 5272 + * for L1 isn't going to magically fix whatever issue cause L2 to fail. 
5273 + */ 5274 + if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu)) 5275 + emulation_type = EMULTYPE_ALLOW_RETRY; 5258 5276 emulate: 5259 5277 /* 5260 5278 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
+9 -10
arch/x86/kvm/svm.c
··· 776 776 } 777 777 778 778 if (!svm->next_rip) { 779 - if (emulate_instruction(vcpu, EMULTYPE_SKIP) != 779 + if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) != 780 780 EMULATE_DONE) 781 781 printk(KERN_DEBUG "%s: NOP\n", __func__); 782 782 return; ··· 2715 2715 2716 2716 WARN_ON_ONCE(!enable_vmware_backdoor); 2717 2717 2718 - er = emulate_instruction(vcpu, 2718 + er = kvm_emulate_instruction(vcpu, 2719 2719 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); 2720 2720 if (er == EMULATE_USER_EXIT) 2721 2721 return 0; ··· 2819 2819 string = (io_info & SVM_IOIO_STR_MASK) != 0; 2820 2820 in = (io_info & SVM_IOIO_TYPE_MASK) != 0; 2821 2821 if (string) 2822 - return emulate_instruction(vcpu, 0) == EMULATE_DONE; 2822 + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 2823 2823 2824 2824 port = io_info >> 16; 2825 2825 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; ··· 3861 3861 static int invlpg_interception(struct vcpu_svm *svm) 3862 3862 { 3863 3863 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) 3864 - return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; 3864 + return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; 3865 3865 3866 3866 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); 3867 3867 return kvm_skip_emulated_instruction(&svm->vcpu); ··· 3869 3869 3870 3870 static int emulate_on_interception(struct vcpu_svm *svm) 3871 3871 { 3872 - return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; 3872 + return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; 3873 3873 } 3874 3874 3875 3875 static int rsm_interception(struct vcpu_svm *svm) 3876 3876 { 3877 - return x86_emulate_instruction(&svm->vcpu, 0, 0, 3878 - rsm_ins_bytes, 2) == EMULATE_DONE; 3877 + return kvm_emulate_instruction_from_buffer(&svm->vcpu, 3878 + rsm_ins_bytes, 2) == EMULATE_DONE; 3879 3879 } 3880 3880 3881 3881 static int rdpmc_interception(struct vcpu_svm *svm) ··· 4700 4700 ret = avic_unaccel_trap_write(svm); 4701 4701 } else { 4702 4702 /* 
Handling Fault */ 4703 - ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); 4703 + ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); 4704 4704 } 4705 4705 4706 4706 return ret; ··· 6747 6747 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) 6748 6748 { 6749 6749 unsigned long vaddr, vaddr_end, next_vaddr; 6750 - unsigned long dst_vaddr, dst_vaddr_end; 6750 + unsigned long dst_vaddr; 6751 6751 struct page **src_p, **dst_p; 6752 6752 struct kvm_sev_dbg debug; 6753 6753 unsigned long n; ··· 6763 6763 size = debug.len; 6764 6764 vaddr_end = vaddr + size; 6765 6765 dst_vaddr = debug.dst_uaddr; 6766 - dst_vaddr_end = dst_vaddr + size; 6767 6766 6768 6767 for (; vaddr < vaddr_end; vaddr = next_vaddr) { 6769 6768 int len, s_off, d_off;
+31 -12
arch/x86/kvm/vmx.c
··· 6983 6983 * Cause the #SS fault with 0 error code in VM86 mode. 6984 6984 */ 6985 6985 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { 6986 - if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { 6986 + if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) { 6987 6987 if (vcpu->arch.halt_request) { 6988 6988 vcpu->arch.halt_request = 0; 6989 6989 return kvm_vcpu_halt(vcpu); ··· 7054 7054 7055 7055 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { 7056 7056 WARN_ON_ONCE(!enable_vmware_backdoor); 7057 - er = emulate_instruction(vcpu, 7057 + er = kvm_emulate_instruction(vcpu, 7058 7058 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); 7059 7059 if (er == EMULATE_USER_EXIT) 7060 7060 return 0; ··· 7157 7157 ++vcpu->stat.io_exits; 7158 7158 7159 7159 if (string) 7160 - return emulate_instruction(vcpu, 0) == EMULATE_DONE; 7160 + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 7161 7161 7162 7162 port = exit_qualification >> 16; 7163 7163 size = (exit_qualification & 7) + 1; ··· 7231 7231 static int handle_desc(struct kvm_vcpu *vcpu) 7232 7232 { 7233 7233 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); 7234 - return emulate_instruction(vcpu, 0) == EMULATE_DONE; 7234 + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 7235 7235 } 7236 7236 7237 7237 static int handle_cr(struct kvm_vcpu *vcpu) ··· 7480 7480 7481 7481 static int handle_invd(struct kvm_vcpu *vcpu) 7482 7482 { 7483 - return emulate_instruction(vcpu, 0) == EMULATE_DONE; 7483 + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 7484 7484 } 7485 7485 7486 7486 static int handle_invlpg(struct kvm_vcpu *vcpu) ··· 7547 7547 return kvm_skip_emulated_instruction(vcpu); 7548 7548 } 7549 7549 } 7550 - return emulate_instruction(vcpu, 0) == EMULATE_DONE; 7550 + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 7551 7551 } 7552 7552 7553 7553 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) ··· 7704 7704 if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) 7705 7705 return 
kvm_skip_emulated_instruction(vcpu); 7706 7706 else 7707 - return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP, 7708 - NULL, 0) == EMULATE_DONE; 7707 + return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) == 7708 + EMULATE_DONE; 7709 7709 } 7710 7710 7711 7711 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); ··· 7748 7748 if (kvm_test_request(KVM_REQ_EVENT, vcpu)) 7749 7749 return 1; 7750 7750 7751 - err = emulate_instruction(vcpu, 0); 7751 + err = kvm_emulate_instruction(vcpu, 0); 7752 7752 7753 7753 if (err == EMULATE_USER_EXIT) { 7754 7754 ++vcpu->stat.mmio_exits; ··· 12537 12537 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 12538 12538 bool from_vmentry = !!exit_qual; 12539 12539 u32 dummy_exit_qual; 12540 + u32 vmcs01_cpu_exec_ctrl; 12540 12541 int r = 0; 12542 + 12543 + vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 12541 12544 12542 12545 enter_guest_mode(vcpu); 12543 12546 ··· 12575 12572 * have already been set at vmentry time and should not be reset. 12576 12573 */ 12577 12574 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); 12575 + } 12576 + 12577 + /* 12578 + * If L1 had a pending IRQ/NMI until it executed 12579 + * VMLAUNCH/VMRESUME which wasn't delivered because it was 12580 + * disallowed (e.g. interrupts disabled), L0 needs to 12581 + * evaluate if this pending event should cause an exit from L2 12582 + * to L1 or delivered directly to L2 (e.g. In case L1 don't 12583 + * intercept EXTERNAL_INTERRUPT). 12584 + * 12585 + * Usually this would be handled by L0 requesting a 12586 + * IRQ/NMI window by setting VMCS accordingly. However, 12587 + * this setting was done on VMCS01 and now VMCS02 is active 12588 + * instead. Thus, we force L0 to perform pending event 12589 + * evaluation by requesting a KVM_REQ_EVENT. 
12590 + */ 12591 + if (vmcs01_cpu_exec_ctrl & 12592 + (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) { 12593 + kvm_make_request(KVM_REQ_EVENT, vcpu); 12578 12594 } 12579 12595 12580 12596 /* ··· 14009 13987 if (check_vmentry_prereqs(vcpu, vmcs12) || 14010 13988 check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) 14011 13989 return -EINVAL; 14012 - 14013 - if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING) 14014 - vmx->nested.nested_run_pending = 1; 14015 13990 14016 13991 vmx->nested.dirty_vmcs12 = true; 14017 13992 ret = enter_vmx_non_root_mode(vcpu, NULL);
+23 -5
arch/x86/kvm/x86.c
··· 4987 4987 emul_type = 0; 4988 4988 } 4989 4989 4990 - er = emulate_instruction(vcpu, emul_type); 4990 + er = kvm_emulate_instruction(vcpu, emul_type); 4991 4991 if (er == EMULATE_USER_EXIT) 4992 4992 return 0; 4993 4993 if (er != EMULATE_DONE) ··· 5870 5870 gpa_t gpa = cr2; 5871 5871 kvm_pfn_t pfn; 5872 5872 5873 - if (emulation_type & EMULTYPE_NO_REEXECUTE) 5873 + if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) 5874 + return false; 5875 + 5876 + if (WARN_ON_ONCE(is_guest_mode(vcpu))) 5874 5877 return false; 5875 5878 5876 5879 if (!vcpu->arch.mmu.direct_map) { ··· 5961 5958 */ 5962 5959 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 5963 5960 5964 - if (!(emulation_type & EMULTYPE_RETRY)) 5961 + if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) 5962 + return false; 5963 + 5964 + if (WARN_ON_ONCE(is_guest_mode(vcpu))) 5965 5965 return false; 5966 5966 5967 5967 if (x86_page_table_writing_insn(ctxt)) ··· 6282 6276 6283 6277 return r; 6284 6278 } 6285 - EXPORT_SYMBOL_GPL(x86_emulate_instruction); 6279 + 6280 + int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) 6281 + { 6282 + return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 6283 + } 6284 + EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 6285 + 6286 + int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 6287 + void *insn, int insn_len) 6288 + { 6289 + return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 6290 + } 6291 + EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 6286 6292 6287 6293 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 6288 6294 unsigned short port) ··· 7752 7734 { 7753 7735 int r; 7754 7736 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 7755 - r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 7737 + r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 7756 7738 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 7757 7739 if (r != EMULATE_DONE) 7758 7740 return 0;
+2
arch/x86/kvm/x86.h
··· 274 274 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, 275 275 int page_num); 276 276 bool kvm_vector_hashing_enabled(void); 277 + int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, 278 + int emulation_type, void *insn, int insn_len); 277 279 278 280 #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ 279 281 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
+4 -4
arch/x86/mm/pgtable.c
··· 269 269 if (pgd_val(pgd) != 0) { 270 270 pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); 271 271 272 - *pgdp = native_make_pgd(0); 272 + pgd_clear(pgdp); 273 273 274 274 paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); 275 275 pmd_free(mm, pmd); ··· 494 494 int changed = !pte_same(*ptep, entry); 495 495 496 496 if (changed && dirty) 497 - *ptep = entry; 497 + set_pte(ptep, entry); 498 498 499 499 return changed; 500 500 } ··· 509 509 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 510 510 511 511 if (changed && dirty) { 512 - *pmdp = entry; 512 + set_pmd(pmdp, entry); 513 513 /* 514 514 * We had a write-protection fault here and changed the pmd 515 515 * to to more permissive. No need to flush the TLB for that, ··· 529 529 VM_BUG_ON(address & ~HPAGE_PUD_MASK); 530 530 531 531 if (changed && dirty) { 532 - *pudp = entry; 532 + set_pud(pudp, entry); 533 533 /* 534 534 * We had a write-protection fault here and changed the pud 535 535 * to to more permissive. No need to flush the TLB for that,
+2 -2
block/bfq-cgroup.c
··· 275 275 276 276 void bfqg_and_blkg_put(struct bfq_group *bfqg) 277 277 { 278 - bfqg_put(bfqg); 279 - 280 278 blkg_put(bfqg_to_blkg(bfqg)); 279 + 280 + bfqg_put(bfqg); 281 281 } 282 282 283 283 /* @stats = 0 */
+2 -1
block/bio.c
··· 2015 2015 { 2016 2016 if (unlikely(bio->bi_blkg)) 2017 2017 return -EBUSY; 2018 - blkg_get(blkg); 2018 + if (!blkg_try_get(blkg)) 2019 + return -ENODEV; 2019 2020 bio->bi_blkg = blkg; 2020 2021 return 0; 2021 2022 }
+48 -57
block/blk-cgroup.c
··· 310 310 } 311 311 } 312 312 313 - static void blkg_pd_offline(struct blkcg_gq *blkg) 314 - { 315 - int i; 316 - 317 - lockdep_assert_held(blkg->q->queue_lock); 318 - lockdep_assert_held(&blkg->blkcg->lock); 319 - 320 - for (i = 0; i < BLKCG_MAX_POLS; i++) { 321 - struct blkcg_policy *pol = blkcg_policy[i]; 322 - 323 - if (blkg->pd[i] && !blkg->pd[i]->offline && 324 - pol->pd_offline_fn) { 325 - pol->pd_offline_fn(blkg->pd[i]); 326 - blkg->pd[i]->offline = true; 327 - } 328 - } 329 - } 330 - 331 313 static void blkg_destroy(struct blkcg_gq *blkg) 332 314 { 333 315 struct blkcg *blkcg = blkg->blkcg; 334 316 struct blkcg_gq *parent = blkg->parent; 317 + int i; 335 318 336 319 lockdep_assert_held(blkg->q->queue_lock); 337 320 lockdep_assert_held(&blkcg->lock); ··· 322 339 /* Something wrong if we are trying to remove same group twice */ 323 340 WARN_ON_ONCE(list_empty(&blkg->q_node)); 324 341 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); 342 + 343 + for (i = 0; i < BLKCG_MAX_POLS; i++) { 344 + struct blkcg_policy *pol = blkcg_policy[i]; 345 + 346 + if (blkg->pd[i] && pol->pd_offline_fn) 347 + pol->pd_offline_fn(blkg->pd[i]); 348 + } 325 349 326 350 if (parent) { 327 351 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes); ··· 372 382 struct blkcg *blkcg = blkg->blkcg; 373 383 374 384 spin_lock(&blkcg->lock); 375 - blkg_pd_offline(blkg); 376 385 blkg_destroy(blkg); 377 386 spin_unlock(&blkcg->lock); 378 387 } ··· 1042 1053 { } /* terminate */ 1043 1054 }; 1044 1055 1056 + /* 1057 + * blkcg destruction is a three-stage process. 1058 + * 1059 + * 1. Destruction starts. The blkcg_css_offline() callback is invoked 1060 + * which offlines writeback. Here we tie the next stage of blkg destruction 1061 + * to the completion of writeback associated with the blkcg. This lets us 1062 + * avoid punting potentially large amounts of outstanding writeback to root 1063 + * while maintaining any ongoing policies. 
The next stage is triggered when 1064 + * the nr_cgwbs count goes to zero. 1065 + * 1066 + * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called 1067 + * and handles the destruction of blkgs. Here the css reference held by 1068 + * the blkg is put back eventually allowing blkcg_css_free() to be called. 1069 + * This work may occur in cgwb_release_workfn() on the cgwb_release 1070 + * workqueue. Any submitted ios that fail to get the blkg ref will be 1071 + * punted to the root_blkg. 1072 + * 1073 + * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called. 1074 + * This finally frees the blkcg. 1075 + */ 1076 + 1045 1077 /** 1046 1078 * blkcg_css_offline - cgroup css_offline callback 1047 1079 * @css: css of interest 1048 1080 * 1049 - * This function is called when @css is about to go away and responsible 1050 - * for offlining all blkgs pd and killing all wbs associated with @css. 1051 - * blkgs pd offline should be done while holding both q and blkcg locks. 1052 - * As blkcg lock is nested inside q lock, this function performs reverse 1053 - * double lock dancing. 1054 - * 1055 - * This is the blkcg counterpart of ioc_release_fn(). 1081 + * This function is called when @css is about to go away. Here the cgwbs are 1082 + * offlined first and only once writeback associated with the blkcg has 1083 + * finished do we start step 2 (see above). 
1056 1084 */ 1057 1085 static void blkcg_css_offline(struct cgroup_subsys_state *css) 1058 1086 { 1059 1087 struct blkcg *blkcg = css_to_blkcg(css); 1060 - struct blkcg_gq *blkg; 1061 1088 1062 - spin_lock_irq(&blkcg->lock); 1063 - 1064 - hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { 1065 - struct request_queue *q = blkg->q; 1066 - 1067 - if (spin_trylock(q->queue_lock)) { 1068 - blkg_pd_offline(blkg); 1069 - spin_unlock(q->queue_lock); 1070 - } else { 1071 - spin_unlock_irq(&blkcg->lock); 1072 - cpu_relax(); 1073 - spin_lock_irq(&blkcg->lock); 1074 - } 1075 - } 1076 - 1077 - spin_unlock_irq(&blkcg->lock); 1078 - 1089 + /* this prevents anyone from attaching or migrating to this blkcg */ 1079 1090 wb_blkcg_offline(blkcg); 1091 + 1092 + /* put the base cgwb reference allowing step 2 to be triggered */ 1093 + blkcg_cgwb_put(blkcg); 1080 1094 } 1081 1095 1082 1096 /** 1083 - * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg 1097 + * blkcg_destroy_blkgs - responsible for shooting down blkgs 1084 1098 * @blkcg: blkcg of interest 1085 1099 * 1086 - * This function is called when blkcg css is about to free and responsible for 1087 - * destroying all blkgs associated with @blkcg. 1088 - * blkgs should be removed while holding both q and blkcg locks. As blkcg lock 1100 + * blkgs should be removed while holding both q and blkcg locks. As blkcg lock 1089 1101 * is nested inside q lock, this function performs reverse double lock dancing. 1102 + * Destroying the blkgs releases the reference held on the blkcg's css allowing 1103 + * blkcg_css_free to eventually be called. 1104 + * 1105 + * This is the blkcg counterpart of ioc_release_fn(). 
1090 1106 */ 1091 - static void blkcg_destroy_all_blkgs(struct blkcg *blkcg) 1107 + void blkcg_destroy_blkgs(struct blkcg *blkcg) 1092 1108 { 1093 1109 spin_lock_irq(&blkcg->lock); 1110 + 1094 1111 while (!hlist_empty(&blkcg->blkg_list)) { 1095 1112 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, 1096 - struct blkcg_gq, 1097 - blkcg_node); 1113 + struct blkcg_gq, blkcg_node); 1098 1114 struct request_queue *q = blkg->q; 1099 1115 1100 1116 if (spin_trylock(q->queue_lock)) { ··· 1111 1117 spin_lock_irq(&blkcg->lock); 1112 1118 } 1113 1119 } 1120 + 1114 1121 spin_unlock_irq(&blkcg->lock); 1115 1122 } 1116 1123 ··· 1119 1124 { 1120 1125 struct blkcg *blkcg = css_to_blkcg(css); 1121 1126 int i; 1122 - 1123 - blkcg_destroy_all_blkgs(blkcg); 1124 1127 1125 1128 mutex_lock(&blkcg_pol_mutex); 1126 1129 ··· 1182 1189 INIT_HLIST_HEAD(&blkcg->blkg_list); 1183 1190 #ifdef CONFIG_CGROUP_WRITEBACK 1184 1191 INIT_LIST_HEAD(&blkcg->cgwb_list); 1192 + refcount_set(&blkcg->cgwb_refcnt, 1); 1185 1193 #endif 1186 1194 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); 1187 1195 ··· 1474 1480 1475 1481 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1476 1482 if (blkg->pd[pol->plid]) { 1477 - if (!blkg->pd[pol->plid]->offline && 1478 - pol->pd_offline_fn) { 1483 + if (pol->pd_offline_fn) 1479 1484 pol->pd_offline_fn(blkg->pd[pol->plid]); 1480 - blkg->pd[pol->plid]->offline = true; 1481 - } 1482 1485 pol->pd_free_fn(blkg->pd[pol->plid]); 1483 1486 blkg->pd[pol->plid] = NULL; 1484 1487 }
+4 -1
block/blk-core.c
··· 2163 2163 { 2164 2164 const int op = bio_op(bio); 2165 2165 2166 - if (part->policy && (op_is_write(op) && !op_is_flush(op))) { 2166 + if (part->policy && op_is_write(op)) { 2167 2167 char b[BDEVNAME_SIZE]; 2168 + 2169 + if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) 2170 + return false; 2168 2171 2169 2172 WARN_ONCE(1, 2170 2173 "generic_make_request: Trying to write "
+3 -2
block/blk-throttle.c
··· 2129 2129 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) 2130 2130 { 2131 2131 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW 2132 - if (bio->bi_css) 2133 - bio_associate_blkg(bio, tg_to_blkg(tg)); 2132 + /* fallback to root_blkg if we fail to get a blkg ref */ 2133 + if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV)) 2134 + bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg); 2134 2135 bio_issue_init(&bio->bi_issue, bio_sectors(bio)); 2135 2136 #endif 2136 2137 }
+1 -1
drivers/acpi/acpi_lpss.c
··· 879 879 #define LPSS_GPIODEF0_DMA_LLP BIT(13) 880 880 881 881 static DEFINE_MUTEX(lpss_iosf_mutex); 882 - static bool lpss_iosf_d3_entered; 882 + static bool lpss_iosf_d3_entered = true; 883 883 884 884 static void lpss_iosf_enter_d3_state(void) 885 885 {
+7 -6
drivers/acpi/bus.c
··· 35 35 #include <linux/delay.h> 36 36 #ifdef CONFIG_X86 37 37 #include <asm/mpspec.h> 38 + #include <linux/dmi.h> 38 39 #endif 39 40 #include <linux/acpi_iort.h> 40 41 #include <linux/pci.h> 41 42 #include <acpi/apei.h> 42 - #include <linux/dmi.h> 43 43 #include <linux/suspend.h> 44 44 45 45 #include "internal.h" ··· 80 80 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"), 81 81 }, 82 82 }, 83 - {} 84 - }; 85 - #else 86 - static const struct dmi_system_id dsdt_dmi_table[] __initconst = { 87 83 {} 88 84 }; 89 85 #endif ··· 1029 1033 1030 1034 acpi_permanent_mmap = true; 1031 1035 1036 + #ifdef CONFIG_X86 1032 1037 /* 1033 1038 * If the machine falls into the DMI check table, 1034 - * DSDT will be copied to memory 1039 + * DSDT will be copied to memory. 1040 + * Note that calling dmi_check_system() here on other architectures 1041 + * would not be OK because only x86 initializes dmi early enough. 1042 + * Thankfully only x86 systems need such quirks for now. 1035 1043 */ 1036 1044 dmi_check_system(dsdt_dmi_table); 1045 + #endif 1037 1046 1038 1047 status = acpi_reallocate_root_table(); 1039 1048 if (ACPI_FAILURE(status)) {
drivers/ata/libata-core.c
+9 -11
drivers/base/memory.c
··· 417 417 int nid; 418 418 419 419 /* 420 - * The block contains more than one zone can not be offlined. 421 - * This can happen e.g. for ZONE_DMA and ZONE_DMA32 422 - */ 423 - if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn)) 424 - return sprintf(buf, "none\n"); 425 - 426 - start_pfn = valid_start_pfn; 427 - nr_pages = valid_end_pfn - start_pfn; 428 - 429 - /* 430 420 * Check the existing zone. Make sure that we do that only on the 431 421 * online nodes otherwise the page_zone is not reliable 432 422 */ 433 423 if (mem->state == MEM_ONLINE) { 424 + /* 425 + * The block contains more than one zone can not be offlined. 426 + * This can happen e.g. for ZONE_DMA and ZONE_DMA32 427 + */ 428 + if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, 429 + &valid_start_pfn, &valid_end_pfn)) 430 + return sprintf(buf, "none\n"); 431 + start_pfn = valid_start_pfn; 434 432 strcat(buf, page_zone(pfn_to_page(start_pfn))->name); 435 433 goto out; 436 434 } 437 435 438 - nid = pfn_to_nid(start_pfn); 436 + nid = mem->nid; 439 437 default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); 440 438 strcat(buf, default_zone->name); 441 439
+3
drivers/block/nbd.c
··· 1239 1239 case NBD_SET_SOCK: 1240 1240 return nbd_add_socket(nbd, arg, false); 1241 1241 case NBD_SET_BLKSIZE: 1242 + if (!arg || !is_power_of_2(arg) || arg < 512 || 1243 + arg > PAGE_SIZE) 1244 + return -EINVAL; 1242 1245 nbd_size_set(nbd, arg, 1243 1246 div_s64(config->bytesize, arg)); 1244 1247 return 0;
+179 -58
drivers/block/rbd.c
··· 4207 4207 4208 4208 count += sprintf(&buf[count], "%s" 4209 4209 "pool_id %llu\npool_name %s\n" 4210 + "pool_ns %s\n" 4210 4211 "image_id %s\nimage_name %s\n" 4211 4212 "snap_id %llu\nsnap_name %s\n" 4212 4213 "overlap %llu\n", 4213 4214 !count ? "" : "\n", /* first? */ 4214 4215 spec->pool_id, spec->pool_name, 4216 + spec->pool_ns ?: "", 4215 4217 spec->image_id, spec->image_name ?: "(unknown)", 4216 4218 spec->snap_id, spec->snap_name, 4217 4219 rbd_dev->parent_overlap); ··· 4586 4584 &rbd_dev->header.features); 4587 4585 } 4588 4586 4587 + struct parent_image_info { 4588 + u64 pool_id; 4589 + const char *pool_ns; 4590 + const char *image_id; 4591 + u64 snap_id; 4592 + 4593 + bool has_overlap; 4594 + u64 overlap; 4595 + }; 4596 + 4597 + /* 4598 + * The caller is responsible for @pii. 4599 + */ 4600 + static int decode_parent_image_spec(void **p, void *end, 4601 + struct parent_image_info *pii) 4602 + { 4603 + u8 struct_v; 4604 + u32 struct_len; 4605 + int ret; 4606 + 4607 + ret = ceph_start_decoding(p, end, 1, "ParentImageSpec", 4608 + &struct_v, &struct_len); 4609 + if (ret) 4610 + return ret; 4611 + 4612 + ceph_decode_64_safe(p, end, pii->pool_id, e_inval); 4613 + pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); 4614 + if (IS_ERR(pii->pool_ns)) { 4615 + ret = PTR_ERR(pii->pool_ns); 4616 + pii->pool_ns = NULL; 4617 + return ret; 4618 + } 4619 + pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); 4620 + if (IS_ERR(pii->image_id)) { 4621 + ret = PTR_ERR(pii->image_id); 4622 + pii->image_id = NULL; 4623 + return ret; 4624 + } 4625 + ceph_decode_64_safe(p, end, pii->snap_id, e_inval); 4626 + return 0; 4627 + 4628 + e_inval: 4629 + return -EINVAL; 4630 + } 4631 + 4632 + static int __get_parent_info(struct rbd_device *rbd_dev, 4633 + struct page *req_page, 4634 + struct page *reply_page, 4635 + struct parent_image_info *pii) 4636 + { 4637 + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4638 + size_t 
reply_len = PAGE_SIZE; 4639 + void *p, *end; 4640 + int ret; 4641 + 4642 + ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 4643 + "rbd", "parent_get", CEPH_OSD_FLAG_READ, 4644 + req_page, sizeof(u64), reply_page, &reply_len); 4645 + if (ret) 4646 + return ret == -EOPNOTSUPP ? 1 : ret; 4647 + 4648 + p = page_address(reply_page); 4649 + end = p + reply_len; 4650 + ret = decode_parent_image_spec(&p, end, pii); 4651 + if (ret) 4652 + return ret; 4653 + 4654 + ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 4655 + "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ, 4656 + req_page, sizeof(u64), reply_page, &reply_len); 4657 + if (ret) 4658 + return ret; 4659 + 4660 + p = page_address(reply_page); 4661 + end = p + reply_len; 4662 + ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval); 4663 + if (pii->has_overlap) 4664 + ceph_decode_64_safe(&p, end, pii->overlap, e_inval); 4665 + 4666 + return 0; 4667 + 4668 + e_inval: 4669 + return -EINVAL; 4670 + } 4671 + 4672 + /* 4673 + * The caller is responsible for @pii. 
4674 + */ 4675 + static int __get_parent_info_legacy(struct rbd_device *rbd_dev, 4676 + struct page *req_page, 4677 + struct page *reply_page, 4678 + struct parent_image_info *pii) 4679 + { 4680 + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4681 + size_t reply_len = PAGE_SIZE; 4682 + void *p, *end; 4683 + int ret; 4684 + 4685 + ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 4686 + "rbd", "get_parent", CEPH_OSD_FLAG_READ, 4687 + req_page, sizeof(u64), reply_page, &reply_len); 4688 + if (ret) 4689 + return ret; 4690 + 4691 + p = page_address(reply_page); 4692 + end = p + reply_len; 4693 + ceph_decode_64_safe(&p, end, pii->pool_id, e_inval); 4694 + pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 4695 + if (IS_ERR(pii->image_id)) { 4696 + ret = PTR_ERR(pii->image_id); 4697 + pii->image_id = NULL; 4698 + return ret; 4699 + } 4700 + ceph_decode_64_safe(&p, end, pii->snap_id, e_inval); 4701 + pii->has_overlap = true; 4702 + ceph_decode_64_safe(&p, end, pii->overlap, e_inval); 4703 + 4704 + return 0; 4705 + 4706 + e_inval: 4707 + return -EINVAL; 4708 + } 4709 + 4710 + static int get_parent_info(struct rbd_device *rbd_dev, 4711 + struct parent_image_info *pii) 4712 + { 4713 + struct page *req_page, *reply_page; 4714 + void *p; 4715 + int ret; 4716 + 4717 + req_page = alloc_page(GFP_KERNEL); 4718 + if (!req_page) 4719 + return -ENOMEM; 4720 + 4721 + reply_page = alloc_page(GFP_KERNEL); 4722 + if (!reply_page) { 4723 + __free_page(req_page); 4724 + return -ENOMEM; 4725 + } 4726 + 4727 + p = page_address(req_page); 4728 + ceph_encode_64(&p, rbd_dev->spec->snap_id); 4729 + ret = __get_parent_info(rbd_dev, req_page, reply_page, pii); 4730 + if (ret > 0) 4731 + ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, 4732 + pii); 4733 + 4734 + __free_page(req_page); 4735 + __free_page(reply_page); 4736 + return ret; 4737 + } 4738 + 4589 4739 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) 
4590 4740 { 4591 4741 struct rbd_spec *parent_spec; 4592 - size_t size; 4593 - void *reply_buf = NULL; 4594 - __le64 snapid; 4595 - void *p; 4596 - void *end; 4597 - u64 pool_id; 4598 - char *image_id; 4599 - u64 snap_id; 4600 - u64 overlap; 4742 + struct parent_image_info pii = { 0 }; 4601 4743 int ret; 4602 4744 4603 4745 parent_spec = rbd_spec_alloc(); 4604 4746 if (!parent_spec) 4605 4747 return -ENOMEM; 4606 4748 4607 - size = sizeof (__le64) + /* pool_id */ 4608 - sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */ 4609 - sizeof (__le64) + /* snap_id */ 4610 - sizeof (__le64); /* overlap */ 4611 - reply_buf = kmalloc(size, GFP_KERNEL); 4612 - if (!reply_buf) { 4613 - ret = -ENOMEM; 4614 - goto out_err; 4615 - } 4616 - 4617 - snapid = cpu_to_le64(rbd_dev->spec->snap_id); 4618 - ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4619 - &rbd_dev->header_oloc, "get_parent", 4620 - &snapid, sizeof(snapid), reply_buf, size); 4621 - dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4622 - if (ret < 0) 4749 + ret = get_parent_info(rbd_dev, &pii); 4750 + if (ret) 4623 4751 goto out_err; 4624 4752 4625 - p = reply_buf; 4626 - end = reply_buf + ret; 4627 - ret = -ERANGE; 4628 - ceph_decode_64_safe(&p, end, pool_id, out_err); 4629 - if (pool_id == CEPH_NOPOOL) { 4753 + dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", 4754 + __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, 4755 + pii.has_overlap, pii.overlap); 4756 + 4757 + if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { 4630 4758 /* 4631 4759 * Either the parent never existed, or we have 4632 4760 * record of it but the image got flattened so it no ··· 4765 4633 * overlap to 0. The effect of this is that all new 4766 4634 * requests will be treated as if the image had no 4767 4635 * parent. 4636 + * 4637 + * If !pii.has_overlap, the parent image spec is not 4638 + * applicable. 
It's there to avoid duplication in each 4639 + * snapshot record. 4768 4640 */ 4769 4641 if (rbd_dev->parent_overlap) { 4770 4642 rbd_dev->parent_overlap = 0; ··· 4783 4647 /* The ceph file layout needs to fit pool id in 32 bits */ 4784 4648 4785 4649 ret = -EIO; 4786 - if (pool_id > (u64)U32_MAX) { 4650 + if (pii.pool_id > (u64)U32_MAX) { 4787 4651 rbd_warn(NULL, "parent pool id too large (%llu > %u)", 4788 - (unsigned long long)pool_id, U32_MAX); 4652 + (unsigned long long)pii.pool_id, U32_MAX); 4789 4653 goto out_err; 4790 4654 } 4791 - 4792 - image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 4793 - if (IS_ERR(image_id)) { 4794 - ret = PTR_ERR(image_id); 4795 - goto out_err; 4796 - } 4797 - ceph_decode_64_safe(&p, end, snap_id, out_err); 4798 - ceph_decode_64_safe(&p, end, overlap, out_err); 4799 4655 4800 4656 /* 4801 4657 * The parent won't change (except when the clone is ··· 4795 4667 * record the parent spec we have not already done so. 4796 4668 */ 4797 4669 if (!rbd_dev->parent_spec) { 4798 - parent_spec->pool_id = pool_id; 4799 - parent_spec->image_id = image_id; 4800 - parent_spec->snap_id = snap_id; 4801 - 4802 - /* TODO: support cloning across namespaces */ 4803 - if (rbd_dev->spec->pool_ns) { 4804 - parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns, 4805 - GFP_KERNEL); 4806 - if (!parent_spec->pool_ns) { 4807 - ret = -ENOMEM; 4808 - goto out_err; 4809 - } 4670 + parent_spec->pool_id = pii.pool_id; 4671 + if (pii.pool_ns && *pii.pool_ns) { 4672 + parent_spec->pool_ns = pii.pool_ns; 4673 + pii.pool_ns = NULL; 4810 4674 } 4675 + parent_spec->image_id = pii.image_id; 4676 + pii.image_id = NULL; 4677 + parent_spec->snap_id = pii.snap_id; 4811 4678 4812 4679 rbd_dev->parent_spec = parent_spec; 4813 4680 parent_spec = NULL; /* rbd_dev now owns this */ 4814 - } else { 4815 - kfree(image_id); 4816 4681 } 4817 4682 4818 4683 /* 4819 4684 * We always update the parent overlap. 
If it's zero we issue 4820 4685 * a warning, as we will proceed as if there was no parent. 4821 4686 */ 4822 - if (!overlap) { 4687 + if (!pii.overlap) { 4823 4688 if (parent_spec) { 4824 4689 /* refresh, careful to warn just once */ 4825 4690 if (rbd_dev->parent_overlap) ··· 4823 4702 rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); 4824 4703 } 4825 4704 } 4826 - rbd_dev->parent_overlap = overlap; 4705 + rbd_dev->parent_overlap = pii.overlap; 4827 4706 4828 4707 out: 4829 4708 ret = 0; 4830 4709 out_err: 4831 - kfree(reply_buf); 4710 + kfree(pii.pool_ns); 4711 + kfree(pii.image_id); 4832 4712 rbd_spec_put(parent_spec); 4833 - 4834 4713 return ret; 4835 4714 } 4836 4715
+2 -2
drivers/char/Kconfig
··· 566 566 that CPU manufacturer (perhaps with the insistence or mandate 567 567 of a Nation State's intelligence or law enforcement agencies) 568 568 has not installed a hidden back door to compromise the CPU's 569 - random number generation facilities. 570 - 569 + random number generation facilities. This can also be configured 570 + at boot with "random.trust_cpu=on/off".
+8 -3
drivers/char/random.c
··· 779 779 780 780 static void invalidate_batched_entropy(void); 781 781 782 + static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); 783 + static int __init parse_trust_cpu(char *arg) 784 + { 785 + return kstrtobool(arg, &trust_cpu); 786 + } 787 + early_param("random.trust_cpu", parse_trust_cpu); 788 + 782 789 static void crng_initialize(struct crng_state *crng) 783 790 { 784 791 int i; ··· 806 799 } 807 800 crng->state[i] ^= rv; 808 801 } 809 - #ifdef CONFIG_RANDOM_TRUST_CPU 810 - if (arch_init) { 802 + if (trust_cpu && arch_init) { 811 803 crng_init = 2; 812 804 pr_notice("random: crng done (trusting CPU's manufacturer)\n"); 813 805 } 814 - #endif 815 806 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; 816 807 } 817 808
+2 -1
drivers/dax/device.c
··· 392 392 { 393 393 struct file *filp = vmf->vma->vm_file; 394 394 unsigned long fault_size; 395 - int rc, id; 395 + vm_fault_t rc = VM_FAULT_SIGBUS; 396 + int id; 396 397 pfn_t pfn; 397 398 struct dev_dax *dev_dax = filp->private_data; 398 399
+7 -1
drivers/firmware/arm_scmi/perf.c
··· 166 166 le32_to_cpu(attr->sustained_freq_khz); 167 167 dom_info->sustained_perf_level = 168 168 le32_to_cpu(attr->sustained_perf_level); 169 - dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / 169 + if (!dom_info->sustained_freq_khz || 170 + !dom_info->sustained_perf_level) 171 + /* CPUFreq converts to kHz, hence default 1000 */ 172 + dom_info->mult_factor = 1000; 173 + else 174 + dom_info->mult_factor = 175 + (dom_info->sustained_freq_khz * 1000) / 170 176 dom_info->sustained_perf_level; 171 177 memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); 172 178 }
+20 -4
drivers/gpio/gpio-adp5588.c
··· 41 41 uint8_t int_en[3]; 42 42 uint8_t irq_mask[3]; 43 43 uint8_t irq_stat[3]; 44 + uint8_t int_input_en[3]; 45 + uint8_t int_lvl_cached[3]; 44 46 }; 45 47 46 48 static int adp5588_gpio_read(struct i2c_client *client, u8 reg) ··· 175 173 struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); 176 174 int i; 177 175 178 - for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) 176 + for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { 177 + if (dev->int_input_en[i]) { 178 + mutex_lock(&dev->lock); 179 + dev->dir[i] &= ~dev->int_input_en[i]; 180 + dev->int_input_en[i] = 0; 181 + adp5588_gpio_write(dev->client, GPIO_DIR1 + i, 182 + dev->dir[i]); 183 + mutex_unlock(&dev->lock); 184 + } 185 + 186 + if (dev->int_lvl_cached[i] != dev->int_lvl[i]) { 187 + dev->int_lvl_cached[i] = dev->int_lvl[i]; 188 + adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i, 189 + dev->int_lvl[i]); 190 + } 191 + 179 192 if (dev->int_en[i] ^ dev->irq_mask[i]) { 180 193 dev->int_en[i] = dev->irq_mask[i]; 181 194 adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, 182 195 dev->int_en[i]); 183 196 } 197 + } 184 198 185 199 mutex_unlock(&dev->irq_lock); 186 200 } ··· 239 221 else 240 222 return -EINVAL; 241 223 242 - adp5588_gpio_direction_input(&dev->gpio_chip, gpio); 243 - adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank, 244 - dev->int_lvl[bank]); 224 + dev->int_input_en[bank] |= bit; 245 225 246 226 return 0; 247 227 }
+1
drivers/gpio/gpio-dwapb.c
··· 728 728 out_unregister: 729 729 dwapb_gpio_unregister(gpio); 730 730 dwapb_irq_teardown(gpio); 731 + clk_disable_unprepare(gpio->clk); 731 732 732 733 return err; 733 734 }
+50 -36
drivers/gpio/gpiolib-acpi.c
··· 25 25 26 26 struct acpi_gpio_event { 27 27 struct list_head node; 28 - struct list_head initial_sync_list; 29 28 acpi_handle handle; 30 29 unsigned int pin; 31 30 unsigned int irq; ··· 48 49 struct mutex conn_lock; 49 50 struct gpio_chip *chip; 50 51 struct list_head events; 52 + struct list_head deferred_req_irqs_list_entry; 51 53 }; 52 54 53 - static LIST_HEAD(acpi_gpio_initial_sync_list); 54 - static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); 55 + /* 56 + * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init 57 + * (so builtin drivers) we register the ACPI GpioInt event handlers from a 58 + * late_initcall_sync handler, so that other builtin drivers can register their 59 + * OpRegions before the event handlers can run. This list contains gpiochips 60 + * for which the acpi_gpiochip_request_interrupts() has been deferred. 61 + */ 62 + static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); 63 + static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); 64 + static bool acpi_gpio_deferred_req_irqs_done; 55 65 56 66 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) 57 67 { ··· 95 87 return ERR_PTR(-EPROBE_DEFER); 96 88 97 89 return gpiochip_get_desc(chip, pin); 98 - } 99 - 100 - static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) 101 - { 102 - mutex_lock(&acpi_gpio_initial_sync_list_lock); 103 - list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); 104 - mutex_unlock(&acpi_gpio_initial_sync_list_lock); 105 - } 106 - 107 - static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event) 108 - { 109 - mutex_lock(&acpi_gpio_initial_sync_list_lock); 110 - if (!list_empty(&event->initial_sync_list)) 111 - list_del_init(&event->initial_sync_list); 112 - mutex_unlock(&acpi_gpio_initial_sync_list_lock); 113 90 } 114 91 115 92 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) ··· 179 186 180 187 gpiod_direction_input(desc); 181 188 182 - value = gpiod_get_value(desc); 
189 + value = gpiod_get_value_cansleep(desc); 183 190 184 191 ret = gpiochip_lock_as_irq(chip, pin); 185 192 if (ret) { ··· 222 229 event->irq = irq; 223 230 event->pin = pin; 224 231 event->desc = desc; 225 - INIT_LIST_HEAD(&event->initial_sync_list); 226 232 227 233 ret = request_threaded_irq(event->irq, NULL, handler, irqflags, 228 234 "ACPI:Event", event); ··· 243 251 * may refer to OperationRegions from other (builtin) drivers which 244 252 * may be probed after us. 245 253 */ 246 - if (handler == acpi_gpio_irq_handler && 247 - (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || 248 - ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) 249 - acpi_gpio_add_to_initial_sync_list(event); 254 + if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || 255 + ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)) 256 + handler(event->irq, event); 250 257 251 258 return AE_OK; 252 259 ··· 274 283 struct acpi_gpio_chip *acpi_gpio; 275 284 acpi_handle handle; 276 285 acpi_status status; 286 + bool defer; 277 287 278 288 if (!chip->parent || !chip->to_irq) 279 289 return; ··· 285 293 286 294 status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); 287 295 if (ACPI_FAILURE(status)) 296 + return; 297 + 298 + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); 299 + defer = !acpi_gpio_deferred_req_irqs_done; 300 + if (defer) 301 + list_add(&acpi_gpio->deferred_req_irqs_list_entry, 302 + &acpi_gpio_deferred_req_irqs_list); 303 + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); 304 + 305 + if (defer) 288 306 return; 289 307 290 308 acpi_walk_resources(handle, "_AEI", ··· 327 325 if (ACPI_FAILURE(status)) 328 326 return; 329 327 328 + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); 329 + if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry)) 330 + list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); 331 + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); 332 + 330 333 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 331 334 struct gpio_desc 
*desc; 332 - 333 - acpi_gpio_del_from_initial_sync_list(event); 334 335 335 336 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) 336 337 disable_irq_wake(event->irq); ··· 1057 1052 1058 1053 acpi_gpio->chip = chip; 1059 1054 INIT_LIST_HEAD(&acpi_gpio->events); 1055 + INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry); 1060 1056 1061 1057 status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); 1062 1058 if (ACPI_FAILURE(status)) { ··· 1204 1198 return con_id == NULL; 1205 1199 } 1206 1200 1207 - /* Sync the initial state of handlers after all builtin drivers have probed */ 1208 - static int acpi_gpio_initial_sync(void) 1201 + /* Run deferred acpi_gpiochip_request_interrupts() */ 1202 + static int acpi_gpio_handle_deferred_request_interrupts(void) 1209 1203 { 1210 - struct acpi_gpio_event *event, *ep; 1204 + struct acpi_gpio_chip *acpi_gpio, *tmp; 1211 1205 1212 - mutex_lock(&acpi_gpio_initial_sync_list_lock); 1213 - list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, 1214 - initial_sync_list) { 1215 - acpi_evaluate_object(event->handle, NULL, NULL, NULL); 1216 - list_del_init(&event->initial_sync_list); 1206 + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); 1207 + list_for_each_entry_safe(acpi_gpio, tmp, 1208 + &acpi_gpio_deferred_req_irqs_list, 1209 + deferred_req_irqs_list_entry) { 1210 + acpi_handle handle; 1211 + 1212 + handle = ACPI_HANDLE(acpi_gpio->chip->parent); 1213 + acpi_walk_resources(handle, "_AEI", 1214 + acpi_gpiochip_request_interrupt, acpi_gpio); 1215 + 1216 + list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); 1217 1217 } 1218 - mutex_unlock(&acpi_gpio_initial_sync_list_lock); 1218 + 1219 + acpi_gpio_deferred_req_irqs_done = true; 1220 + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); 1219 1221 1220 1222 return 0; 1221 1223 } 1222 1224 /* We must use _sync so that this runs after the first deferred_probe run */ 1223 - late_initcall_sync(acpi_gpio_initial_sync); 1225 + 
late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
+1
drivers/gpio/gpiolib-of.c
··· 31 31 struct of_phandle_args *gpiospec = data; 32 32 33 33 return chip->gpiodev->dev.of_node == gpiospec->np && 34 + chip->of_xlate && 34 35 chip->of_xlate(chip, gpiospec, NULL) >= 0; 35 36 } 36 37
+26 -7
drivers/gpu/drm/i915/gvt/dmabuf.c
··· 170 170 unsigned int tiling_mode = 0; 171 171 unsigned int stride = 0; 172 172 173 - switch (info->drm_format_mod << 10) { 174 - case PLANE_CTL_TILED_LINEAR: 173 + switch (info->drm_format_mod) { 174 + case DRM_FORMAT_MOD_LINEAR: 175 175 tiling_mode = I915_TILING_NONE; 176 176 break; 177 - case PLANE_CTL_TILED_X: 177 + case I915_FORMAT_MOD_X_TILED: 178 178 tiling_mode = I915_TILING_X; 179 179 stride = info->stride; 180 180 break; 181 - case PLANE_CTL_TILED_Y: 181 + case I915_FORMAT_MOD_Y_TILED: 182 + case I915_FORMAT_MOD_Yf_TILED: 182 183 tiling_mode = I915_TILING_Y; 183 184 stride = info->stride; 184 185 break; 185 186 default: 186 - gvt_dbg_core("not supported tiling mode\n"); 187 + gvt_dbg_core("invalid drm_format_mod %llx for tiling\n", 188 + info->drm_format_mod); 187 189 } 188 190 obj->tiling_and_stride = tiling_mode | stride; 189 191 } else { ··· 224 222 info->height = p.height; 225 223 info->stride = p.stride; 226 224 info->drm_format = p.drm_format; 227 - info->drm_format_mod = p.tiled; 225 + 226 + switch (p.tiled) { 227 + case PLANE_CTL_TILED_LINEAR: 228 + info->drm_format_mod = DRM_FORMAT_MOD_LINEAR; 229 + break; 230 + case PLANE_CTL_TILED_X: 231 + info->drm_format_mod = I915_FORMAT_MOD_X_TILED; 232 + break; 233 + case PLANE_CTL_TILED_Y: 234 + info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; 235 + break; 236 + case PLANE_CTL_TILED_YF: 237 + info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; 238 + break; 239 + default: 240 + gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); 241 + } 242 + 228 243 info->size = (((p.stride * p.height * p.bpp) / 8) + 229 - (PAGE_SIZE - 1)) >> PAGE_SHIFT; 244 + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 230 245 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { 231 246 ret = intel_vgpu_decode_cursor_plane(vgpu, &c); 232 247 if (ret)
+2 -3
drivers/gpu/drm/i915/gvt/fb_decoder.c
··· 220 220 if (IS_SKYLAKE(dev_priv) 221 221 || IS_KABYLAKE(dev_priv) 222 222 || IS_BROXTON(dev_priv)) { 223 - plane->tiled = (val & PLANE_CTL_TILED_MASK) >> 224 - _PLANE_CTL_TILED_SHIFT; 223 + plane->tiled = val & PLANE_CTL_TILED_MASK; 225 224 fmt = skl_format_to_drm( 226 225 val & PLANE_CTL_FORMAT_MASK, 227 226 val & PLANE_CTL_ORDER_RGBX, ··· 259 260 return -EINVAL; 260 261 } 261 262 262 - plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), 263 + plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled, 263 264 (IS_SKYLAKE(dev_priv) 264 265 || IS_KABYLAKE(dev_priv) 265 266 || IS_BROXTON(dev_priv)) ?
+1 -1
drivers/gpu/drm/i915/gvt/fb_decoder.h
··· 101 101 /* color space conversion and gamma correction are not included */ 102 102 struct intel_vgpu_primary_plane_format { 103 103 u8 enabled; /* plane is enabled */ 104 - u8 tiled; /* X-tiled */ 104 + u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */ 105 105 u8 bpp; /* bits per pixel */ 106 106 u32 hw_format; /* format field in the PRI_CTL register */ 107 107 u32 drm_format; /* format in DRM definition */
+27 -6
drivers/gpu/drm/i915/gvt/handlers.c
··· 1296 1296 return 0; 1297 1297 } 1298 1298 1299 + static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu, 1300 + unsigned int offset, void *p_data, unsigned int bytes) 1301 + { 1302 + write_vreg(vgpu, offset, p_data, bytes); 1303 + 1304 + if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST) 1305 + vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE; 1306 + else 1307 + vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE; 1308 + 1309 + return 0; 1310 + } 1311 + 1299 1312 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, 1300 1313 unsigned int offset, void *p_data, unsigned int bytes) 1301 1314 { ··· 1538 1525 u32 v = *(u32 *)p_data; 1539 1526 u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; 1540 1527 1541 - vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; 1542 - vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; 1543 - vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; 1528 + switch (offset) { 1529 + case _PHY_CTL_FAMILY_EDP: 1530 + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; 1531 + break; 1532 + case _PHY_CTL_FAMILY_DDI: 1533 + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; 1534 + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; 1535 + break; 1536 + } 1544 1537 1545 1538 vgpu_vreg(vgpu, offset) = v; 1546 1539 ··· 2831 2812 MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, 2832 2813 skl_power_well_ctl_write); 2833 2814 2815 + MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); 2816 + 2834 2817 MMIO_D(_MMIO(0xa210), D_SKL_PLUS); 2835 2818 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2836 2819 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); ··· 3008 2987 NULL, gen9_trtte_write); 3009 2988 MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); 3010 2989 3011 - MMIO_D(_MMIO(0x45008), D_SKL_PLUS); 3012 - 3013 2990 MMIO_D(_MMIO(0x46430), D_SKL_PLUS); 3014 2991 3015 2992 MMIO_D(_MMIO(0x46520), D_SKL_PLUS); ··· 3044 3025 MMIO_D(_MMIO(0x44500), D_SKL_PLUS); 3045 3026 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); 
3046 3027 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, 3047 - NULL, NULL); 3028 + NULL, NULL); 3029 + MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, 3030 + NULL, NULL); 3048 3031 3049 3032 MMIO_D(_MMIO(0x4ab8), D_KBL); 3050 3033 MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
-2
drivers/gpu/drm/i915/gvt/mmio_context.c
··· 562 562 * performace for batch mmio read/write, so we need 563 563 * handle forcewake mannually. 564 564 */ 565 - intel_runtime_pm_get(dev_priv); 566 565 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 567 566 switch_mmio(pre, next, ring_id); 568 567 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 569 - intel_runtime_pm_put(dev_priv); 570 568 } 571 569 572 570 /**
+30 -7
drivers/gpu/drm/i915/gvt/sched_policy.c
··· 47 47 return false; 48 48 } 49 49 50 + /* We give 2 seconds higher prio for vGPU during start */ 51 + #define GVT_SCHED_VGPU_PRI_TIME 2 52 + 50 53 struct vgpu_sched_data { 51 54 struct list_head lru_list; 52 55 struct intel_vgpu *vgpu; 53 56 bool active; 54 - 57 + bool pri_sched; 58 + ktime_t pri_time; 55 59 ktime_t sched_in_time; 56 60 ktime_t sched_time; 57 61 ktime_t left_ts; ··· 187 183 if (!vgpu_has_pending_workload(vgpu_data->vgpu)) 188 184 continue; 189 185 186 + if (vgpu_data->pri_sched) { 187 + if (ktime_before(ktime_get(), vgpu_data->pri_time)) { 188 + vgpu = vgpu_data->vgpu; 189 + break; 190 + } else 191 + vgpu_data->pri_sched = false; 192 + } 193 + 190 194 /* Return the vGPU only if it has time slice left */ 191 195 if (vgpu_data->left_ts > 0) { 192 196 vgpu = vgpu_data->vgpu; ··· 214 202 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 215 203 struct vgpu_sched_data *vgpu_data; 216 204 struct intel_vgpu *vgpu = NULL; 205 + 217 206 /* no active vgpu or has already had a target */ 218 207 if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) 219 208 goto out; ··· 222 209 vgpu = find_busy_vgpu(sched_data); 223 210 if (vgpu) { 224 211 scheduler->next_vgpu = vgpu; 225 - 226 - /* Move the last used vGPU to the tail of lru_list */ 227 212 vgpu_data = vgpu->sched_data; 228 - list_del_init(&vgpu_data->lru_list); 229 - list_add_tail(&vgpu_data->lru_list, 230 - &sched_data->lru_runq_head); 213 + if (!vgpu_data->pri_sched) { 214 + /* Move the last used vGPU to the tail of lru_list */ 215 + list_del_init(&vgpu_data->lru_list); 216 + list_add_tail(&vgpu_data->lru_list, 217 + &sched_data->lru_runq_head); 218 + } 231 219 } else { 232 220 scheduler->next_vgpu = gvt->idle_vgpu; 233 221 } ··· 342 328 { 343 329 struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; 344 330 struct vgpu_sched_data *vgpu_data = vgpu->sched_data; 331 + ktime_t now; 345 332 346 333 if (!list_empty(&vgpu_data->lru_list)) 347 334 return; 348 335 
349 - list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); 336 + now = ktime_get(); 337 + vgpu_data->pri_time = ktime_add(now, 338 + ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0)); 339 + vgpu_data->pri_sched = true; 340 + 341 + list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head); 350 342 351 343 if (!hrtimer_active(&sched_data->timer)) 352 344 hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), ··· 446 426 &vgpu->gvt->scheduler; 447 427 int ring_id; 448 428 struct vgpu_sched_data *vgpu_data = vgpu->sched_data; 429 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 449 430 450 431 if (!vgpu_data->active) 451 432 return; ··· 465 444 scheduler->current_vgpu = NULL; 466 445 } 467 446 447 + intel_runtime_pm_get(dev_priv); 468 448 spin_lock_bh(&scheduler->mmio_context_lock); 469 449 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { 470 450 if (scheduler->engine_owner[ring_id] == vgpu) { ··· 474 452 } 475 453 } 476 454 spin_unlock_bh(&scheduler->mmio_context_lock); 455 + intel_runtime_pm_put(dev_priv); 477 456 mutex_unlock(&vgpu->gvt->sched_lock); 478 457 }
+2 -2
drivers/gpu/drm/i915/i915_reg.h
··· 10422 10422 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ 10423 10423 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) 10424 10424 #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 10425 - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ 10425 + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ 10426 10426 _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) 10427 10427 #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) 10428 10428 #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) ··· 10437 10437 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ 10438 10438 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) 10439 10439 #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 10440 - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ 10440 + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ 10441 10441 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) 10442 10442 #define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) 10443 10443 #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
+9 -8
drivers/gpu/drm/i915/intel_ddi.c
··· 2708 2708 if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) 2709 2709 intel_dp_stop_link_train(intel_dp); 2710 2710 2711 - intel_ddi_enable_pipe_clock(crtc_state); 2711 + if (!is_mst) 2712 + intel_ddi_enable_pipe_clock(crtc_state); 2712 2713 } 2713 2714 2714 2715 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, ··· 2811 2810 bool is_mst = intel_crtc_has_type(old_crtc_state, 2812 2811 INTEL_OUTPUT_DP_MST); 2813 2812 2814 - intel_ddi_disable_pipe_clock(old_crtc_state); 2815 - 2816 - /* 2817 - * Power down sink before disabling the port, otherwise we end 2818 - * up getting interrupts from the sink on detecting link loss. 2819 - */ 2820 - if (!is_mst) 2813 + if (!is_mst) { 2814 + intel_ddi_disable_pipe_clock(old_crtc_state); 2815 + /* 2816 + * Power down sink before disabling the port, otherwise we end 2817 + * up getting interrupts from the sink on detecting link loss. 2818 + */ 2821 2819 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 2820 + } 2822 2821 2823 2822 intel_disable_ddi_buf(encoder); 2824 2823
+19 -14
drivers/gpu/drm/i915/intel_dp.c
··· 4160 4160 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 4161 4161 } 4162 4162 4163 - /* 4164 - * If display is now connected check links status, 4165 - * there has been known issues of link loss triggering 4166 - * long pulse. 4167 - * 4168 - * Some sinks (eg. ASUS PB287Q) seem to perform some 4169 - * weird HPD ping pong during modesets. So we can apparently 4170 - * end up with HPD going low during a modeset, and then 4171 - * going back up soon after. And once that happens we must 4172 - * retrain the link to get a picture. That's in case no 4173 - * userspace component reacted to intermittent HPD dip. 4174 - */ 4175 4163 int intel_dp_retrain_link(struct intel_encoder *encoder, 4176 4164 struct drm_modeset_acquire_ctx *ctx) 4177 4165 { ··· 4649 4661 } 4650 4662 4651 4663 static int 4652 - intel_dp_long_pulse(struct intel_connector *connector) 4664 + intel_dp_long_pulse(struct intel_connector *connector, 4665 + struct drm_modeset_acquire_ctx *ctx) 4653 4666 { 4654 4667 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 4655 4668 struct intel_dp *intel_dp = intel_attached_dp(&connector->base); ··· 4709 4720 */ 4710 4721 status = connector_status_disconnected; 4711 4722 goto out; 4723 + } else { 4724 + /* 4725 + * If display is now connected check links status, 4726 + * there has been known issues of link loss triggering 4727 + * long pulse. 4728 + * 4729 + * Some sinks (eg. ASUS PB287Q) seem to perform some 4730 + * weird HPD ping pong during modesets. So we can apparently 4731 + * end up with HPD going low during a modeset, and then 4732 + * going back up soon after. And once that happens we must 4733 + * retrain the link to get a picture. That's in case no 4734 + * userspace component reacted to intermittent HPD dip. 
4735 + */ 4736 + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4737 + 4738 + intel_dp_retrain_link(encoder, ctx); 4712 4739 } 4713 4740 4714 4741 /* ··· 4786 4781 return ret; 4787 4782 } 4788 4783 4789 - status = intel_dp_long_pulse(intel_dp->attached_connector); 4784 + status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); 4790 4785 } 4791 4786 4792 4787 intel_dp->detect_done = false;
+4
drivers/gpu/drm/i915/intel_dp_mst.c
··· 166 166 struct intel_connector *connector = 167 167 to_intel_connector(old_conn_state->connector); 168 168 169 + intel_ddi_disable_pipe_clock(old_crtc_state); 170 + 169 171 /* this can fail */ 170 172 drm_dp_check_act_status(&intel_dp->mst_mgr); 171 173 /* and this can also fail */ ··· 254 252 I915_WRITE(DP_TP_STATUS(port), temp); 255 253 256 254 ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); 255 + 256 + intel_ddi_enable_pipe_clock(pipe_config); 257 257 } 258 258 259 259 static void intel_mst_enable_dp(struct intel_encoder *encoder,
+50 -19
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 1123 1123 int ret; 1124 1124 1125 1125 if (dpcd >= 0x12) { 1126 - ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd); 1126 + /* Even if we're enabling MST, start with disabling the 1127 + * branching unit to clear any sink-side MST topology state 1128 + * that wasn't set by us 1129 + */ 1130 + ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0); 1127 1131 if (ret < 0) 1128 1132 return ret; 1129 1133 1130 - dpcd &= ~DP_MST_EN; 1131 - if (state) 1132 - dpcd |= DP_MST_EN; 1133 - 1134 - ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd); 1135 - if (ret < 0) 1136 - return ret; 1134 + if (state) { 1135 + /* Now, start initializing */ 1136 + ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 1137 + DP_MST_EN); 1138 + if (ret < 0) 1139 + return ret; 1140 + } 1137 1141 } 1138 1142 1139 1143 return nvif_mthd(disp, 0, &args, sizeof(args)); ··· 1146 1142 int 1147 1143 nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) 1148 1144 { 1149 - int ret, state = 0; 1145 + struct drm_dp_aux *aux; 1146 + int ret; 1147 + bool old_state, new_state; 1148 + u8 mstm_ctrl; 1150 1149 1151 1150 if (!mstm) 1152 1151 return 0; 1153 1152 1154 - if (dpcd[0] >= 0x12) { 1155 - ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]); 1153 + mutex_lock(&mstm->mgr.lock); 1154 + 1155 + old_state = mstm->mgr.mst_state; 1156 + new_state = old_state; 1157 + aux = mstm->mgr.aux; 1158 + 1159 + if (old_state) { 1160 + /* Just check that the MST hub is still as we expect it */ 1161 + ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl); 1162 + if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) { 1163 + DRM_DEBUG_KMS("Hub gone, disabling MST topology\n"); 1164 + new_state = false; 1165 + } 1166 + } else if (dpcd[0] >= 0x12) { 1167 + ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]); 1156 1168 if (ret < 0) 1157 - return ret; 1169 + goto probe_error; 1158 1170 1159 1171 if (!(dpcd[1] & DP_MST_CAP)) 1160 1172 dpcd[0] = 0x11; 1161 1173 else 1162 - state = allow; 1174 + 
new_state = allow; 1163 1175 } 1164 1176 1165 - ret = nv50_mstm_enable(mstm, dpcd[0], state); 1166 - if (ret) 1167 - return ret; 1177 + if (new_state == old_state) { 1178 + mutex_unlock(&mstm->mgr.lock); 1179 + return new_state; 1180 + } 1168 1181 1169 - ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state); 1182 + ret = nv50_mstm_enable(mstm, dpcd[0], new_state); 1183 + if (ret) 1184 + goto probe_error; 1185 + 1186 + mutex_unlock(&mstm->mgr.lock); 1187 + 1188 + ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state); 1170 1189 if (ret) 1171 1190 return nv50_mstm_enable(mstm, dpcd[0], 0); 1172 1191 1173 - return mstm->mgr.mst_state; 1192 + return new_state; 1193 + 1194 + probe_error: 1195 + mutex_unlock(&mstm->mgr.lock); 1196 + return ret; 1174 1197 } 1175 1198 1176 1199 static void ··· 2105 2074 static const struct drm_mode_config_funcs 2106 2075 nv50_disp_func = { 2107 2076 .fb_create = nouveau_user_framebuffer_create, 2108 - .output_poll_changed = drm_fb_helper_output_poll_changed, 2077 + .output_poll_changed = nouveau_fbcon_output_poll_changed, 2109 2078 .atomic_check = nv50_disp_atomic_check, 2110 2079 .atomic_commit = nv50_disp_atomic_commit, 2111 2080 .atomic_state_alloc = nv50_disp_atomic_state_alloc,
+60 -50
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 409 409 nouveau_connector_ddc_detect(struct drm_connector *connector) 410 410 { 411 411 struct drm_device *dev = connector->dev; 412 - struct nouveau_connector *nv_connector = nouveau_connector(connector); 413 - struct nouveau_drm *drm = nouveau_drm(dev); 414 - struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); 415 - struct nouveau_encoder *nv_encoder = NULL; 412 + struct nouveau_encoder *nv_encoder = NULL, *found = NULL; 416 413 struct drm_encoder *encoder; 417 - int i, panel = -ENODEV; 418 - 419 - /* eDP panels need powering on by us (if the VBIOS doesn't default it 420 - * to on) before doing any AUX channel transactions. LVDS panel power 421 - * is handled by the SOR itself, and not required for LVDS DDC. 422 - */ 423 - if (nv_connector->type == DCB_CONNECTOR_eDP) { 424 - panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); 425 - if (panel == 0) { 426 - nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); 427 - msleep(300); 428 - } 429 - } 414 + int i, ret; 415 + bool switcheroo_ddc = false; 430 416 431 417 drm_connector_for_each_possible_encoder(connector, encoder, i) { 432 418 nv_encoder = nouveau_encoder(encoder); 433 419 434 - if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { 435 - int ret = nouveau_dp_detect(nv_encoder); 420 + switch (nv_encoder->dcb->type) { 421 + case DCB_OUTPUT_DP: 422 + ret = nouveau_dp_detect(nv_encoder); 436 423 if (ret == NOUVEAU_DP_MST) 437 424 return NULL; 438 - if (ret == NOUVEAU_DP_SST) 425 + else if (ret == NOUVEAU_DP_SST) 426 + found = nv_encoder; 427 + 428 + break; 429 + case DCB_OUTPUT_LVDS: 430 + switcheroo_ddc = !!(vga_switcheroo_handler_flags() & 431 + VGA_SWITCHEROO_CAN_SWITCH_DDC); 432 + /* fall-through */ 433 + default: 434 + if (!nv_encoder->i2c) 439 435 break; 440 - } else 441 - if ((vga_switcheroo_handler_flags() & 442 - VGA_SWITCHEROO_CAN_SWITCH_DDC) && 443 - nv_encoder->dcb->type == DCB_OUTPUT_LVDS && 444 - nv_encoder->i2c) { 445 - int ret; 446 - vga_switcheroo_lock_ddc(dev->pdev); 447 - ret = 
nvkm_probe_i2c(nv_encoder->i2c, 0x50); 448 - vga_switcheroo_unlock_ddc(dev->pdev); 449 - if (ret) 450 - break; 451 - } else 452 - if (nv_encoder->i2c) { 436 + 437 + if (switcheroo_ddc) 438 + vga_switcheroo_lock_ddc(dev->pdev); 453 439 if (nvkm_probe_i2c(nv_encoder->i2c, 0x50)) 454 - break; 440 + found = nv_encoder; 441 + if (switcheroo_ddc) 442 + vga_switcheroo_unlock_ddc(dev->pdev); 443 + 444 + break; 455 445 } 446 + if (found) 447 + break; 456 448 } 457 449 458 - /* eDP panel not detected, restore panel power GPIO to previous 459 - * state to avoid confusing the SOR for other output types. 460 - */ 461 - if (!nv_encoder && panel == 0) 462 - nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel); 463 - 464 - return nv_encoder; 450 + return found; 465 451 } 466 452 467 453 static struct nouveau_encoder * ··· 541 555 nv_connector->edid = NULL; 542 556 } 543 557 544 - /* Outputs are only polled while runtime active, so acquiring a 545 - * runtime PM ref here is unnecessary (and would deadlock upon 546 - * runtime suspend because it waits for polling to finish). 558 + /* Outputs are only polled while runtime active, so resuming the 559 + * device here is unnecessary (and would deadlock upon runtime suspend 560 + * because it waits for polling to finish). We do however, want to 561 + * prevent the autosuspend timer from elapsing during this operation 562 + * if possible. 
547 563 */ 548 - if (!drm_kms_helper_is_poll_worker()) { 549 - ret = pm_runtime_get_sync(connector->dev->dev); 564 + if (drm_kms_helper_is_poll_worker()) { 565 + pm_runtime_get_noresume(dev->dev); 566 + } else { 567 + ret = pm_runtime_get_sync(dev->dev); 550 568 if (ret < 0 && ret != -EACCES) 551 569 return conn_status; 552 570 } ··· 628 638 629 639 out: 630 640 631 - if (!drm_kms_helper_is_poll_worker()) { 632 - pm_runtime_mark_last_busy(connector->dev->dev); 633 - pm_runtime_put_autosuspend(connector->dev->dev); 634 - } 641 + pm_runtime_mark_last_busy(dev->dev); 642 + pm_runtime_put_autosuspend(dev->dev); 635 643 636 644 return conn_status; 637 645 } ··· 1093 1105 const struct nvif_notify_conn_rep_v0 *rep = notify->data; 1094 1106 const char *name = connector->name; 1095 1107 struct nouveau_encoder *nv_encoder; 1108 + int ret; 1109 + 1110 + ret = pm_runtime_get(drm->dev->dev); 1111 + if (ret == 0) { 1112 + /* We can't block here if there's a pending PM request 1113 + * running, as we'll deadlock nouveau_display_fini() when it 1114 + * calls nvif_put() on our nvif_notify struct. So, simply 1115 + * defer the hotplug event until the device finishes resuming 1116 + */ 1117 + NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n", 1118 + name); 1119 + schedule_work(&drm->hpd_work); 1120 + 1121 + pm_runtime_put_noidle(drm->dev->dev); 1122 + return NVIF_NOTIFY_KEEP; 1123 + } else if (ret != 1 && ret != -EACCES) { 1124 + NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n", 1125 + name, ret); 1126 + return NVIF_NOTIFY_DROP; 1127 + } 1096 1128 1097 1129 if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { 1098 1130 NV_DEBUG(drm, "service %s\n", name); ··· 1130 1122 drm_helper_hpd_irq_event(connector->dev); 1131 1123 } 1132 1124 1125 + pm_runtime_mark_last_busy(drm->dev->dev); 1126 + pm_runtime_put_autosuspend(drm->dev->dev); 1133 1127 return NVIF_NOTIFY_KEEP; 1134 1128 } 1135 1129
+32 -12
drivers/gpu/drm/nouveau/nouveau_display.c
··· 293 293 294 294 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 295 295 .fb_create = nouveau_user_framebuffer_create, 296 - .output_poll_changed = drm_fb_helper_output_poll_changed, 296 + .output_poll_changed = nouveau_fbcon_output_poll_changed, 297 297 }; 298 298 299 299 ··· 355 355 pm_runtime_get_sync(drm->dev->dev); 356 356 357 357 drm_helper_hpd_irq_event(drm->dev); 358 - /* enable polling for external displays */ 359 - drm_kms_helper_poll_enable(drm->dev); 360 358 361 359 pm_runtime_mark_last_busy(drm->dev->dev); 362 360 pm_runtime_put_sync(drm->dev->dev); ··· 377 379 { 378 380 struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); 379 381 struct acpi_bus_event *info = data; 382 + int ret; 380 383 381 384 if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { 382 385 if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { 383 - /* 384 - * This may be the only indication we receive of a 385 - * connector hotplug on a runtime suspended GPU, 386 - * schedule hpd_work to check. 387 - */ 388 - schedule_work(&drm->hpd_work); 386 + ret = pm_runtime_get(drm->dev->dev); 387 + if (ret == 1 || ret == -EACCES) { 388 + /* If the GPU is already awake, or in a state 389 + * where we can't wake it up, it can handle 390 + * it's own hotplug events. 391 + */ 392 + pm_runtime_put_autosuspend(drm->dev->dev); 393 + } else if (ret == 0) { 394 + /* This may be the only indication we receive 395 + * of a connector hotplug on a runtime 396 + * suspended GPU, schedule hpd_work to check. 
397 + */ 398 + NV_DEBUG(drm, "ACPI requested connector reprobe\n"); 399 + schedule_work(&drm->hpd_work); 400 + pm_runtime_put_noidle(drm->dev->dev); 401 + } else { 402 + NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n", 403 + ret); 404 + } 389 405 390 406 /* acpi-video should not generate keypresses for this */ 391 407 return NOTIFY_BAD; ··· 423 411 if (ret) 424 412 return ret; 425 413 414 + /* enable connector detection and polling for connectors without HPD 415 + * support 416 + */ 417 + drm_kms_helper_poll_enable(dev); 418 + 426 419 /* enable hotplug interrupts */ 427 420 drm_connector_list_iter_begin(dev, &conn_iter); 428 421 nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { ··· 442 425 } 443 426 444 427 void 445 - nouveau_display_fini(struct drm_device *dev, bool suspend) 428 + nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime) 446 429 { 447 430 struct nouveau_display *disp = nouveau_display(dev); 448 431 struct nouveau_drm *drm = nouveau_drm(dev); ··· 466 449 nvif_notify_put(&conn->hpd); 467 450 } 468 451 drm_connector_list_iter_end(&conn_iter); 452 + 453 + if (!runtime) 454 + cancel_work_sync(&drm->hpd_work); 469 455 470 456 drm_kms_helper_poll_disable(dev); 471 457 disp->fini(dev); ··· 638 618 } 639 619 } 640 620 641 - nouveau_display_fini(dev, true); 621 + nouveau_display_fini(dev, true, runtime); 642 622 return 0; 643 623 } 644 624 645 - nouveau_display_fini(dev, true); 625 + nouveau_display_fini(dev, true, runtime); 646 626 647 627 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 648 628 struct nouveau_framebuffer *nouveau_fb;
+1 -1
drivers/gpu/drm/nouveau/nouveau_display.h
··· 62 62 int nouveau_display_create(struct drm_device *dev); 63 63 void nouveau_display_destroy(struct drm_device *dev); 64 64 int nouveau_display_init(struct drm_device *dev); 65 - void nouveau_display_fini(struct drm_device *dev, bool suspend); 65 + void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime); 66 66 int nouveau_display_suspend(struct drm_device *dev, bool runtime); 67 67 void nouveau_display_resume(struct drm_device *dev, bool runtime); 68 68 int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
+9 -12
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 230 230 mutex_unlock(&drm->master.lock); 231 231 } 232 232 if (ret) { 233 - NV_ERROR(drm, "Client allocation failed: %d\n", ret); 233 + NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret); 234 234 goto done; 235 235 } 236 236 ··· 240 240 }, sizeof(struct nv_device_v0), 241 241 &cli->device); 242 242 if (ret) { 243 - NV_ERROR(drm, "Device allocation failed: %d\n", ret); 243 + NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret); 244 244 goto done; 245 245 } 246 246 247 247 ret = nvif_mclass(&cli->device.object, mmus); 248 248 if (ret < 0) { 249 - NV_ERROR(drm, "No supported MMU class\n"); 249 + NV_PRINTK(err, cli, "No supported MMU class\n"); 250 250 goto done; 251 251 } 252 252 253 253 ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu); 254 254 if (ret) { 255 - NV_ERROR(drm, "MMU allocation failed: %d\n", ret); 255 + NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret); 256 256 goto done; 257 257 } 258 258 259 259 ret = nvif_mclass(&cli->mmu.object, vmms); 260 260 if (ret < 0) { 261 - NV_ERROR(drm, "No supported VMM class\n"); 261 + NV_PRINTK(err, cli, "No supported VMM class\n"); 262 262 goto done; 263 263 } 264 264 265 265 ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm); 266 266 if (ret) { 267 - NV_ERROR(drm, "VMM allocation failed: %d\n", ret); 267 + NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret); 268 268 goto done; 269 269 } 270 270 271 271 ret = nvif_mclass(&cli->mmu.object, mems); 272 272 if (ret < 0) { 273 - NV_ERROR(drm, "No supported MEM class\n"); 273 + NV_PRINTK(err, cli, "No supported MEM class\n"); 274 274 goto done; 275 275 } 276 276 ··· 592 592 pm_runtime_allow(dev->dev); 593 593 pm_runtime_mark_last_busy(dev->dev); 594 594 pm_runtime_put(dev->dev); 595 - } else { 596 - /* enable polling for external displays */ 597 - drm_kms_helper_poll_enable(dev); 598 595 } 596 + 599 597 return 0; 600 598 601 599 fail_dispinit: ··· 627 629 nouveau_debugfs_fini(drm); 628 630 629 631 if 
(dev->mode_config.num_crtc) 630 - nouveau_display_fini(dev, false); 632 + nouveau_display_fini(dev, false, false); 631 633 nouveau_display_destroy(dev); 632 634 633 635 nouveau_bios_takedown(dev); ··· 833 835 return -EBUSY; 834 836 } 835 837 836 - drm_kms_helper_poll_disable(drm_dev); 837 838 nouveau_switcheroo_optimus_dsm(); 838 839 ret = nouveau_do_suspend(drm_dev, true); 839 840 pci_save_state(pdev);
+57
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 466 466 console_unlock(); 467 467 468 468 if (state == FBINFO_STATE_RUNNING) { 469 + nouveau_fbcon_hotplug_resume(drm->fbcon); 469 470 pm_runtime_mark_last_busy(drm->dev->dev); 470 471 pm_runtime_put_sync(drm->dev->dev); 471 472 } ··· 488 487 schedule_work(&drm->fbcon_work); 489 488 } 490 489 490 + void 491 + nouveau_fbcon_output_poll_changed(struct drm_device *dev) 492 + { 493 + struct nouveau_drm *drm = nouveau_drm(dev); 494 + struct nouveau_fbdev *fbcon = drm->fbcon; 495 + int ret; 496 + 497 + if (!fbcon) 498 + return; 499 + 500 + mutex_lock(&fbcon->hotplug_lock); 501 + 502 + ret = pm_runtime_get(dev->dev); 503 + if (ret == 1 || ret == -EACCES) { 504 + drm_fb_helper_hotplug_event(&fbcon->helper); 505 + 506 + pm_runtime_mark_last_busy(dev->dev); 507 + pm_runtime_put_autosuspend(dev->dev); 508 + } else if (ret == 0) { 509 + /* If the GPU was already in the process of suspending before 510 + * this event happened, then we can't block here as we'll 511 + * deadlock the runtime pmops since they wait for us to 512 + * finish. So, just defer this event for when we runtime 513 + * resume again. It will be handled by fbcon_work. 
514 + */ 515 + NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n"); 516 + fbcon->hotplug_waiting = true; 517 + pm_runtime_put_noidle(drm->dev->dev); 518 + } else { 519 + DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n", 520 + ret); 521 + } 522 + 523 + mutex_unlock(&fbcon->hotplug_lock); 524 + } 525 + 526 + void 527 + nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon) 528 + { 529 + struct nouveau_drm *drm; 530 + 531 + if (!fbcon) 532 + return; 533 + drm = nouveau_drm(fbcon->helper.dev); 534 + 535 + mutex_lock(&fbcon->hotplug_lock); 536 + if (fbcon->hotplug_waiting) { 537 + fbcon->hotplug_waiting = false; 538 + 539 + NV_DEBUG(drm, "Handling deferred fbcon HPD events\n"); 540 + drm_fb_helper_hotplug_event(&fbcon->helper); 541 + } 542 + mutex_unlock(&fbcon->hotplug_lock); 543 + } 544 + 491 545 int 492 546 nouveau_fbcon_init(struct drm_device *dev) 493 547 { ··· 561 505 562 506 drm->fbcon = fbcon; 563 507 INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); 508 + mutex_init(&fbcon->hotplug_lock); 564 509 565 510 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); 566 511
+5
drivers/gpu/drm/nouveau/nouveau_fbcon.h
··· 41 41 struct nvif_object gdi; 42 42 struct nvif_object blit; 43 43 struct nvif_object twod; 44 + 45 + struct mutex hotplug_lock; 46 + bool hotplug_waiting; 44 47 }; 45 48 46 49 void nouveau_fbcon_restore(void); ··· 71 68 void nouveau_fbcon_accel_save_disable(struct drm_device *dev); 72 69 void nouveau_fbcon_accel_restore(struct drm_device *dev); 73 70 71 + void nouveau_fbcon_output_poll_changed(struct drm_device *dev); 72 + void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon); 74 73 extern int nouveau_nofbaccel; 75 74 76 75 #endif /* __NV50_FBCON_H__ */
-2
drivers/gpu/drm/nouveau/nouveau_vga.c
··· 46 46 pr_err("VGA switcheroo: switched nouveau on\n"); 47 47 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 48 48 nouveau_pmops_resume(&pdev->dev); 49 - drm_kms_helper_poll_enable(dev); 50 49 dev->switch_power_state = DRM_SWITCH_POWER_ON; 51 50 } else { 52 51 pr_err("VGA switcheroo: switched nouveau off\n"); 53 52 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 54 - drm_kms_helper_poll_disable(dev); 55 53 nouveau_switcheroo_optimus_dsm(); 56 54 nouveau_pmops_suspend(&pdev->dev); 57 55 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+14
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
··· 275 275 struct nvkm_outp *outp, *outt, *pair; 276 276 struct nvkm_conn *conn; 277 277 struct nvkm_head *head; 278 + struct nvkm_ior *ior; 278 279 struct nvbios_connE connE; 279 280 struct dcb_output dcbE; 280 281 u8 hpd = 0, ver, hdr; ··· 398 397 ret = disp->func->oneinit(disp); 399 398 if (ret) 400 399 return ret; 400 + } 401 + 402 + /* Enforce identity-mapped SOR assignment for panels, which have 403 + * certain bits (ie. backlight controls) wired to a specific SOR. 404 + */ 405 + list_for_each_entry(outp, &disp->outp, head) { 406 + if (outp->conn->info.type == DCB_CONNECTOR_LVDS || 407 + outp->conn->info.type == DCB_CONNECTOR_eDP) { 408 + ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1); 409 + if (!WARN_ON(!ior)) 410 + ior->identity = true; 411 + outp->identity = true; 412 + } 401 413 } 402 414 403 415 i = 0;
+46 -8
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
··· 28 28 29 29 #include <subdev/bios.h> 30 30 #include <subdev/bios/init.h> 31 + #include <subdev/gpio.h> 31 32 #include <subdev/i2c.h> 32 33 33 34 #include <nvif/event.h> ··· 413 412 } 414 413 415 414 static void 416 - nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior) 415 + nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior) 417 416 { 418 417 struct nvkm_dp *dp = nvkm_dp(outp); 419 - 420 - /* Prevent link from being retrained if sink sends an IRQ. */ 421 - atomic_set(&dp->lt.done, 0); 422 - ior->dp.nr = 0; 423 418 424 419 /* Execute DisableLT script from DP Info Table. */ 425 420 nvbios_init(&ior->disp->engine.subdev, dp->info.script[4], ··· 423 426 init.or = ior->id; 424 427 init.link = ior->arm.link; 425 428 ); 429 + } 430 + 431 + static void 432 + nvkm_dp_release(struct nvkm_outp *outp) 433 + { 434 + struct nvkm_dp *dp = nvkm_dp(outp); 435 + 436 + /* Prevent link from being retrained if sink sends an IRQ. */ 437 + atomic_set(&dp->lt.done, 0); 438 + dp->outp.ior->dp.nr = 0; 426 439 } 427 440 428 441 static int ··· 498 491 return ret; 499 492 } 500 493 501 - static void 494 + static bool 502 495 nvkm_dp_enable(struct nvkm_dp *dp, bool enable) 503 496 { 504 497 struct nvkm_i2c_aux *aux = dp->aux; ··· 512 505 513 506 if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, 514 507 sizeof(dp->dpcd))) 515 - return; 508 + return true; 516 509 } 517 510 518 511 if (dp->present) { ··· 522 515 } 523 516 524 517 atomic_set(&dp->lt.done, 0); 518 + return false; 525 519 } 526 520 527 521 static int ··· 563 555 static void 564 556 nvkm_dp_init(struct nvkm_outp *outp) 565 557 { 558 + struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio; 566 559 struct nvkm_dp *dp = nvkm_dp(outp); 560 + 567 561 nvkm_notify_put(&dp->outp.conn->hpd); 568 - nvkm_dp_enable(dp, true); 562 + 563 + /* eDP panels need powering on by us (if the VBIOS doesn't default it 564 + * to on) before doing any AUX channel transactions. 
LVDS panel power 565 + * is handled by the SOR itself, and not required for LVDS DDC. 566 + */ 567 + if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) { 568 + int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); 569 + if (power == 0) 570 + nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); 571 + 572 + /* We delay here unconditionally, even if already powered, 573 + * because some laptop panels having a significant resume 574 + * delay before the panel begins responding. 575 + * 576 + * This is likely a bit of a hack, but no better idea for 577 + * handling this at the moment. 578 + */ 579 + msleep(300); 580 + 581 + /* If the eDP panel can't be detected, we need to restore 582 + * the panel power GPIO to avoid breaking another output. 583 + */ 584 + if (!nvkm_dp_enable(dp, true) && power == 0) 585 + nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0); 586 + } else { 587 + nvkm_dp_enable(dp, true); 588 + } 589 + 569 590 nvkm_notify_get(&dp->hpd); 570 591 } 571 592 ··· 613 576 .fini = nvkm_dp_fini, 614 577 .acquire = nvkm_dp_acquire, 615 578 .release = nvkm_dp_release, 579 + .disable = nvkm_dp_disable, 616 580 }; 617 581 618 582 static int
+1
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
··· 16 16 char name[8]; 17 17 18 18 struct list_head head; 19 + bool identity; 19 20 20 21 struct nvkm_ior_state { 21 22 struct nvkm_outp *outp;
+3 -3
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
··· 501 501 nv50_disp_super_ied_off(head, ior, 2); 502 502 503 503 /* If we're shutting down the OR's only active head, execute 504 - * the output path's release function. 504 + * the output path's disable function. 505 505 */ 506 506 if (ior->arm.head == (1 << head->id)) { 507 - if ((outp = ior->arm.outp) && outp->func->release) 508 - outp->func->release(outp, ior); 507 + if ((outp = ior->arm.outp) && outp->func->disable) 508 + outp->func->disable(outp, ior); 509 509 } 510 510 } 511 511
+14 -4
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
··· 93 93 if (ior) { 94 94 outp->acquired &= ~user; 95 95 if (!outp->acquired) { 96 + if (outp->func->release && outp->ior) 97 + outp->func->release(outp); 96 98 outp->ior->asy.outp = NULL; 97 99 outp->ior = NULL; 98 100 } ··· 129 127 if (proto == UNKNOWN) 130 128 return -ENOSYS; 131 129 130 + /* Deal with panels requiring identity-mapped SOR assignment. */ 131 + if (outp->identity) { 132 + ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1); 133 + if (WARN_ON(!ior)) 134 + return -ENOSPC; 135 + return nvkm_outp_acquire_ior(outp, user, ior); 136 + } 137 + 132 138 /* First preference is to reuse the OR that is currently armed 133 139 * on HW, if any, in order to prevent unnecessary switching. 134 140 */ 135 141 list_for_each_entry(ior, &outp->disp->ior, head) { 136 - if (!ior->asy.outp && ior->arm.outp == outp) 142 + if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) 137 143 return nvkm_outp_acquire_ior(outp, user, ior); 138 144 } 139 145 140 146 /* Failing that, a completely unused OR is the next best thing. */ 141 147 list_for_each_entry(ior, &outp->disp->ior, head) { 142 - if (!ior->asy.outp && ior->type == type && !ior->arm.outp && 148 + if (!ior->identity && 149 + !ior->asy.outp && ior->type == type && !ior->arm.outp && 143 150 (ior->func->route.set || ior->id == __ffs(outp->info.or))) 144 151 return nvkm_outp_acquire_ior(outp, user, ior); 145 152 } ··· 157 146 * but will be released during the next modeset. 
158 147 */ 159 148 list_for_each_entry(ior, &outp->disp->ior, head) { 160 - if (!ior->asy.outp && ior->type == type && 149 + if (!ior->identity && !ior->asy.outp && ior->type == type && 161 150 (ior->func->route.set || ior->id == __ffs(outp->info.or))) 162 151 return nvkm_outp_acquire_ior(outp, user, ior); 163 152 } ··· 256 245 outp->index = index; 257 246 outp->info = *dcbE; 258 247 outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index); 259 - outp->or = ffs(outp->info.or) - 1; 260 248 261 249 OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x " 262 250 "edid %x bus %d head %x",
+3 -2
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
··· 13 13 struct dcb_output info; 14 14 15 15 struct nvkm_i2c_bus *i2c; 16 - int or; 17 16 18 17 struct list_head head; 19 18 struct nvkm_conn *conn; 19 + bool identity; 20 20 21 21 /* Assembly state. */ 22 22 #define NVKM_OUTP_PRIV 1 ··· 41 41 void (*init)(struct nvkm_outp *); 42 42 void (*fini)(struct nvkm_outp *); 43 43 int (*acquire)(struct nvkm_outp *); 44 - void (*release)(struct nvkm_outp *, struct nvkm_ior *); 44 + void (*release)(struct nvkm_outp *); 45 + void (*disable)(struct nvkm_outp *, struct nvkm_ior *); 45 46 }; 46 47 47 48 #define OUTP_MSG(o,l,f,a...) do { \
+2 -1
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
··· 158 158 } 159 159 160 160 /* load and execute some other ucode image (bios therm?) */ 161 - return pmu_load(init, 0x01, post, NULL, NULL); 161 + pmu_load(init, 0x01, post, NULL, NULL); 162 + return 0; 162 163 } 163 164 164 165 static const struct nvkm_devinit_func
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
··· 1423 1423 void 1424 1424 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) 1425 1425 { 1426 - if (vmm->func->part && inst) { 1426 + if (inst && vmm->func->part) { 1427 1427 mutex_lock(&vmm->mutex); 1428 1428 vmm->func->part(vmm, inst); 1429 1429 mutex_unlock(&vmm->mutex);
+8 -1
drivers/hid/hid-apple.c
··· 335 335 struct hid_field *field, struct hid_usage *usage, 336 336 unsigned long **bit, int *max) 337 337 { 338 - if (usage->hid == (HID_UP_CUSTOM | 0x0003)) { 338 + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || 339 + usage->hid == (HID_UP_MSVENDOR | 0x0003)) { 339 340 /* The fn key on Apple USB keyboards */ 340 341 set_bit(EV_REP, hi->input->evbit); 341 342 hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); ··· 472 471 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), 473 472 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 474 473 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), 474 + .driver_data = APPLE_HAS_FN }, 475 + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), 476 + .driver_data = APPLE_HAS_FN }, 477 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI), 478 + .driver_data = APPLE_HAS_FN }, 479 + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI), 475 480 .driver_data = APPLE_HAS_FN }, 476 481 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), 477 482 .driver_data = APPLE_HAS_FN },
+4 -1
drivers/hid/hid-core.c
··· 1000 1000 parser = vzalloc(sizeof(struct hid_parser)); 1001 1001 if (!parser) { 1002 1002 ret = -ENOMEM; 1003 - goto err; 1003 + goto alloc_err; 1004 1004 } 1005 1005 1006 1006 parser->device = device; ··· 1039 1039 hid_err(device, "unbalanced delimiter at end of report description\n"); 1040 1040 goto err; 1041 1041 } 1042 + kfree(parser->collection_stack); 1042 1043 vfree(parser); 1043 1044 device->status |= HID_STAT_PARSED; 1044 1045 return 0; ··· 1048 1047 1049 1048 hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); 1050 1049 err: 1050 + kfree(parser->collection_stack); 1051 + alloc_err: 1051 1052 vfree(parser); 1052 1053 hid_close_report(device); 1053 1054 return ret;
+3 -3
drivers/hid/hid-ids.h
··· 88 88 #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 89 89 90 90 #define USB_VENDOR_ID_APPLE 0x05ac 91 + #define BT_VENDOR_ID_APPLE 0x004c 91 92 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 92 93 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d 93 94 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e ··· 158 157 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 159 158 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 160 159 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267 160 + #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c 161 161 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 162 162 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 163 163 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 ··· 529 527 530 528 #define I2C_VENDOR_ID_HANTICK 0x0911 531 529 #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 532 - 533 - #define I2C_VENDOR_ID_RAYD 0x2386 534 - #define I2C_PRODUCT_ID_RAYD_3118 0x3118 535 530 536 531 #define USB_VENDOR_ID_HANWANG 0x0b57 537 532 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 ··· 949 950 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 950 951 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 951 952 #define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb 953 + #define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd 952 954 #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 953 955 #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa 954 956 #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
+3 -2
drivers/hid/hid-input.c
··· 1582 1582 input_dev->dev.parent = &hid->dev; 1583 1583 1584 1584 hidinput->input = input_dev; 1585 + hidinput->application = application; 1585 1586 list_add_tail(&hidinput->list, &hid->inputs); 1586 1587 1587 1588 INIT_LIST_HEAD(&hidinput->reports); ··· 1678 1677 struct hid_input *hidinput; 1679 1678 1680 1679 list_for_each_entry(hidinput, &hid->inputs, list) { 1681 - if (hidinput->report && 1682 - hidinput->report->application == report->application) 1680 + if (hidinput->application == report->application) 1683 1681 return hidinput; 1684 1682 } 1685 1683 ··· 1815 1815 input_unregister_device(hidinput->input); 1816 1816 else 1817 1817 input_free_device(hidinput->input); 1818 + kfree(hidinput->name); 1818 1819 kfree(hidinput); 1819 1820 } 1820 1821
+17 -2
drivers/hid/hid-multitouch.c
··· 1375 1375 struct hid_usage *usage, 1376 1376 enum latency_mode latency, 1377 1377 bool surface_switch, 1378 - bool button_switch) 1378 + bool button_switch, 1379 + bool *inputmode_found) 1379 1380 { 1380 1381 struct mt_device *td = hid_get_drvdata(hdev); 1381 1382 struct mt_class *cls = &td->mtclass; ··· 1388 1387 1389 1388 switch (usage->hid) { 1390 1389 case HID_DG_INPUTMODE: 1390 + /* 1391 + * Some elan panels wrongly declare 2 input mode features, 1392 + * and silently ignore when we set the value in the second 1393 + * field. Skip the second feature and hope for the best. 1394 + */ 1395 + if (*inputmode_found) 1396 + return false; 1397 + 1391 1398 if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) { 1392 1399 report_len = hid_report_len(report); 1393 1400 buf = hid_alloc_report_buf(report, GFP_KERNEL); ··· 1411 1402 } 1412 1403 1413 1404 field->value[index] = td->inputmode_value; 1405 + *inputmode_found = true; 1414 1406 return true; 1415 1407 1416 1408 case HID_DG_CONTACTMAX: ··· 1449 1439 struct hid_usage *usage; 1450 1440 int i, j; 1451 1441 bool update_report; 1442 + bool inputmode_found = false; 1452 1443 1453 1444 rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; 1454 1445 list_for_each_entry(rep, &rep_enum->report_list, list) { ··· 1468 1457 usage, 1469 1458 latency, 1470 1459 surface_switch, 1471 - button_switch)) 1460 + button_switch, 1461 + &inputmode_found)) 1472 1462 update_report = true; 1473 1463 } 1474 1464 } ··· 1696 1684 * device. 1697 1685 */ 1698 1686 hdev->quirks |= HID_QUIRK_INPUT_PER_APP; 1687 + 1688 + if (id->group != HID_GROUP_MULTITOUCH_WIN_8) 1689 + hdev->quirks |= HID_QUIRK_MULTI_INPUT; 1699 1690 1700 1691 timer_setup(&td->release_timer, mt_expired_timeout, 0); 1701 1692
+2
drivers/hid/hid-saitek.c
··· 183 183 .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 184 184 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), 185 185 .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 186 + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION), 187 + .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 186 188 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9), 187 189 .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 188 190 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
+23
drivers/hid/hid-sensor-hub.c
··· 579 579 } 580 580 EXPORT_SYMBOL_GPL(sensor_hub_device_close); 581 581 582 + static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc, 583 + unsigned int *rsize) 584 + { 585 + /* 586 + * Checks if the report descriptor of Thinkpad Helix 2 has a logical 587 + * minimum for magnetic flux axis greater than the maximum. 588 + */ 589 + if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA && 590 + *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 && 591 + rdesc[915] == 0x81 && rdesc[916] == 0x08 && 592 + rdesc[917] == 0x00 && rdesc[918] == 0x27 && 593 + rdesc[921] == 0x07 && rdesc[922] == 0x00) { 594 + /* Sets negative logical minimum for mag x, y and z */ 595 + rdesc[914] = rdesc[935] = rdesc[956] = 0xc0; 596 + rdesc[915] = rdesc[936] = rdesc[957] = 0x7e; 597 + rdesc[916] = rdesc[937] = rdesc[958] = 0xf7; 598 + rdesc[917] = rdesc[938] = rdesc[959] = 0xff; 599 + } 600 + 601 + return rdesc; 602 + } 603 + 582 604 static int sensor_hub_probe(struct hid_device *hdev, 583 605 const struct hid_device_id *id) 584 606 { ··· 765 743 .probe = sensor_hub_probe, 766 744 .remove = sensor_hub_remove, 767 745 .raw_event = sensor_hub_raw_event, 746 + .report_fixup = sensor_hub_report_fixup, 768 747 #ifdef CONFIG_PM 769 748 .suspend = sensor_hub_suspend, 770 749 .resume = sensor_hub_resume,
+7 -4
drivers/hid/i2c-hid/i2c-hid.c
··· 170 170 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, 171 171 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 172 172 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, 173 - { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, 174 - I2C_HID_QUIRK_RESEND_REPORT_DESCR }, 175 173 { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH, 176 174 I2C_HID_QUIRK_RESEND_REPORT_DESCR }, 177 175 { 0, 0 } ··· 1233 1235 pm_runtime_enable(dev); 1234 1236 1235 1237 enable_irq(client->irq); 1236 - ret = i2c_hid_hwreset(client); 1238 + 1239 + /* Instead of resetting device, simply powers the device on. This 1240 + * solves "incomplete reports" on Raydium devices 2386:3118 and 1241 + * 2386:4B33 1242 + */ 1243 + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); 1237 1244 if (ret) 1238 1245 return ret; 1239 1246 1240 - /* RAYDIUM device (2386:3118) need to re-send report descr cmd 1247 + /* Some devices need to re-send report descr cmd 1241 1248 * after resume, after this it will be back normal. 1242 1249 * otherwise it issues too many incomplete reports. 1243 1250 */
+1
drivers/hid/intel-ish-hid/ipc/hw-ish.h
··· 29 29 #define CNL_Ax_DEVICE_ID 0x9DFC 30 30 #define GLK_Ax_DEVICE_ID 0x31A2 31 31 #define CNL_H_DEVICE_ID 0xA37C 32 + #define SPT_H_DEVICE_ID 0xA135 32 33 33 34 #define REVISION_ID_CHT_A0 0x6 34 35 #define REVISION_ID_CHT_Ax_SI 0x0
+1
drivers/hid/intel-ish-hid/ipc/pci-ish.c
··· 38 38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, 39 39 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, 40 40 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, 41 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, 41 42 {0, } 42 43 }; 43 44 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+1
drivers/hwmon/raspberrypi-hwmon.c
··· 164 164 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); 165 165 MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); 166 166 MODULE_LICENSE("GPL v2"); 167 + MODULE_ALIAS("platform:raspberrypi-hwmon");
+6 -1
drivers/i2c/busses/i2c-i801.c
··· 140 140 141 141 #define SBREG_BAR 0x10 142 142 #define SBREG_SMBCTRL 0xc6000c 143 + #define SBREG_SMBCTRL_DNV 0xcf000c 143 144 144 145 /* Host status bits for SMBPCISTS */ 145 146 #define SMBPCISTS_INTS BIT(3) ··· 1400 1399 spin_unlock(&p2sb_spinlock); 1401 1400 1402 1401 res = &tco_res[ICH_RES_MEM_OFF]; 1403 - res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; 1402 + if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) 1403 + res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; 1404 + else 1405 + res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; 1406 + 1404 1407 res->end = res->start + 3; 1405 1408 res->flags = IORESOURCE_MEM; 1406 1409
-1
drivers/i2c/busses/i2c-imx-lpi2c.c
··· 538 538 539 539 static const struct of_device_id lpi2c_imx_of_match[] = { 540 540 { .compatible = "fsl,imx7ulp-lpi2c" }, 541 - { .compatible = "fsl,imx8dv-lpi2c" }, 542 541 { }, 543 542 }; 544 543 MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
+2 -5
drivers/i2c/busses/i2c-uniphier-f.c
··· 401 401 return ret; 402 402 403 403 for (msg = msgs; msg < emsg; msg++) { 404 - /* If next message is read, skip the stop condition */ 405 - bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); 406 - /* but, force it if I2C_M_STOP is set */ 407 - if (msg->flags & I2C_M_STOP) 408 - stop = true; 404 + /* Emit STOP if it is the last message or I2C_M_STOP is set. */ 405 + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); 409 406 410 407 ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); 411 408 if (ret)
+2 -5
drivers/i2c/busses/i2c-uniphier.c
··· 248 248 return ret; 249 249 250 250 for (msg = msgs; msg < emsg; msg++) { 251 - /* If next message is read, skip the stop condition */ 252 - bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); 253 - /* but, force it if I2C_M_STOP is set */ 254 - if (msg->flags & I2C_M_STOP) 255 - stop = true; 251 + /* Emit STOP if it is the last message or I2C_M_STOP is set. */ 252 + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); 256 253 257 254 ret = uniphier_i2c_master_xfer_one(adap, msg, stop); 258 255 if (ret)
+4
drivers/i2c/busses/i2c-xiic.c
··· 532 532 { 533 533 u8 rx_watermark; 534 534 struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; 535 + unsigned long flags; 535 536 536 537 /* Clear and enable Rx full interrupt. */ 537 538 xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); ··· 548 547 rx_watermark = IIC_RX_FIFO_DEPTH; 549 548 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); 550 549 550 + local_irq_save(flags); 551 551 if (!(msg->flags & I2C_M_NOSTART)) 552 552 /* write the address */ 553 553 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, ··· 558 556 559 557 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, 560 558 msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); 559 + local_irq_restore(flags); 560 + 561 561 if (i2c->nmsgs == 1) 562 562 /* very last, enable bus not busy as well */ 563 563 xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
+7 -5
drivers/infiniband/core/cma.c
··· 724 724 dgid = (union ib_gid *) &addr->sib_addr; 725 725 pkey = ntohs(addr->sib_pkey); 726 726 727 + mutex_lock(&lock); 727 728 list_for_each_entry(cur_dev, &dev_list, list) { 728 729 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 729 730 if (!rdma_cap_af_ib(cur_dev->device, p)) ··· 751 750 cma_dev = cur_dev; 752 751 sgid = gid; 753 752 id_priv->id.port_num = p; 753 + goto found; 754 754 } 755 755 } 756 756 } 757 757 } 758 - 759 - if (!cma_dev) 760 - return -ENODEV; 758 + mutex_unlock(&lock); 759 + return -ENODEV; 761 760 762 761 found: 763 762 cma_attach_to_dev(id_priv, cma_dev); 764 - addr = (struct sockaddr_ib *) cma_src_addr(id_priv); 765 - memcpy(&addr->sib_addr, &sgid, sizeof sgid); 763 + mutex_unlock(&lock); 764 + addr = (struct sockaddr_ib *)cma_src_addr(id_priv); 765 + memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); 766 766 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); 767 767 return 0; 768 768 }
+2
drivers/infiniband/core/rdma_core.c
··· 882 882 WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); 883 883 if (!uverbs_destroy_uobject(obj, reason)) 884 884 ret = 0; 885 + else 886 + atomic_set(&obj->usecnt, 0); 885 887 } 886 888 return ret; 887 889 }
+6
drivers/infiniband/core/ucma.c
··· 124 124 static DEFINE_IDR(ctx_idr); 125 125 static DEFINE_IDR(multicast_idr); 126 126 127 + static const struct file_operations ucma_fops; 128 + 127 129 static inline struct ucma_context *_ucma_find_context(int id, 128 130 struct ucma_file *file) 129 131 { ··· 1583 1581 f = fdget(cmd.fd); 1584 1582 if (!f.file) 1585 1583 return -ENOENT; 1584 + if (f.file->f_op != &ucma_fops) { 1585 + ret = -EINVAL; 1586 + goto file_put; 1587 + } 1586 1588 1587 1589 /* Validate current fd and prevent destruction of id. */ 1588 1590 ctx = ucma_get_ctx(f.file->private_data, cmd.id);
+2 -3
drivers/infiniband/core/uverbs_main.c
··· 1050 1050 uverbs_dev->num_comp_vectors = device->num_comp_vectors; 1051 1051 1052 1052 if (ib_uverbs_create_uapi(device, uverbs_dev)) 1053 - goto err; 1053 + goto err_uapi; 1054 1054 1055 1055 cdev_init(&uverbs_dev->cdev, NULL); 1056 1056 uverbs_dev->cdev.owner = THIS_MODULE; ··· 1077 1077 1078 1078 err_class: 1079 1079 device_destroy(uverbs_class, uverbs_dev->cdev.dev); 1080 - 1081 1080 err_cdev: 1082 1081 cdev_del(&uverbs_dev->cdev); 1082 + err_uapi: 1083 1083 clear_bit(devnum, dev_map); 1084 - 1085 1084 err: 1086 1085 if (atomic_dec_and_test(&uverbs_dev->refcount)) 1087 1086 ib_uverbs_comp_dev(uverbs_dev);
+2
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 833 833 "Failed to destroy Shadow QP"); 834 834 return rc; 835 835 } 836 + bnxt_qplib_free_qp_res(&rdev->qplib_res, 837 + &rdev->qp1_sqp->qplib_qp); 836 838 mutex_lock(&rdev->qp_lock); 837 839 list_del(&rdev->qp1_sqp->list); 838 840 atomic_dec(&rdev->qp_count);
+1 -1
drivers/infiniband/hw/bnxt_re/qplib_fp.c
··· 196 196 struct bnxt_qplib_qp *qp) 197 197 { 198 198 struct bnxt_qplib_q *rq = &qp->rq; 199 - struct bnxt_qplib_q *sq = &qp->rq; 199 + struct bnxt_qplib_q *sq = &qp->sq; 200 200 int rc = 0; 201 201 202 202 if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
+6
drivers/infiniband/hw/cxgb4/qp.c
··· 1685 1685 schp = to_c4iw_cq(qhp->ibqp.send_cq); 1686 1686 1687 1687 if (qhp->ibqp.uobject) { 1688 + 1689 + /* for user qps, qhp->wq.flushed is protected by qhp->mutex */ 1690 + if (qhp->wq.flushed) 1691 + return; 1692 + 1693 + qhp->wq.flushed = 1; 1688 1694 t4_set_wq_in_error(&qhp->wq, 0); 1689 1695 t4_set_cq_in_error(&rchp->cq); 1690 1696 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+5 -3
drivers/infiniband/hw/mlx4/main.c
··· 517 517 props->page_size_cap = dev->dev->caps.page_size_cap; 518 518 props->max_qp = dev->dev->quotas.qp; 519 519 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 520 - props->max_send_sge = dev->dev->caps.max_sq_sg; 521 - props->max_recv_sge = dev->dev->caps.max_rq_sg; 522 - props->max_sge_rd = MLX4_MAX_SGE_RD; 520 + props->max_send_sge = 521 + min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); 522 + props->max_recv_sge = 523 + min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); 524 + props->max_sge_rd = MLX4_MAX_SGE_RD; 523 525 props->max_cq = dev->dev->quotas.cq; 524 526 props->max_cqe = dev->dev->caps.max_cqes; 525 527 props->max_mr = dev->dev->quotas.mpt;
+2
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 1027 1027 1028 1028 skb_queue_head_init(&skqueue); 1029 1029 1030 + netif_tx_lock_bh(p->dev); 1030 1031 spin_lock_irq(&priv->lock); 1031 1032 set_bit(IPOIB_FLAG_OPER_UP, &p->flags); 1032 1033 if (p->neigh) 1033 1034 while ((skb = __skb_dequeue(&p->neigh->queue))) 1034 1035 __skb_queue_tail(&skqueue, skb); 1035 1036 spin_unlock_irq(&priv->lock); 1037 + netif_tx_unlock_bh(p->dev); 1036 1038 1037 1039 while ((skb = __skb_dequeue(&skqueue))) { 1038 1040 skb->dev = p->dev;
+3 -1
drivers/irqchip/irq-gic-v3-its.c
··· 1439 1439 * The consequence of the above is that allocation is cost is low, but 1440 1440 * freeing is expensive. We assumes that freeing rarely occurs. 1441 1441 */ 1442 + #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ 1442 1443 1443 1444 static DEFINE_MUTEX(lpi_range_lock); 1444 1445 static LIST_HEAD(lpi_range_list); ··· 1626 1625 { 1627 1626 phys_addr_t paddr; 1628 1627 1629 - lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); 1628 + lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), 1629 + ITS_MAX_LPI_NRBITS); 1630 1630 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); 1631 1631 if (!gic_rdists->prop_page) { 1632 1632 pr_err("Failed to allocate PROPBASE\n");
+5 -5
drivers/md/md-cluster.c
··· 1276 1276 static int resync_finish(struct mddev *mddev) 1277 1277 { 1278 1278 struct md_cluster_info *cinfo = mddev->cluster_info; 1279 + int ret = 0; 1279 1280 1280 1281 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); 1281 - dlm_unlock_sync(cinfo->resync_lockres); 1282 1282 1283 1283 /* 1284 1284 * If resync thread is interrupted so we can't say resync is finished, 1285 1285 * another node will launch resync thread to continue. 1286 1286 */ 1287 - if (test_bit(MD_CLOSING, &mddev->flags)) 1288 - return 0; 1289 - else 1290 - return resync_info_update(mddev, 0, 0); 1287 + if (!test_bit(MD_CLOSING, &mddev->flags)) 1288 + ret = resync_info_update(mddev, 0, 0); 1289 + dlm_unlock_sync(cinfo->resync_lockres); 1290 + return ret; 1291 1291 } 1292 1292 1293 1293 static int area_resyncing(struct mddev *mddev, int direction,
+4 -1
drivers/md/raid10.c
··· 4529 4529 allow_barrier(conf); 4530 4530 } 4531 4531 4532 + raise_barrier(conf, 0); 4532 4533 read_more: 4533 4534 /* Now schedule reads for blocks from sector_nr to last */ 4534 4535 r10_bio = raid10_alloc_init_r10buf(conf); 4535 4536 r10_bio->state = 0; 4536 - raise_barrier(conf, sectors_done != 0); 4537 + raise_barrier(conf, 1); 4537 4538 atomic_set(&r10_bio->remaining, 0); 4538 4539 r10_bio->mddev = mddev; 4539 4540 r10_bio->sector = sector_nr; ··· 4629 4628 sectors_done += nr_sectors; 4630 4629 if (sector_nr <= last) 4631 4630 goto read_more; 4631 + 4632 + lower_barrier(conf); 4632 4633 4633 4634 /* Now that we have done the whole section we can 4634 4635 * update reshape_progress
+5
drivers/md/raid5-log.h
··· 46 46 extern void ppl_quiesce(struct r5conf *conf, int quiesce); 47 47 extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); 48 48 49 + static inline bool raid5_has_log(struct r5conf *conf) 50 + { 51 + return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); 52 + } 53 + 49 54 static inline bool raid5_has_ppl(struct r5conf *conf) 50 55 { 51 56 return test_bit(MD_HAS_PPL, &conf->mddev->flags);
+3 -3
drivers/md/raid5.c
··· 733 733 { 734 734 struct r5conf *conf = sh->raid_conf; 735 735 736 - if (conf->log || raid5_has_ppl(conf)) 736 + if (raid5_has_log(conf) || raid5_has_ppl(conf)) 737 737 return false; 738 738 return test_bit(STRIPE_BATCH_READY, &sh->state) && 739 739 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && ··· 7737 7737 sector_t newsize; 7738 7738 struct r5conf *conf = mddev->private; 7739 7739 7740 - if (conf->log || raid5_has_ppl(conf)) 7740 + if (raid5_has_log(conf) || raid5_has_ppl(conf)) 7741 7741 return -EINVAL; 7742 7742 sectors &= ~((sector_t)conf->chunk_sectors - 1); 7743 7743 newsize = raid5_size(mddev, sectors, mddev->raid_disks); ··· 7788 7788 { 7789 7789 struct r5conf *conf = mddev->private; 7790 7790 7791 - if (conf->log || raid5_has_ppl(conf)) 7791 + if (raid5_has_log(conf) || raid5_has_ppl(conf)) 7792 7792 return -EINVAL; 7793 7793 if (mddev->delta_disks == 0 && 7794 7794 mddev->new_layout == mddev->layout &&
+1 -1
drivers/memory/ti-aemif.c
··· 411 411 if (ret < 0) 412 412 goto error; 413 413 } 414 - } else { 414 + } else if (pdata) { 415 415 for (i = 0; i < pdata->num_sub_devices; i++) { 416 416 pdata->sub_devices[i].dev.parent = dev; 417 417 ret = platform_device_register(&pdata->sub_devices[i]);
+11 -13
drivers/net/ethernet/amazon/ena/ena_com.c
··· 459 459 cqe = &admin_queue->cq.entries[head_masked]; 460 460 461 461 /* Go over all the completions */ 462 - while ((cqe->acq_common_descriptor.flags & 462 + while ((READ_ONCE(cqe->acq_common_descriptor.flags) & 463 463 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 464 464 /* Do not read the rest of the completion entry before the 465 465 * phase bit was validated 466 466 */ 467 - rmb(); 467 + dma_rmb(); 468 468 ena_com_handle_single_admin_completion(admin_queue, cqe); 469 469 470 470 head_masked++; ··· 627 627 mmio_read_reg |= mmio_read->seq_num & 628 628 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; 629 629 630 - /* make sure read_resp->req_id get updated before the hw can write 631 - * there 632 - */ 633 - wmb(); 630 + writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); 634 631 635 - writel_relaxed(mmio_read_reg, 636 - ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); 637 - 638 - mmiowb(); 639 632 for (i = 0; i < timeout; i++) { 640 - if (read_resp->req_id == mmio_read->seq_num) 633 + if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) 641 634 break; 642 635 643 636 udelay(1); ··· 1789 1796 aenq_common = &aenq_e->aenq_common_desc; 1790 1797 1791 1798 /* Go over all the events */ 1792 - while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == 1793 - phase) { 1799 + while ((READ_ONCE(aenq_common->flags) & 1800 + ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { 1801 + /* Make sure the phase bit (ownership) is as expected before 1802 + * reading the rest of the descriptor. 1803 + */ 1804 + dma_rmb(); 1805 + 1794 1806 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", 1795 1807 aenq_common->group, aenq_common->syndrom, 1796 1808 (u64)aenq_common->timestamp_low +
+6
drivers/net/ethernet/amazon/ena/ena_eth_com.c
··· 51 51 if (desc_phase != expected_phase) 52 52 return NULL; 53 53 54 + /* Make sure we read the rest of the descriptor after the phase bit 55 + * has been read 56 + */ 57 + dma_rmb(); 58 + 54 59 return cdesc; 55 60 } 56 61 ··· 498 493 if (cdesc_phase != expected_phase) 499 494 return -EAGAIN; 500 495 496 + dma_rmb(); 501 497 if (unlikely(cdesc->req_id >= io_cq->q_depth)) { 502 498 pr_err("Invalid req id %d\n", cdesc->req_id); 503 499 return -EINVAL;
+2 -6
drivers/net/ethernet/amazon/ena/ena_eth_com.h
··· 107 107 return io_sq->q_depth - 1 - cnt; 108 108 } 109 109 110 - static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, 111 - bool relaxed) 110 + static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) 112 111 { 113 112 u16 tail; 114 113 ··· 116 117 pr_debug("write submission queue doorbell for queue: %d tail: %d\n", 117 118 io_sq->qid, tail); 118 119 119 - if (relaxed) 120 - writel_relaxed(tail, io_sq->db_addr); 121 - else 122 - writel(tail, io_sq->db_addr); 120 + writel(tail, io_sq->db_addr); 123 121 124 122 return 0; 125 123 }
+37 -45
drivers/net/ethernet/amazon/ena/ena_netdev.c
··· 76 76 77 77 static int ena_rss_init_default(struct ena_adapter *adapter); 78 78 static void check_for_admin_com_state(struct ena_adapter *adapter); 79 - static void ena_destroy_device(struct ena_adapter *adapter); 79 + static void ena_destroy_device(struct ena_adapter *adapter, bool graceful); 80 80 static int ena_restore_device(struct ena_adapter *adapter); 81 81 82 82 static void ena_tx_timeout(struct net_device *dev) ··· 461 461 return -ENOMEM; 462 462 } 463 463 464 - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, 464 + dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, 465 465 DMA_FROM_DEVICE); 466 466 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { 467 467 u64_stats_update_begin(&rx_ring->syncp); ··· 478 478 rx_info->page_offset = 0; 479 479 ena_buf = &rx_info->ena_buf; 480 480 ena_buf->paddr = dma; 481 - ena_buf->len = PAGE_SIZE; 481 + ena_buf->len = ENA_PAGE_SIZE; 482 482 483 483 return 0; 484 484 } ··· 495 495 return; 496 496 } 497 497 498 - dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, 498 + dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE, 499 499 DMA_FROM_DEVICE); 500 500 501 501 __free_page(page); ··· 551 551 rx_ring->qid, i, num); 552 552 } 553 553 554 - if (likely(i)) { 555 - /* Add memory barrier to make sure the desc were written before 556 - * issue a doorbell 557 - */ 558 - wmb(); 559 - ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true); 560 - mmiowb(); 561 - } 554 + /* ena_com_write_sq_doorbell issues a wmb() */ 555 + if (likely(i)) 556 + ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 562 557 563 558 rx_ring->next_to_use = next_to_use; 564 559 ··· 911 916 do { 912 917 dma_unmap_page(rx_ring->dev, 913 918 dma_unmap_addr(&rx_info->ena_buf, paddr), 914 - PAGE_SIZE, DMA_FROM_DEVICE); 919 + ENA_PAGE_SIZE, DMA_FROM_DEVICE); 915 920 916 921 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, 917 - rx_info->page_offset, len, PAGE_SIZE); 922 + rx_info->page_offset, len, ENA_PAGE_SIZE); 918 923 
919 924 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 920 925 "rx skb updated. len %d. data_len %d\n", ··· 1895 1900 "Destroy failure, restarting device\n"); 1896 1901 ena_dump_stats_to_dmesg(adapter); 1897 1902 /* rtnl lock already obtained in dev_ioctl() layer */ 1898 - ena_destroy_device(adapter); 1903 + ena_destroy_device(adapter, false); 1899 1904 ena_restore_device(adapter); 1900 1905 } 1901 1906 ··· 2107 2112 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 2108 2113 tx_ring->ring_size); 2109 2114 2110 - /* This WMB is aimed to: 2111 - * 1 - perform smp barrier before reading next_to_completion 2112 - * 2 - make sure the desc were written before trigger DB 2113 - */ 2114 - wmb(); 2115 - 2116 2115 /* stop the queue when no more space available, the packet can have up 2117 2116 * to sgl_size + 2. one for the meta descriptor and one for header 2118 2117 * (if the header is larger than tx_max_header_size). ··· 2125 2136 * stop the queue but meanwhile clean_tx_irq updates 2126 2137 * next_to_completion and terminates. 2127 2138 * The queue will remain stopped forever. 2128 - * To solve this issue this function perform rmb, check 2129 - * the wakeup condition and wake up the queue if needed. 2139 + * To solve this issue add a mb() to make sure that 2140 + * netif_tx_stop_queue() write is vissible before checking if 2141 + * there is additional space in the queue. 2130 2142 */ 2131 - smp_rmb(); 2143 + smp_mb(); 2132 2144 2133 2145 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) 2134 2146 > ENA_TX_WAKEUP_THRESH) { ··· 2141 2151 } 2142 2152 2143 2153 if (netif_xmit_stopped(txq) || !skb->xmit_more) { 2144 - /* trigger the dma engine */ 2145 - ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false); 2154 + /* trigger the dma engine. 
ena_com_write_sq_doorbell() 2155 + * has a mb 2156 + */ 2157 + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2146 2158 u64_stats_update_begin(&tx_ring->syncp); 2147 2159 tx_ring->tx_stats.doorbells++; 2148 2160 u64_stats_update_end(&tx_ring->syncp); ··· 2542 2550 return rc; 2543 2551 } 2544 2552 2545 - static void ena_destroy_device(struct ena_adapter *adapter) 2553 + static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) 2546 2554 { 2547 2555 struct net_device *netdev = adapter->netdev; 2548 2556 struct ena_com_dev *ena_dev = adapter->ena_dev; 2549 2557 bool dev_up; 2558 + 2559 + if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) 2560 + return; 2550 2561 2551 2562 netif_carrier_off(netdev); 2552 2563 ··· 2558 2563 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2559 2564 adapter->dev_up_before_reset = dev_up; 2560 2565 2561 - ena_com_set_admin_running_state(ena_dev, false); 2566 + if (!graceful) 2567 + ena_com_set_admin_running_state(ena_dev, false); 2562 2568 2563 2569 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2564 2570 ena_down(adapter); ··· 2587 2591 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 2588 2592 2589 2593 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2594 + clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2590 2595 } 2591 2596 2592 2597 static int ena_restore_device(struct ena_adapter *adapter) ··· 2632 2635 } 2633 2636 } 2634 2637 2638 + set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2635 2639 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2636 2640 dev_err(&pdev->dev, "Device reset completed successfully\n"); 2637 2641 ··· 2663 2665 return; 2664 2666 } 2665 2667 rtnl_lock(); 2666 - ena_destroy_device(adapter); 2668 + ena_destroy_device(adapter, false); 2667 2669 ena_restore_device(adapter); 2668 2670 rtnl_unlock(); 2669 2671 } ··· 3407 3409 netdev->rx_cpu_rmap = NULL; 3408 3410 } 3409 3411 #endif /* CONFIG_RFS_ACCEL */ 3410 - 3411 - unregister_netdev(netdev); 3412 3412 
del_timer_sync(&adapter->timer_service); 3413 3413 3414 3414 cancel_work_sync(&adapter->reset_task); 3415 3415 3416 - /* Reset the device only if the device is running. */ 3416 + unregister_netdev(netdev); 3417 + 3418 + /* If the device is running then we want to make sure the device will be 3419 + * reset to make sure no more events will be issued by the device. 3420 + */ 3417 3421 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) 3418 - ena_com_dev_reset(ena_dev, adapter->reset_reason); 3422 + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3419 3423 3420 - ena_free_mgmnt_irq(adapter); 3421 - 3422 - ena_disable_msix(adapter); 3424 + rtnl_lock(); 3425 + ena_destroy_device(adapter, true); 3426 + rtnl_unlock(); 3423 3427 3424 3428 free_netdev(netdev); 3425 - 3426 - ena_com_mmio_reg_read_request_destroy(ena_dev); 3427 - 3428 - ena_com_abort_admin_commands(ena_dev); 3429 - 3430 - ena_com_wait_for_abort_completion(ena_dev); 3431 - 3432 - ena_com_admin_destroy(ena_dev); 3433 3429 3434 3430 ena_com_rss_destroy(ena_dev); 3435 3431 ··· 3459 3467 "ignoring device reset request as the device is being suspended\n"); 3460 3468 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3461 3469 } 3462 - ena_destroy_device(adapter); 3470 + ena_destroy_device(adapter, true); 3463 3471 rtnl_unlock(); 3464 3472 return 0; 3465 3473 }
+11
drivers/net/ethernet/amazon/ena/ena_netdev.h
··· 355 355 356 356 int ena_get_sset_count(struct net_device *netdev, int sset); 357 357 358 + /* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the 359 + * driver passas 0. 360 + * Since the max packet size the ENA handles is ~9kB limit the buffer length to 361 + * 16kB. 362 + */ 363 + #if PAGE_SIZE > SZ_16K 364 + #define ENA_PAGE_SIZE SZ_16K 365 + #else 366 + #define ENA_PAGE_SIZE PAGE_SIZE 367 + #endif 368 + 358 369 #endif /* !(ENA_H) */
+1 -1
drivers/net/ethernet/emulex/benet/be_cmds.c
··· 4500 4500 port_res->max_vfs += le16_to_cpu(pcie->num_vfs); 4501 4501 } 4502 4502 } 4503 - return status; 4503 + goto err; 4504 4504 } 4505 4505 4506 4506 pcie = be_get_pcie_desc(resp->func_param, desc_count,
+1
drivers/net/ethernet/lantiq_etop.c
··· 274 274 struct ltq_etop_chan *ch = &priv->ch[i]; 275 275 276 276 ch->idx = ch->dma.nr = i; 277 + ch->dma.dev = &priv->pdev->dev; 277 278 278 279 if (IS_TX(i)) { 279 280 ltq_dma_alloc_tx(&ch->dma);
+14 -8
drivers/net/ethernet/mellanox/mlx5/core/dev.c
··· 132 132 delayed_event_start(priv); 133 133 134 134 dev_ctx->context = intf->add(dev); 135 - set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 136 - if (intf->attach) 137 - set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); 138 - 139 135 if (dev_ctx->context) { 136 + set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 137 + if (intf->attach) 138 + set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); 139 + 140 140 spin_lock_irq(&priv->ctx_lock); 141 141 list_add_tail(&dev_ctx->list, &priv->ctx_list); 142 142 ··· 211 211 if (intf->attach) { 212 212 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) 213 213 goto out; 214 - intf->attach(dev, dev_ctx->context); 214 + if (intf->attach(dev, dev_ctx->context)) 215 + goto out; 216 + 215 217 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); 216 218 } else { 217 219 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) 218 220 goto out; 219 221 dev_ctx->context = intf->add(dev); 222 + if (!dev_ctx->context) 223 + goto out; 224 + 220 225 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 221 226 } 222 227 ··· 396 391 } 397 392 } 398 393 399 - static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev) 394 + static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev) 400 395 { 401 - return (u16)((dev->pdev->bus->number << 8) | 396 + return (u32)((pci_domain_nr(dev->pdev->bus) << 16) | 397 + (dev->pdev->bus->number << 8) | 402 398 PCI_SLOT(dev->pdev->devfn)); 403 399 } 404 400 405 401 /* Must be called with intf_mutex held */ 406 402 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) 407 403 { 408 - u16 pci_id = mlx5_gen_pci_id(dev); 404 + u32 pci_id = mlx5_gen_pci_id(dev); 409 405 struct mlx5_core_dev *res = NULL; 410 406 struct mlx5_core_dev *tmp_dev; 411 407 struct mlx5_priv *priv;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
··· 191 191 { 192 192 if (psrc_m) { 193 193 MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); 194 - MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v)); 194 + MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v)); 195 195 } 196 196 197 197 if (pdst_m) {
+1
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 663 663 if (err) 664 664 goto miss_rule_err; 665 665 666 + kvfree(flow_group_in); 666 667 return 0; 667 668 668 669 miss_rule_err:
+39 -37
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1578 1578 return version; 1579 1579 } 1580 1580 1581 + static struct fs_fte * 1582 + lookup_fte_locked(struct mlx5_flow_group *g, 1583 + u32 *match_value, 1584 + bool take_write) 1585 + { 1586 + struct fs_fte *fte_tmp; 1587 + 1588 + if (take_write) 1589 + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); 1590 + else 1591 + nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); 1592 + fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, 1593 + rhash_fte); 1594 + if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { 1595 + fte_tmp = NULL; 1596 + goto out; 1597 + } 1598 + 1599 + nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); 1600 + out: 1601 + if (take_write) 1602 + up_write_ref_node(&g->node); 1603 + else 1604 + up_read_ref_node(&g->node); 1605 + return fte_tmp; 1606 + } 1607 + 1581 1608 static struct mlx5_flow_handle * 1582 1609 try_add_to_existing_fg(struct mlx5_flow_table *ft, 1583 1610 struct list_head *match_head, ··· 1627 1600 if (IS_ERR(fte)) 1628 1601 return ERR_PTR(-ENOMEM); 1629 1602 1630 - list_for_each_entry(iter, match_head, list) { 1631 - nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT); 1632 - } 1633 - 1634 1603 search_again_locked: 1635 1604 version = matched_fgs_get_version(match_head); 1636 1605 /* Try to find a fg that already contains a matching fte */ ··· 1634 1611 struct fs_fte *fte_tmp; 1635 1612 1636 1613 g = iter->g; 1637 - fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, 1638 - rhash_fte); 1639 - if (!fte_tmp || !tree_get_node(&fte_tmp->node)) 1614 + fte_tmp = lookup_fte_locked(g, spec->match_value, take_write); 1615 + if (!fte_tmp) 1640 1616 continue; 1641 - 1642 - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); 1643 - if (!take_write) { 1644 - list_for_each_entry(iter, match_head, list) 1645 - up_read_ref_node(&iter->g->node); 1646 - } else { 1647 - list_for_each_entry(iter, match_head, list) 1648 - up_write_ref_node(&iter->g->node); 1649 - } 1650 - 1651 1617 rule = add_rule_fg(g, 
spec->match_value, 1652 1618 flow_act, dest, dest_num, fte_tmp); 1653 1619 up_write_ref_node(&fte_tmp->node); 1654 1620 tree_put_node(&fte_tmp->node); 1655 1621 kmem_cache_free(steering->ftes_cache, fte); 1656 1622 return rule; 1657 - } 1658 - 1659 - /* No group with matching fte found. Try to add a new fte to any 1660 - * matching fg. 1661 - */ 1662 - 1663 - if (!take_write) { 1664 - list_for_each_entry(iter, match_head, list) 1665 - up_read_ref_node(&iter->g->node); 1666 - list_for_each_entry(iter, match_head, list) 1667 - nested_down_write_ref_node(&iter->g->node, 1668 - FS_LOCK_PARENT); 1669 - take_write = true; 1670 1623 } 1671 1624 1672 1625 /* Check the ft version, for case that new flow group ··· 1656 1657 /* Check the fgs version, for case the new FTE with the 1657 1658 * same values was added while the fgs weren't locked 1658 1659 */ 1659 - if (version != matched_fgs_get_version(match_head)) 1660 + if (version != matched_fgs_get_version(match_head)) { 1661 + take_write = true; 1660 1662 goto search_again_locked; 1663 + } 1661 1664 1662 1665 list_for_each_entry(iter, match_head, list) { 1663 1666 g = iter->g; 1664 1667 1665 1668 if (!g->node.active) 1666 1669 continue; 1670 + 1671 + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); 1672 + 1667 1673 err = insert_fte(g, fte); 1668 1674 if (err) { 1675 + up_write_ref_node(&g->node); 1669 1676 if (err == -ENOSPC) 1670 1677 continue; 1671 - list_for_each_entry(iter, match_head, list) 1672 - up_write_ref_node(&iter->g->node); 1673 1678 kmem_cache_free(steering->ftes_cache, fte); 1674 1679 return ERR_PTR(err); 1675 1680 } 1676 1681 1677 1682 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); 1678 - list_for_each_entry(iter, match_head, list) 1679 - up_write_ref_node(&iter->g->node); 1683 + up_write_ref_node(&g->node); 1680 1684 rule = add_rule_fg(g, spec->match_value, 1681 1685 flow_act, dest, dest_num, fte); 1682 1686 up_write_ref_node(&fte->node); ··· 1688 1686 } 1689 1687 rule = ERR_PTR(-ENOENT); 1690 
1688 out: 1691 - list_for_each_entry(iter, match_head, list) 1692 - up_write_ref_node(&iter->g->node); 1693 1689 kmem_cache_free(steering->ftes_cache, fte); 1694 1690 return rule; 1695 1691 } ··· 1726 1726 if (err) { 1727 1727 if (take_write) 1728 1728 up_write_ref_node(&ft->node); 1729 + else 1730 + up_read_ref_node(&ft->node); 1729 1731 return ERR_PTR(err); 1730 1732 } 1731 1733
+9 -1
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 331 331 add_timer(&health->timer); 332 332 } 333 333 334 - void mlx5_stop_health_poll(struct mlx5_core_dev *dev) 334 + void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) 335 335 { 336 336 struct mlx5_core_health *health = &dev->priv.health; 337 + unsigned long flags; 338 + 339 + if (disable_health) { 340 + spin_lock_irqsave(&health->wq_lock, flags); 341 + set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); 342 + set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); 343 + spin_unlock_irqrestore(&health->wq_lock, flags); 344 + } 337 345 338 346 del_timer_sync(&health->timer); 339 347 }
+7 -5
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 878 878 priv->numa_node = dev_to_node(&dev->pdev->dev); 879 879 880 880 priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); 881 - if (!priv->dbg_root) 881 + if (!priv->dbg_root) { 882 + dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n"); 882 883 return -ENOMEM; 884 + } 883 885 884 886 err = mlx5_pci_enable_device(dev); 885 887 if (err) { ··· 930 928 pci_clear_master(dev->pdev); 931 929 release_bar(dev->pdev); 932 930 mlx5_pci_disable_device(dev); 933 - debugfs_remove(priv->dbg_root); 931 + debugfs_remove_recursive(priv->dbg_root); 934 932 } 935 933 936 934 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) ··· 1288 1286 mlx5_cleanup_once(dev); 1289 1287 1290 1288 err_stop_poll: 1291 - mlx5_stop_health_poll(dev); 1289 + mlx5_stop_health_poll(dev, boot); 1292 1290 if (mlx5_cmd_teardown_hca(dev)) { 1293 1291 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); 1294 1292 goto out_err; ··· 1348 1346 mlx5_free_irq_vectors(dev); 1349 1347 if (cleanup) 1350 1348 mlx5_cleanup_once(dev); 1351 - mlx5_stop_health_poll(dev); 1349 + mlx5_stop_health_poll(dev, cleanup); 1352 1350 err = mlx5_cmd_teardown_hca(dev); 1353 1351 if (err) { 1354 1352 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); ··· 1610 1608 * with the HCA, so the health polll is no longer needed. 1611 1609 */ 1612 1610 mlx5_drain_health_wq(dev); 1613 - mlx5_stop_health_poll(dev); 1611 + mlx5_stop_health_poll(dev, false); 1614 1612 1615 1613 ret = mlx5_cmd_force_teardown_hca(dev); 1616 1614 if (ret) {
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/wq.c
··· 39 39 return (u32)wq->fbc.sz_m1 + 1; 40 40 } 41 41 42 - u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) 42 + u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) 43 43 { 44 - return (u32)wq->fbc.frag_sz_m1 + 1; 44 + return wq->fbc.frag_sz_m1 + 1; 45 45 } 46 46 47 47 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) ··· 138 138 void *qpc, struct mlx5_wq_qp *wq, 139 139 struct mlx5_wq_ctrl *wq_ctrl) 140 140 { 141 - u32 sq_strides_offset; 141 + u16 sq_strides_offset; 142 142 u32 rq_pg_remainder; 143 143 int err; 144 144
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/wq.h
··· 80 80 void *wqc, struct mlx5_wq_cyc *wq, 81 81 struct mlx5_wq_ctrl *wq_ctrl); 82 82 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); 83 - u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); 83 + u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); 84 84 85 85 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 86 86 void *qpc, struct mlx5_wq_qp *wq,
+8 -8
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
··· 337 337 MLXSW_SP_SB_CM(1500, 9, 0), 338 338 MLXSW_SP_SB_CM(1500, 9, 0), 339 339 MLXSW_SP_SB_CM(1500, 9, 0), 340 - MLXSW_SP_SB_CM(0, 0, 0), 341 - MLXSW_SP_SB_CM(0, 0, 0), 342 - MLXSW_SP_SB_CM(0, 0, 0), 343 - MLXSW_SP_SB_CM(0, 0, 0), 344 - MLXSW_SP_SB_CM(0, 0, 0), 345 - MLXSW_SP_SB_CM(0, 0, 0), 346 - MLXSW_SP_SB_CM(0, 0, 0), 347 - MLXSW_SP_SB_CM(0, 0, 0), 340 + MLXSW_SP_SB_CM(0, 140000, 15), 341 + MLXSW_SP_SB_CM(0, 140000, 15), 342 + MLXSW_SP_SB_CM(0, 140000, 15), 343 + MLXSW_SP_SB_CM(0, 140000, 15), 344 + MLXSW_SP_SB_CM(0, 140000, 15), 345 + MLXSW_SP_SB_CM(0, 140000, 15), 346 + MLXSW_SP_SB_CM(0, 140000, 15), 347 + MLXSW_SP_SB_CM(0, 140000, 15), 348 348 MLXSW_SP_SB_CM(1, 0xff, 0), 349 349 }; 350 350
+6
drivers/net/ethernet/netronome/nfp/flower/action.c
··· 52 52 #define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01) 53 53 #define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04) 54 54 #define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800) 55 + #define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX 55 56 #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ 56 57 NFP_FL_TUNNEL_KEY | \ 57 58 NFP_FL_TUNNEL_GENEVE_OPT) ··· 742 741 nfp_fl_push_vlan(psh_v, a); 743 742 *a_len += sizeof(struct nfp_fl_push_vlan); 744 743 } else if (is_tcf_tunnel_set(a)) { 744 + struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); 745 745 struct nfp_repr *repr = netdev_priv(netdev); 746 + 746 747 *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); 747 748 if (*tun_type == NFP_FL_TUNNEL_NONE) 749 + return -EOPNOTSUPP; 750 + 751 + if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) 748 752 return -EOPNOTSUPP; 749 753 750 754 /* Pre-tunnel action is required for tunnel encap.
+1
drivers/net/ethernet/netronome/nfp/flower/main.h
··· 70 70 #define NFP_FL_FEATS_GENEVE BIT(0) 71 71 #define NFP_FL_NBI_MTU_SETTING BIT(1) 72 72 #define NFP_FL_FEATS_GENEVE_OPT BIT(2) 73 + #define NFP_FL_FEATS_VLAN_PCP BIT(3) 73 74 #define NFP_FL_FEATS_LAG BIT(31) 74 75 75 76 struct nfp_fl_mask_id {
+1 -1
drivers/net/ethernet/netronome/nfp/flower/match.c
··· 56 56 FLOW_DISSECTOR_KEY_VLAN, 57 57 target); 58 58 /* Populate the tci field. */ 59 - if (flow_vlan->vlan_id) { 59 + if (flow_vlan->vlan_id || flow_vlan->vlan_priority) { 60 60 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 61 61 flow_vlan->vlan_priority) | 62 62 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+11
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 192 192 key_size += sizeof(struct nfp_flower_mac_mpls); 193 193 } 194 194 195 + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { 196 + struct flow_dissector_key_vlan *flow_vlan; 197 + 198 + flow_vlan = skb_flow_dissector_target(flow->dissector, 199 + FLOW_DISSECTOR_KEY_VLAN, 200 + flow->mask); 201 + if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) && 202 + flow_vlan->vlan_priority) 203 + return -EOPNOTSUPP; 204 + } 205 + 195 206 if (dissector_uses_key(flow->dissector, 196 207 FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 197 208 struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
+36 -40
drivers/net/ethernet/qualcomm/qca_7k.c
··· 45 45 { 46 46 __be16 rx_data; 47 47 __be16 tx_data; 48 - struct spi_transfer *transfer; 49 - struct spi_message *msg; 48 + struct spi_transfer transfer[2]; 49 + struct spi_message msg; 50 50 int ret; 51 51 52 + memset(transfer, 0, sizeof(transfer)); 53 + 54 + spi_message_init(&msg); 55 + 52 56 tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); 57 + *result = 0; 58 + 59 + transfer[0].tx_buf = &tx_data; 60 + transfer[0].len = QCASPI_CMD_LEN; 61 + transfer[1].rx_buf = &rx_data; 62 + transfer[1].len = QCASPI_CMD_LEN; 63 + 64 + spi_message_add_tail(&transfer[0], &msg); 53 65 54 66 if (qca->legacy_mode) { 55 - msg = &qca->spi_msg1; 56 - transfer = &qca->spi_xfer1; 57 - transfer->tx_buf = &tx_data; 58 - transfer->rx_buf = NULL; 59 - transfer->len = QCASPI_CMD_LEN; 60 - spi_sync(qca->spi_dev, msg); 61 - } else { 62 - msg = &qca->spi_msg2; 63 - transfer = &qca->spi_xfer2[0]; 64 - transfer->tx_buf = &tx_data; 65 - transfer->rx_buf = NULL; 66 - transfer->len = QCASPI_CMD_LEN; 67 - transfer = &qca->spi_xfer2[1]; 67 + spi_sync(qca->spi_dev, &msg); 68 + spi_message_init(&msg); 68 69 } 69 - transfer->tx_buf = NULL; 70 - transfer->rx_buf = &rx_data; 71 - transfer->len = QCASPI_CMD_LEN; 72 - ret = spi_sync(qca->spi_dev, msg); 70 + spi_message_add_tail(&transfer[1], &msg); 71 + ret = spi_sync(qca->spi_dev, &msg); 73 72 74 73 if (!ret) 75 - ret = msg->status; 74 + ret = msg.status; 76 75 77 76 if (ret) 78 77 qcaspi_spi_error(qca); ··· 85 86 qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) 86 87 { 87 88 __be16 tx_data[2]; 88 - struct spi_transfer *transfer; 89 - struct spi_message *msg; 89 + struct spi_transfer transfer[2]; 90 + struct spi_message msg; 90 91 int ret; 92 + 93 + memset(&transfer, 0, sizeof(transfer)); 94 + 95 + spi_message_init(&msg); 91 96 92 97 tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); 93 98 tx_data[1] = cpu_to_be16(value); 94 99 100 + transfer[0].tx_buf = &tx_data[0]; 101 + transfer[0].len = QCASPI_CMD_LEN; 
102 + transfer[1].tx_buf = &tx_data[1]; 103 + transfer[1].len = QCASPI_CMD_LEN; 104 + 105 + spi_message_add_tail(&transfer[0], &msg); 95 106 if (qca->legacy_mode) { 96 - msg = &qca->spi_msg1; 97 - transfer = &qca->spi_xfer1; 98 - transfer->tx_buf = &tx_data[0]; 99 - transfer->rx_buf = NULL; 100 - transfer->len = QCASPI_CMD_LEN; 101 - spi_sync(qca->spi_dev, msg); 102 - } else { 103 - msg = &qca->spi_msg2; 104 - transfer = &qca->spi_xfer2[0]; 105 - transfer->tx_buf = &tx_data[0]; 106 - transfer->rx_buf = NULL; 107 - transfer->len = QCASPI_CMD_LEN; 108 - transfer = &qca->spi_xfer2[1]; 107 + spi_sync(qca->spi_dev, &msg); 108 + spi_message_init(&msg); 109 109 } 110 - transfer->tx_buf = &tx_data[1]; 111 - transfer->rx_buf = NULL; 112 - transfer->len = QCASPI_CMD_LEN; 113 - ret = spi_sync(qca->spi_dev, msg); 110 + spi_message_add_tail(&transfer[1], &msg); 111 + ret = spi_sync(qca->spi_dev, &msg); 114 112 115 113 if (!ret) 116 - ret = msg->status; 114 + ret = msg.status; 117 115 118 116 if (ret) 119 117 qcaspi_spi_error(qca);
+58 -54
drivers/net/ethernet/qualcomm/qca_spi.c
··· 99 99 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) 100 100 { 101 101 __be16 cmd; 102 - struct spi_message *msg = &qca->spi_msg2; 103 - struct spi_transfer *transfer = &qca->spi_xfer2[0]; 102 + struct spi_message msg; 103 + struct spi_transfer transfer[2]; 104 104 int ret; 105 105 106 + memset(&transfer, 0, sizeof(transfer)); 107 + spi_message_init(&msg); 108 + 106 109 cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); 107 - transfer->tx_buf = &cmd; 108 - transfer->rx_buf = NULL; 109 - transfer->len = QCASPI_CMD_LEN; 110 - transfer = &qca->spi_xfer2[1]; 111 - transfer->tx_buf = src; 112 - transfer->rx_buf = NULL; 113 - transfer->len = len; 110 + transfer[0].tx_buf = &cmd; 111 + transfer[0].len = QCASPI_CMD_LEN; 112 + transfer[1].tx_buf = src; 113 + transfer[1].len = len; 114 114 115 - ret = spi_sync(qca->spi_dev, msg); 115 + spi_message_add_tail(&transfer[0], &msg); 116 + spi_message_add_tail(&transfer[1], &msg); 117 + ret = spi_sync(qca->spi_dev, &msg); 116 118 117 - if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { 119 + if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { 118 120 qcaspi_spi_error(qca); 119 121 return 0; 120 122 } ··· 127 125 static u32 128 126 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) 129 127 { 130 - struct spi_message *msg = &qca->spi_msg1; 131 - struct spi_transfer *transfer = &qca->spi_xfer1; 128 + struct spi_message msg; 129 + struct spi_transfer transfer; 132 130 int ret; 133 131 134 - transfer->tx_buf = src; 135 - transfer->rx_buf = NULL; 136 - transfer->len = len; 132 + memset(&transfer, 0, sizeof(transfer)); 133 + spi_message_init(&msg); 137 134 138 - ret = spi_sync(qca->spi_dev, msg); 135 + transfer.tx_buf = src; 136 + transfer.len = len; 139 137 140 - if (ret || (msg->actual_length != len)) { 138 + spi_message_add_tail(&transfer, &msg); 139 + ret = spi_sync(qca->spi_dev, &msg); 140 + 141 + if (ret || (msg.actual_length != len)) { 141 142 qcaspi_spi_error(qca); 142 143 return 0; 143 144 
} ··· 151 146 static u32 152 147 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) 153 148 { 154 - struct spi_message *msg = &qca->spi_msg2; 149 + struct spi_message msg; 155 150 __be16 cmd; 156 - struct spi_transfer *transfer = &qca->spi_xfer2[0]; 151 + struct spi_transfer transfer[2]; 157 152 int ret; 158 153 154 + memset(&transfer, 0, sizeof(transfer)); 155 + spi_message_init(&msg); 156 + 159 157 cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); 160 - transfer->tx_buf = &cmd; 161 - transfer->rx_buf = NULL; 162 - transfer->len = QCASPI_CMD_LEN; 163 - transfer = &qca->spi_xfer2[1]; 164 - transfer->tx_buf = NULL; 165 - transfer->rx_buf = dst; 166 - transfer->len = len; 158 + transfer[0].tx_buf = &cmd; 159 + transfer[0].len = QCASPI_CMD_LEN; 160 + transfer[1].rx_buf = dst; 161 + transfer[1].len = len; 167 162 168 - ret = spi_sync(qca->spi_dev, msg); 163 + spi_message_add_tail(&transfer[0], &msg); 164 + spi_message_add_tail(&transfer[1], &msg); 165 + ret = spi_sync(qca->spi_dev, &msg); 169 166 170 - if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { 167 + if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { 171 168 qcaspi_spi_error(qca); 172 169 return 0; 173 170 } ··· 180 173 static u32 181 174 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) 182 175 { 183 - struct spi_message *msg = &qca->spi_msg1; 184 - struct spi_transfer *transfer = &qca->spi_xfer1; 176 + struct spi_message msg; 177 + struct spi_transfer transfer; 185 178 int ret; 186 179 187 - transfer->tx_buf = NULL; 188 - transfer->rx_buf = dst; 189 - transfer->len = len; 180 + memset(&transfer, 0, sizeof(transfer)); 181 + spi_message_init(&msg); 190 182 191 - ret = spi_sync(qca->spi_dev, msg); 183 + transfer.rx_buf = dst; 184 + transfer.len = len; 192 185 193 - if (ret || (msg->actual_length != len)) { 186 + spi_message_add_tail(&transfer, &msg); 187 + ret = spi_sync(qca->spi_dev, &msg); 188 + 189 + if (ret || (msg.actual_length != len)) { 194 190 qcaspi_spi_error(qca); 195 
191 return 0; 196 192 } ··· 205 195 qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) 206 196 { 207 197 __be16 tx_data; 208 - struct spi_message *msg = &qca->spi_msg1; 209 - struct spi_transfer *transfer = &qca->spi_xfer1; 198 + struct spi_message msg; 199 + struct spi_transfer transfer; 210 200 int ret; 211 201 212 - tx_data = cpu_to_be16(cmd); 213 - transfer->len = sizeof(tx_data); 214 - transfer->tx_buf = &tx_data; 215 - transfer->rx_buf = NULL; 202 + memset(&transfer, 0, sizeof(transfer)); 216 203 217 - ret = spi_sync(qca->spi_dev, msg); 204 + spi_message_init(&msg); 205 + 206 + tx_data = cpu_to_be16(cmd); 207 + transfer.len = sizeof(cmd); 208 + transfer.tx_buf = &tx_data; 209 + spi_message_add_tail(&transfer, &msg); 210 + 211 + ret = spi_sync(qca->spi_dev, &msg); 218 212 219 213 if (!ret) 220 - ret = msg->status; 214 + ret = msg.status; 221 215 222 216 if (ret) 223 217 qcaspi_spi_error(qca); ··· 848 834 849 835 qca = netdev_priv(dev); 850 836 memset(qca, 0, sizeof(struct qcaspi)); 851 - 852 - memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer)); 853 - memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2); 854 - 855 - spi_message_init(&qca->spi_msg1); 856 - spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1); 857 - 858 - spi_message_init(&qca->spi_msg2); 859 - spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2); 860 - spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2); 861 837 862 838 memset(&qca->txr, 0, sizeof(qca->txr)); 863 839 qca->txr.count = TX_RING_MAX_LEN;
-5
drivers/net/ethernet/qualcomm/qca_spi.h
··· 83 83 struct tx_ring txr; 84 84 struct qcaspi_stats stats; 85 85 86 - struct spi_message spi_msg1; 87 - struct spi_message spi_msg2; 88 - struct spi_transfer spi_xfer1; 89 - struct spi_transfer spi_xfer2[2]; 90 - 91 86 u8 *rx_buffer; 92 87 u32 buffer_size; 93 88 u8 sync;
+7 -4
drivers/net/ethernet/realtek/r8169.c
··· 631 631 }; 632 632 633 633 enum rtl_flag { 634 - RTL_FLAG_TASK_ENABLED, 634 + RTL_FLAG_TASK_ENABLED = 0, 635 635 RTL_FLAG_TASK_SLOW_PENDING, 636 636 RTL_FLAG_TASK_RESET_PENDING, 637 637 RTL_FLAG_MAX ··· 4634 4634 4635 4635 rtl_set_rx_max_size(tp); 4636 4636 rtl_set_rx_tx_desc_registers(tp); 4637 - rtl_set_tx_config_registers(tp); 4638 4637 RTL_W8(tp, Cfg9346, Cfg9346_Lock); 4639 4638 4640 4639 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ 4641 4640 RTL_R8(tp, IntrMask); 4642 4641 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); 4643 4642 rtl_init_rxcfg(tp); 4643 + rtl_set_tx_config_registers(tp); 4644 4644 4645 4645 rtl_set_rx_mode(tp->dev); 4646 4646 /* no early-rx interrupts */ ··· 6655 6655 rtl8169_update_counters(tp); 6656 6656 6657 6657 rtl_lock_work(tp); 6658 - clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); 6658 + /* Clear all task flags */ 6659 + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); 6659 6660 6660 6661 rtl8169_down(dev); 6661 6662 rtl_unlock_work(tp); ··· 6839 6838 6840 6839 rtl_lock_work(tp); 6841 6840 napi_disable(&tp->napi); 6842 - clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); 6841 + /* Clear all task flags */ 6842 + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); 6843 + 6843 6844 rtl_unlock_work(tp); 6844 6845 6845 6846 rtl_pll_power_down(tp);
+1
drivers/net/ethernet/renesas/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0 1 2 # 2 3 # Renesas device configuration 3 4 #
+1
drivers/net/ethernet/renesas/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 1 2 # 2 3 # Makefile for the Renesas device drivers. 3 4 #
+1 -5
drivers/net/ethernet/renesas/ravb_ptp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* PTP 1588 clock using the Renesas Ethernet AVB 2 3 * 3 4 * Copyright (C) 2013-2015 Renesas Electronics Corporation 4 5 * Copyright (C) 2015 Renesas Solutions Corp. 5 6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 7 */ 12 8 13 9 #include "ravb.h"
+29 -1
drivers/net/usb/qmi_wwan.c
··· 967 967 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), 968 968 .driver_info = (unsigned long)&qmi_wwan_info, 969 969 }, 970 + { /* Quectel EP06/EG06/EM06 */ 971 + USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306, 972 + USB_CLASS_VENDOR_SPEC, 973 + USB_SUBCLASS_VENDOR_SPEC, 974 + 0xff), 975 + .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, 976 + }, 970 977 971 978 /* 3. Combined interface devices matching on interface number */ 972 979 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ ··· 1262 1255 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ 1263 1256 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ 1264 1257 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ 1265 - {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ 1266 1258 1267 1259 /* 4. Gobi 1000 devices */ 1268 1260 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ ··· 1337 1331 return false; 1338 1332 } 1339 1333 1334 + static bool quectel_ep06_diag_detected(struct usb_interface *intf) 1335 + { 1336 + struct usb_device *dev = interface_to_usbdev(intf); 1337 + struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc; 1338 + 1339 + if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c && 1340 + le16_to_cpu(dev->descriptor.idProduct) == 0x0306 && 1341 + intf_desc.bNumEndpoints == 2) 1342 + return true; 1343 + 1344 + return false; 1345 + } 1346 + 1340 1347 static int qmi_wwan_probe(struct usb_interface *intf, 1341 1348 const struct usb_device_id *prod) 1342 1349 { ··· 1383 1364 dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); 1384 1365 return -ENODEV; 1385 1366 } 1367 + 1368 + /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so 1369 + * we need to match on class/subclass/protocol. These values are 1370 + * identical for the diagnostic- and QMI-interface, but bNumEndpoints is 1371 + * different. 
Ignore the current interface if the number of endpoints 1372 + * equals the number for the diag interface (two). 1373 + */ 1374 + if (quectel_ep06_diag_detected(intf)) 1375 + return -ENODEV; 1386 1376 1387 1377 return usbnet_probe(intf, id); 1388 1378 }
+10 -14
drivers/net/xen-netfront.c
··· 87 87 /* IRQ name is queue name with "-tx" or "-rx" appended */ 88 88 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 89 89 90 - static DECLARE_WAIT_QUEUE_HEAD(module_load_q); 91 - static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); 90 + static DECLARE_WAIT_QUEUE_HEAD(module_wq); 92 91 93 92 struct netfront_stats { 94 93 u64 packets; ··· 1331 1332 netif_carrier_off(netdev); 1332 1333 1333 1334 xenbus_switch_state(dev, XenbusStateInitialising); 1334 - wait_event(module_load_q, 1335 - xenbus_read_driver_state(dev->otherend) != 1336 - XenbusStateClosed && 1337 - xenbus_read_driver_state(dev->otherend) != 1338 - XenbusStateUnknown); 1335 + wait_event(module_wq, 1336 + xenbus_read_driver_state(dev->otherend) != 1337 + XenbusStateClosed && 1338 + xenbus_read_driver_state(dev->otherend) != 1339 + XenbusStateUnknown); 1339 1340 return netdev; 1340 1341 1341 1342 exit: ··· 2009 2010 2010 2011 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 2011 2012 2013 + wake_up_all(&module_wq); 2014 + 2012 2015 switch (backend_state) { 2013 2016 case XenbusStateInitialising: 2014 2017 case XenbusStateInitialised: 2015 2018 case XenbusStateReconfiguring: 2016 2019 case XenbusStateReconfigured: 2017 - break; 2018 - 2019 2020 case XenbusStateUnknown: 2020 - wake_up_all(&module_unload_q); 2021 2021 break; 2022 2022 2023 2023 case XenbusStateInitWait: ··· 2032 2034 break; 2033 2035 2034 2036 case XenbusStateClosed: 2035 - wake_up_all(&module_unload_q); 2036 2037 if (dev->state == XenbusStateClosed) 2037 2038 break; 2038 2039 /* Missed the backend's CLOSING state -- fallthrough */ 2039 2040 case XenbusStateClosing: 2040 - wake_up_all(&module_unload_q); 2041 2041 xenbus_frontend_closed(dev); 2042 2042 break; 2043 2043 } ··· 2143 2147 2144 2148 if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { 2145 2149 xenbus_switch_state(dev, XenbusStateClosing); 2146 - wait_event(module_unload_q, 2150 + wait_event(module_wq, 2147 2151 xenbus_read_driver_state(dev->otherend) == 2148 
2152 XenbusStateClosing || 2149 2153 xenbus_read_driver_state(dev->otherend) == 2150 2154 XenbusStateUnknown); 2151 2155 2152 2156 xenbus_switch_state(dev, XenbusStateClosed); 2153 - wait_event(module_unload_q, 2157 + wait_event(module_wq, 2154 2158 xenbus_read_driver_state(dev->otherend) == 2155 2159 XenbusStateClosed || 2156 2160 xenbus_read_driver_state(dev->otherend) ==
+8 -3
drivers/s390/net/qeth_core_main.c
··· 25 25 #include <linux/netdevice.h> 26 26 #include <linux/netdev_features.h> 27 27 #include <linux/skbuff.h> 28 + #include <linux/vmalloc.h> 28 29 29 30 #include <net/iucv/af_iucv.h> 30 31 #include <net/dsfield.h> ··· 4700 4699 4701 4700 priv.buffer_len = oat_data.buffer_len; 4702 4701 priv.response_len = 0; 4703 - priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL); 4702 + priv.buffer = vzalloc(oat_data.buffer_len); 4704 4703 if (!priv.buffer) { 4705 4704 rc = -ENOMEM; 4706 4705 goto out; ··· 4741 4740 rc = -EFAULT; 4742 4741 4743 4742 out_free: 4744 - kfree(priv.buffer); 4743 + vfree(priv.buffer); 4745 4744 out: 4746 4745 return rc; 4747 4746 } ··· 5707 5706 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 5708 5707 dev->hw_features |= NETIF_F_SG; 5709 5708 dev->vlan_features |= NETIF_F_SG; 5709 + if (IS_IQD(card)) 5710 + dev->features |= NETIF_F_SG; 5710 5711 } 5711 5712 5712 5713 return dev; ··· 5771 5768 qeth_update_from_chp_desc(card); 5772 5769 5773 5770 card->dev = qeth_alloc_netdev(card); 5774 - if (!card->dev) 5771 + if (!card->dev) { 5772 + rc = -ENOMEM; 5775 5773 goto err_card; 5774 + } 5776 5775 5777 5776 qeth_determine_capabilities(card); 5778 5777 enforced_disc = qeth_enforce_discipline(card);
+1 -1
drivers/s390/net/qeth_l2_main.c
··· 423 423 default: 424 424 dev_kfree_skb_any(skb); 425 425 QETH_CARD_TEXT(card, 3, "inbunkno"); 426 - QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 426 + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); 427 427 continue; 428 428 } 429 429 work_done++;
+1 -1
drivers/s390/net/qeth_l3_main.c
··· 1390 1390 default: 1391 1391 dev_kfree_skb_any(skb); 1392 1392 QETH_CARD_TEXT(card, 3, "inbunkno"); 1393 - QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 1393 + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); 1394 1394 continue; 1395 1395 } 1396 1396 work_done++;
+5 -5
drivers/scsi/Kconfig
··· 52 52 default y 53 53 depends on SCSI 54 54 ---help--- 55 - This option enables the new blk-mq based I/O path for SCSI 56 - devices by default. With the option the scsi_mod.use_blk_mq 57 - module/boot option defaults to Y, without it to N, but it can 58 - still be overridden either way. 55 + This option enables the blk-mq based I/O path for SCSI devices by 56 + default. With this option the scsi_mod.use_blk_mq module/boot 57 + option defaults to Y, without it to N, but it can still be 58 + overridden either way. 59 59 60 - If unsure say N. 60 + If unsure say Y. 61 61 62 62 config SCSI_PROC_FS 63 63 bool "legacy /proc/scsi/ support"
+1 -1
drivers/scsi/aacraid/aacraid.h
··· 1346 1346 struct aac_hba_map_info { 1347 1347 __le32 rmw_nexus; /* nexus for native HBA devices */ 1348 1348 u8 devtype; /* device type */ 1349 - u8 reset_state; /* 0 - no reset, 1..x - */ 1349 + s8 reset_state; /* 0 - no reset, 1..x - */ 1350 1350 /* after xth TM LUN reset */ 1351 1351 u16 qd_limit; 1352 1352 u32 scan_counter;
+53 -18
drivers/scsi/csiostor/csio_hw.c
··· 1602 1602 } 1603 1603 1604 1604 /** 1605 + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits 1606 + * @caps32: a 32-bit Port Capabilities value 1607 + * 1608 + * Returns the equivalent 16-bit Port Capabilities value. Note that 1609 + * not all 32-bit Port Capabilities can be represented in the 16-bit 1610 + * Port Capabilities and some fields/values may not make it. 1611 + */ 1612 + fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) 1613 + { 1614 + fw_port_cap16_t caps16 = 0; 1615 + 1616 + #define CAP32_TO_CAP16(__cap) \ 1617 + do { \ 1618 + if (caps32 & FW_PORT_CAP32_##__cap) \ 1619 + caps16 |= FW_PORT_CAP_##__cap; \ 1620 + } while (0) 1621 + 1622 + CAP32_TO_CAP16(SPEED_100M); 1623 + CAP32_TO_CAP16(SPEED_1G); 1624 + CAP32_TO_CAP16(SPEED_10G); 1625 + CAP32_TO_CAP16(SPEED_25G); 1626 + CAP32_TO_CAP16(SPEED_40G); 1627 + CAP32_TO_CAP16(SPEED_100G); 1628 + CAP32_TO_CAP16(FC_RX); 1629 + CAP32_TO_CAP16(FC_TX); 1630 + CAP32_TO_CAP16(802_3_PAUSE); 1631 + CAP32_TO_CAP16(802_3_ASM_DIR); 1632 + CAP32_TO_CAP16(ANEG); 1633 + CAP32_TO_CAP16(FORCE_PAUSE); 1634 + CAP32_TO_CAP16(MDIAUTO); 1635 + CAP32_TO_CAP16(MDISTRAIGHT); 1636 + CAP32_TO_CAP16(FEC_RS); 1637 + CAP32_TO_CAP16(FEC_BASER_RS); 1638 + 1639 + #undef CAP32_TO_CAP16 1640 + 1641 + return caps16; 1642 + } 1643 + 1644 + /** 1605 1645 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities 1606 1646 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value 1607 1647 * ··· 1799 1759 val = 1; 1800 1760 1801 1761 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, 1802 - hw->pfn, 0, 1, &param, &val, false, 1762 + hw->pfn, 0, 1, &param, &val, true, 1803 1763 NULL); 1804 1764 1805 1765 if (csio_mb_issue(hw, mbp)) { ··· 1809 1769 return -EINVAL; 1810 1770 } 1811 1771 1812 - csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, 1813 - &val); 1814 - if (retval != FW_SUCCESS) { 1815 - csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n", 1816 - portid, retval); 1817 - mempool_free(mbp, 
hw->mb_mempool); 1818 - return -EINVAL; 1819 - } 1820 - 1821 - fw_caps = val; 1772 + csio_mb_process_read_params_rsp(hw, mbp, &retval, 1773 + 0, NULL); 1774 + fw_caps = retval ? FW_CAPS16 : FW_CAPS32; 1822 1775 } 1823 1776 1824 1777 /* Read PORT information */ ··· 2397 2364 } 2398 2365 2399 2366 /* 2400 - * Returns -EINVAL if attempts to flash the firmware failed 2401 - * else returns 0, 2367 + * Returns -EINVAL if attempts to flash the firmware failed, 2368 + * -ENOMEM if memory allocation failed else returns 0, 2402 2369 * if flashing was not attempted because the card had the 2403 2370 * latest firmware ECANCELED is returned 2404 2371 */ ··· 2426 2393 return -EINVAL; 2427 2394 } 2428 2395 2396 + /* allocate memory to read the header of the firmware on the 2397 + * card 2398 + */ 2399 + card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); 2400 + if (!card_fw) 2401 + return -ENOMEM; 2402 + 2429 2403 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) 2430 2404 fw_bin_file = FW_FNAME_T5; 2431 2405 else ··· 2445 2405 fw_data = fw->data; 2446 2406 fw_size = fw->size; 2447 2407 } 2448 - 2449 - /* allocate memory to read the header of the firmware on the 2450 - * card 2451 - */ 2452 - card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); 2453 2408 2454 2409 /* upgrade FW logic */ 2455 2410 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
+1
drivers/scsi/csiostor/csio_hw.h
··· 639 639 640 640 fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); 641 641 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); 642 + fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32); 642 643 fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); 643 644 644 645 int csio_hw_start(struct csio_hw *);
+3 -3
drivers/scsi/csiostor/csio_mb.c
··· 368 368 FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); 369 369 370 370 if (fw_caps == FW_CAPS16) 371 - cmdp->u.l1cfg.rcap = cpu_to_be32(fc); 371 + cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc)); 372 372 else 373 373 cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); 374 374 } ··· 395 395 *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); 396 396 *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); 397 397 } else { 398 - *pcaps = ntohs(rsp->u.info32.pcaps32); 399 - *acaps = ntohs(rsp->u.info32.acaps32); 398 + *pcaps = be32_to_cpu(rsp->u.info32.pcaps32); 399 + *acaps = be32_to_cpu(rsp->u.info32.acaps32); 400 400 } 401 401 } 402 402 }
+1 -23
drivers/scsi/hosts.c
··· 563 563 } 564 564 EXPORT_SYMBOL(scsi_host_get); 565 565 566 - struct scsi_host_mq_in_flight { 567 - int cnt; 568 - }; 569 - 570 - static void scsi_host_check_in_flight(struct request *rq, void *data, 571 - bool reserved) 572 - { 573 - struct scsi_host_mq_in_flight *in_flight = data; 574 - 575 - if (blk_mq_request_started(rq)) 576 - in_flight->cnt++; 577 - } 578 - 579 566 /** 580 567 * scsi_host_busy - Return the host busy counter 581 568 * @shost: Pointer to Scsi_Host to inc. 582 569 **/ 583 570 int scsi_host_busy(struct Scsi_Host *shost) 584 571 { 585 - struct scsi_host_mq_in_flight in_flight = { 586 - .cnt = 0, 587 - }; 588 - 589 - if (!shost->use_blk_mq) 590 - return atomic_read(&shost->host_busy); 591 - 592 - blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight, 593 - &in_flight); 594 - return in_flight.cnt; 572 + return atomic_read(&shost->host_busy); 595 573 } 596 574 EXPORT_SYMBOL(scsi_host_busy); 597 575
+1 -1
drivers/scsi/hpsa.c
··· 976 976 #endif 977 977 .sdev_attrs = hpsa_sdev_attrs, 978 978 .shost_attrs = hpsa_shost_attrs, 979 - .max_sectors = 1024, 979 + .max_sectors = 2048, 980 980 .no_write_same = 1, 981 981 }; 982 982
+1 -1
drivers/scsi/lpfc/lpfc.h
··· 672 672 #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 673 673 #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ 674 674 #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ 675 - #define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ 675 + #define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */ 676 676 677 677 uint32_t hba_flag; /* hba generic flags */ 678 678 #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
+4 -4
drivers/scsi/lpfc/lpfc_attr.c
··· 5122 5122 5123 5123 /* 5124 5124 # lpfc_fdmi_on: Controls FDMI support. 5125 - # 0 No FDMI support (default) 5126 - # 1 Traditional FDMI support 5125 + # 0 No FDMI support 5126 + # 1 Traditional FDMI support (default) 5127 5127 # Traditional FDMI support means the driver will assume FDMI-2 support; 5128 5128 # however, if that fails, it will fallback to FDMI-1. 5129 5129 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. 5130 5130 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of 5131 5131 # lpfc_fdmi_on. 5132 - # Value range [0,1]. Default value is 0. 5132 + # Value range [0,1]. Default value is 1. 5133 5133 */ 5134 - LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); 5134 + LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support"); 5135 5135 5136 5136 /* 5137 5137 # Specifies the maximum number of ELS cmds we can have outstanding (for
+6 -1
drivers/scsi/qedi/qedi.h
··· 77 77 QEDI_NVM_TGT_SEC, 78 78 }; 79 79 80 + struct qedi_nvm_iscsi_image { 81 + struct nvm_iscsi_cfg iscsi_cfg; 82 + u32 crc; 83 + }; 84 + 80 85 struct qedi_uio_ctrl { 81 86 /* meta data */ 82 87 u32 uio_hsi_version; ··· 299 294 void *bdq_pbl_list; 300 295 dma_addr_t bdq_pbl_list_dma; 301 296 u8 bdq_pbl_list_num_entries; 302 - struct nvm_iscsi_cfg *iscsi_cfg; 297 + struct qedi_nvm_iscsi_image *iscsi_image; 303 298 dma_addr_t nvm_buf_dma; 304 299 void __iomem *bdq_primary_prod; 305 300 void __iomem *bdq_secondary_prod;
+15 -13
drivers/scsi/qedi/qedi_main.c
··· 1346 1346 1347 1347 static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) 1348 1348 { 1349 - if (qedi->iscsi_cfg) 1349 + if (qedi->iscsi_image) 1350 1350 dma_free_coherent(&qedi->pdev->dev, 1351 - sizeof(struct nvm_iscsi_cfg), 1352 - qedi->iscsi_cfg, qedi->nvm_buf_dma); 1351 + sizeof(struct qedi_nvm_iscsi_image), 1352 + qedi->iscsi_image, qedi->nvm_buf_dma); 1353 1353 } 1354 1354 1355 1355 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) 1356 1356 { 1357 - qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev, 1358 - sizeof(struct nvm_iscsi_cfg), 1359 - &qedi->nvm_buf_dma, GFP_KERNEL); 1360 - if (!qedi->iscsi_cfg) { 1357 + struct qedi_nvm_iscsi_image nvm_image; 1358 + 1359 + qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, 1360 + sizeof(nvm_image), 1361 + &qedi->nvm_buf_dma, 1362 + GFP_KERNEL); 1363 + if (!qedi->iscsi_image) { 1361 1364 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); 1362 1365 return -ENOMEM; 1363 1366 } 1364 1367 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1365 - "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg, 1368 + "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image, 1366 1369 qedi->nvm_buf_dma); 1367 1370 1368 1371 return 0; ··· 1908 1905 struct nvm_iscsi_block *block; 1909 1906 1910 1907 pf = qedi->dev_info.common.abs_pf_id; 1911 - block = &qedi->iscsi_cfg->block[0]; 1908 + block = &qedi->iscsi_image->iscsi_cfg.block[0]; 1912 1909 for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { 1913 1910 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> 1914 1911 NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; ··· 2197 2194 static int qedi_get_boot_info(struct qedi_ctx *qedi) 2198 2195 { 2199 2196 int ret = 1; 2200 - u16 len; 2201 - 2202 - len = sizeof(struct nvm_iscsi_cfg); 2197 + struct qedi_nvm_iscsi_image nvm_image; 2203 2198 2204 2199 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 2205 2200 "Get NVM iSCSI CFG image\n"); 2206 2201 ret = qedi_ops->common->nvm_get_image(qedi->cdev, 2207 2202 
QED_NVM_IMAGE_ISCSI_CFG, 2208 - (char *)qedi->iscsi_cfg, len); 2203 + (char *)qedi->iscsi_image, 2204 + sizeof(nvm_image)); 2209 2205 if (ret) 2210 2206 QEDI_ERR(&qedi->dbg_ctx, 2211 2207 "Could not get NVM image. ret = %d\n", ret);
+5 -16
drivers/scsi/scsi_lib.c
··· 345 345 unsigned long flags; 346 346 347 347 rcu_read_lock(); 348 - if (!shost->use_blk_mq) 349 - atomic_dec(&shost->host_busy); 348 + atomic_dec(&shost->host_busy); 350 349 if (unlikely(scsi_host_in_recovery(shost))) { 351 350 spin_lock_irqsave(shost->host_lock, flags); 352 351 if (shost->host_failed || shost->host_eh_scheduled) ··· 444 445 445 446 static inline bool scsi_host_is_busy(struct Scsi_Host *shost) 446 447 { 447 - /* 448 - * blk-mq can handle host queue busy efficiently via host-wide driver 449 - * tag allocation 450 - */ 451 - 452 - if (!shost->use_blk_mq && shost->can_queue > 0 && 448 + if (shost->can_queue > 0 && 453 449 atomic_read(&shost->host_busy) >= shost->can_queue) 454 450 return true; 455 451 if (atomic_read(&shost->host_blocked) > 0) ··· 1600 1606 if (scsi_host_in_recovery(shost)) 1601 1607 return 0; 1602 1608 1603 - if (!shost->use_blk_mq) 1604 - busy = atomic_inc_return(&shost->host_busy) - 1; 1605 - else 1606 - busy = 0; 1609 + busy = atomic_inc_return(&shost->host_busy) - 1; 1607 1610 if (atomic_read(&shost->host_blocked) > 0) { 1608 1611 if (busy) 1609 1612 goto starved; ··· 1616 1625 "unblocking host at zero depth\n")); 1617 1626 } 1618 1627 1619 - if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue) 1628 + if (shost->can_queue > 0 && busy >= shost->can_queue) 1620 1629 goto starved; 1621 1630 if (shost->host_self_blocked) 1622 1631 goto starved; ··· 1702 1711 * with the locks as normal issue path does. 1703 1712 */ 1704 1713 atomic_inc(&sdev->device_busy); 1705 - 1706 - if (!shost->use_blk_mq) 1707 - atomic_inc(&shost->host_busy); 1714 + atomic_inc(&shost->host_busy); 1708 1715 if (starget->can_queue > 0) 1709 1716 atomic_inc(&starget->target_busy); 1710 1717
+4 -4
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
··· 207 207 ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); 208 208 sgl->offset = sg_offset; 209 209 if (!ret) { 210 - pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", 211 - __func__, 0, xferlen, sgcnt); 210 + pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", 211 + __func__, 0, xferlen, sgcnt); 212 212 goto rel_ppods; 213 213 } 214 214 ··· 250 250 251 251 ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); 252 252 if (ret < 0) { 253 - pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", 254 - csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); 253 + pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", 254 + csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); 255 255 256 256 ttinfo->sgl = NULL; 257 257 ttinfo->nents = 0;
+1 -8
drivers/target/iscsi/iscsi_target.c
··· 4208 4208 crypto_free_ahash(tfm); 4209 4209 } 4210 4210 4211 - free_cpumask_var(conn->conn_cpumask); 4212 - 4213 - kfree(conn->conn_ops); 4214 - conn->conn_ops = NULL; 4215 - 4216 4211 if (conn->sock) 4217 4212 sock_release(conn->sock); 4218 4213 4219 4214 if (conn->conn_transport->iscsit_free_conn) 4220 4215 conn->conn_transport->iscsit_free_conn(conn); 4221 4216 4222 - iscsit_put_transport(conn->conn_transport); 4223 - 4224 4217 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4225 4218 conn->conn_state = TARG_CONN_STATE_FREE; 4226 - kfree(conn); 4219 + iscsit_free_conn(conn); 4227 4220 4228 4221 spin_lock_bh(&sess->conn_lock); 4229 4222 atomic_dec(&sess->nconn);
+78 -71
drivers/target/iscsi/iscsi_target_login.c
··· 67 67 goto out_req_buf; 68 68 } 69 69 70 - conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); 71 - if (!conn->conn_ops) { 72 - pr_err("Unable to allocate memory for" 73 - " struct iscsi_conn_ops.\n"); 74 - goto out_rsp_buf; 75 - } 76 - 77 - init_waitqueue_head(&conn->queues_wq); 78 - INIT_LIST_HEAD(&conn->conn_list); 79 - INIT_LIST_HEAD(&conn->conn_cmd_list); 80 - INIT_LIST_HEAD(&conn->immed_queue_list); 81 - INIT_LIST_HEAD(&conn->response_queue_list); 82 - init_completion(&conn->conn_post_wait_comp); 83 - init_completion(&conn->conn_wait_comp); 84 - init_completion(&conn->conn_wait_rcfr_comp); 85 - init_completion(&conn->conn_waiting_on_uc_comp); 86 - init_completion(&conn->conn_logout_comp); 87 - init_completion(&conn->rx_half_close_comp); 88 - init_completion(&conn->tx_half_close_comp); 89 - init_completion(&conn->rx_login_comp); 90 - spin_lock_init(&conn->cmd_lock); 91 - spin_lock_init(&conn->conn_usage_lock); 92 - spin_lock_init(&conn->immed_queue_lock); 93 - spin_lock_init(&conn->nopin_timer_lock); 94 - spin_lock_init(&conn->response_queue_lock); 95 - spin_lock_init(&conn->state_lock); 96 - 97 - if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { 98 - pr_err("Unable to allocate conn->conn_cpumask\n"); 99 - goto out_conn_ops; 100 - } 101 70 conn->conn_login = login; 102 71 103 72 return login; 104 73 105 - out_conn_ops: 106 - kfree(conn->conn_ops); 107 - out_rsp_buf: 108 - kfree(login->rsp_buf); 109 74 out_req_buf: 110 75 kfree(login->req_buf); 111 76 out_login: ··· 275 310 return -ENOMEM; 276 311 } 277 312 278 - ret = iscsi_login_set_conn_values(sess, conn, pdu->cid); 279 - if (unlikely(ret)) { 280 - kfree(sess); 281 - return ret; 282 - } 313 + if (iscsi_login_set_conn_values(sess, conn, pdu->cid)) 314 + goto free_sess; 315 + 283 316 sess->init_task_tag = pdu->itt; 284 317 memcpy(&sess->isid, pdu->isid, 6); 285 318 sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); ··· 1112 1149 return 0; 1113 1150 } 1114 1151 1152 + static struct 
iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np) 1153 + { 1154 + struct iscsi_conn *conn; 1155 + 1156 + conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); 1157 + if (!conn) { 1158 + pr_err("Could not allocate memory for new connection\n"); 1159 + return NULL; 1160 + } 1161 + pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 1162 + conn->conn_state = TARG_CONN_STATE_FREE; 1163 + 1164 + init_waitqueue_head(&conn->queues_wq); 1165 + INIT_LIST_HEAD(&conn->conn_list); 1166 + INIT_LIST_HEAD(&conn->conn_cmd_list); 1167 + INIT_LIST_HEAD(&conn->immed_queue_list); 1168 + INIT_LIST_HEAD(&conn->response_queue_list); 1169 + init_completion(&conn->conn_post_wait_comp); 1170 + init_completion(&conn->conn_wait_comp); 1171 + init_completion(&conn->conn_wait_rcfr_comp); 1172 + init_completion(&conn->conn_waiting_on_uc_comp); 1173 + init_completion(&conn->conn_logout_comp); 1174 + init_completion(&conn->rx_half_close_comp); 1175 + init_completion(&conn->tx_half_close_comp); 1176 + init_completion(&conn->rx_login_comp); 1177 + spin_lock_init(&conn->cmd_lock); 1178 + spin_lock_init(&conn->conn_usage_lock); 1179 + spin_lock_init(&conn->immed_queue_lock); 1180 + spin_lock_init(&conn->nopin_timer_lock); 1181 + spin_lock_init(&conn->response_queue_lock); 1182 + spin_lock_init(&conn->state_lock); 1183 + 1184 + timer_setup(&conn->nopin_response_timer, 1185 + iscsit_handle_nopin_response_timeout, 0); 1186 + timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); 1187 + 1188 + if (iscsit_conn_set_transport(conn, np->np_transport) < 0) 1189 + goto free_conn; 1190 + 1191 + conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); 1192 + if (!conn->conn_ops) { 1193 + pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n"); 1194 + goto put_transport; 1195 + } 1196 + 1197 + if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { 1198 + pr_err("Unable to allocate conn->conn_cpumask\n"); 1199 + goto free_mask; 1200 + } 1201 + 1202 + return conn; 1203 + 1204 + 
free_mask: 1205 + free_cpumask_var(conn->conn_cpumask); 1206 + put_transport: 1207 + iscsit_put_transport(conn->conn_transport); 1208 + free_conn: 1209 + kfree(conn); 1210 + return NULL; 1211 + } 1212 + 1213 + void iscsit_free_conn(struct iscsi_conn *conn) 1214 + { 1215 + free_cpumask_var(conn->conn_cpumask); 1216 + kfree(conn->conn_ops); 1217 + iscsit_put_transport(conn->conn_transport); 1218 + kfree(conn); 1219 + } 1220 + 1115 1221 void iscsi_target_login_sess_out(struct iscsi_conn *conn, 1116 1222 struct iscsi_np *np, bool zero_tsih, bool new_sess) 1117 1223 { ··· 1230 1198 crypto_free_ahash(tfm); 1231 1199 } 1232 1200 1233 - free_cpumask_var(conn->conn_cpumask); 1234 - 1235 - kfree(conn->conn_ops); 1236 - 1237 1201 if (conn->param_list) { 1238 1202 iscsi_release_param_list(conn->param_list); 1239 1203 conn->param_list = NULL; ··· 1247 1219 if (conn->conn_transport->iscsit_free_conn) 1248 1220 conn->conn_transport->iscsit_free_conn(conn); 1249 1221 1250 - iscsit_put_transport(conn->conn_transport); 1251 - kfree(conn); 1222 + iscsit_free_conn(conn); 1252 1223 } 1253 1224 1254 1225 static int __iscsi_target_login_thread(struct iscsi_np *np) ··· 1277 1250 } 1278 1251 spin_unlock_bh(&np->np_thread_lock); 1279 1252 1280 - conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); 1253 + conn = iscsit_alloc_conn(np); 1281 1254 if (!conn) { 1282 - pr_err("Could not allocate memory for" 1283 - " new connection\n"); 1284 1255 /* Get another socket */ 1285 - return 1; 1286 - } 1287 - pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 1288 - conn->conn_state = TARG_CONN_STATE_FREE; 1289 - 1290 - timer_setup(&conn->nopin_response_timer, 1291 - iscsit_handle_nopin_response_timeout, 0); 1292 - timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); 1293 - 1294 - if (iscsit_conn_set_transport(conn, np->np_transport) < 0) { 1295 - kfree(conn); 1296 1256 return 1; 1297 1257 } 1298 1258 1299 1259 rc = np->np_transport->iscsit_accept_np(np, conn); 1300 1260 if (rc == -ENOSYS) { 
1301 1261 complete(&np->np_restart_comp); 1302 - iscsit_put_transport(conn->conn_transport); 1303 - kfree(conn); 1304 - conn = NULL; 1262 + iscsit_free_conn(conn); 1305 1263 goto exit; 1306 1264 } else if (rc < 0) { 1307 1265 spin_lock_bh(&np->np_thread_lock); ··· 1294 1282 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1295 1283 spin_unlock_bh(&np->np_thread_lock); 1296 1284 complete(&np->np_restart_comp); 1297 - iscsit_put_transport(conn->conn_transport); 1298 - kfree(conn); 1299 - conn = NULL; 1285 + iscsit_free_conn(conn); 1300 1286 /* Get another socket */ 1301 1287 return 1; 1302 1288 } 1303 1289 spin_unlock_bh(&np->np_thread_lock); 1304 - iscsit_put_transport(conn->conn_transport); 1305 - kfree(conn); 1306 - conn = NULL; 1307 - goto out; 1290 + iscsit_free_conn(conn); 1291 + return 1; 1308 1292 } 1309 1293 /* 1310 1294 * Perform the remaining iSCSI connection initialization items.. ··· 1450 1442 tpg_np = NULL; 1451 1443 } 1452 1444 1453 - out: 1454 1445 return 1; 1455 1446 1456 1447 exit:
+1 -1
drivers/target/iscsi/iscsi_target_login.h
··· 19 19 extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); 20 20 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 21 21 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 22 - extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 22 + extern void iscsit_free_conn(struct iscsi_conn *); 23 23 extern int iscsit_start_kthreads(struct iscsi_conn *); 24 24 extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 25 25 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
+7 -8
fs/afs/proc.c
··· 98 98 goto inval; 99 99 100 100 args = strchr(name, ' '); 101 - if (!args) 102 - goto inval; 103 - do { 104 - *args++ = 0; 105 - } while(*args == ' '); 106 - if (!*args) 107 - goto inval; 101 + if (args) { 102 + do { 103 + *args++ = 0; 104 + } while(*args == ' '); 105 + if (!*args) 106 + goto inval; 107 + } 108 108 109 109 /* determine command to perform */ 110 110 _debug("cmd=%s name=%s args=%s", buf, name, args); ··· 120 120 121 121 if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags)) 122 122 afs_put_cell(net, cell); 123 - printk("kAFS: Added new cell '%s'\n", name); 124 123 } else { 125 124 goto inval; 126 125 }
+10 -2
fs/btrfs/ctree.h
··· 1280 1280 int send_in_progress; 1281 1281 struct btrfs_subvolume_writers *subv_writers; 1282 1282 atomic_t will_be_snapshotted; 1283 + atomic_t snapshot_force_cow; 1283 1284 1284 1285 /* For qgroup metadata reserved space */ 1285 1286 spinlock_t qgroup_meta_rsv_lock; ··· 3391 3390 #define btrfs_debug(fs_info, fmt, args...) \ 3392 3391 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) 3393 3392 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ 3394 - btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) 3393 + btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) 3395 3394 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ 3396 - btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) 3395 + btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) 3397 3396 #define btrfs_debug_rl(fs_info, fmt, args...) \ 3398 3397 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) 3399 3398 #endif ··· 3402 3401 do { \ 3403 3402 rcu_read_lock(); \ 3404 3403 btrfs_printk(fs_info, fmt, ##args); \ 3404 + rcu_read_unlock(); \ 3405 + } while (0) 3406 + 3407 + #define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \ 3408 + do { \ 3409 + rcu_read_lock(); \ 3410 + btrfs_no_printk(fs_info, fmt, ##args); \ 3405 3411 rcu_read_unlock(); \ 3406 3412 } while (0) 3407 3413
+1
fs/btrfs/disk-io.c
··· 1187 1187 atomic_set(&root->log_batch, 0); 1188 1188 refcount_set(&root->refs, 1); 1189 1189 atomic_set(&root->will_be_snapshotted, 0); 1190 + atomic_set(&root->snapshot_force_cow, 0); 1190 1191 root->log_transid = 0; 1191 1192 root->log_transid_committed = -1; 1192 1193 root->last_log_commit = 0;
+8 -9
fs/btrfs/extent-tree.c
··· 5800 5800 * root: the root of the parent directory 5801 5801 * rsv: block reservation 5802 5802 * items: the number of items that we need do reservation 5803 - * qgroup_reserved: used to return the reserved size in qgroup 5803 + * use_global_rsv: allow fallback to the global block reservation 5804 5804 * 5805 5805 * This function is used to reserve the space for snapshot/subvolume 5806 5806 * creation and deletion. Those operations are different with the ··· 5810 5810 * the space reservation mechanism in start_transaction(). 5811 5811 */ 5812 5812 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, 5813 - struct btrfs_block_rsv *rsv, 5814 - int items, 5813 + struct btrfs_block_rsv *rsv, int items, 5815 5814 bool use_global_rsv) 5816 5815 { 5816 + u64 qgroup_num_bytes = 0; 5817 5817 u64 num_bytes; 5818 5818 int ret; 5819 5819 struct btrfs_fs_info *fs_info = root->fs_info; ··· 5821 5821 5822 5822 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { 5823 5823 /* One for parent inode, two for dir entries */ 5824 - num_bytes = 3 * fs_info->nodesize; 5825 - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); 5824 + qgroup_num_bytes = 3 * fs_info->nodesize; 5825 + ret = btrfs_qgroup_reserve_meta_prealloc(root, 5826 + qgroup_num_bytes, true); 5826 5827 if (ret) 5827 5828 return ret; 5828 - } else { 5829 - num_bytes = 0; 5830 5829 } 5831 5830 5832 5831 num_bytes = btrfs_calc_trans_metadata_size(fs_info, items); ··· 5837 5838 if (ret == -ENOSPC && use_global_rsv) 5838 5839 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1); 5839 5840 5840 - if (ret && num_bytes) 5841 - btrfs_qgroup_free_meta_prealloc(root, num_bytes); 5841 + if (ret && qgroup_num_bytes) 5842 + btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes); 5842 5843 5843 5844 return ret; 5844 5845 }
+84 -33
fs/btrfs/inode.c
··· 1271 1271 u64 disk_num_bytes; 1272 1272 u64 ram_bytes; 1273 1273 int extent_type; 1274 - int ret, err; 1274 + int ret; 1275 1275 int type; 1276 1276 int nocow; 1277 1277 int check_prev = 1; ··· 1403 1403 * if there are pending snapshots for this root, 1404 1404 * we fall into common COW way. 1405 1405 */ 1406 - if (!nolock) { 1407 - err = btrfs_start_write_no_snapshotting(root); 1408 - if (!err) 1409 - goto out_check; 1410 - } 1406 + if (!nolock && atomic_read(&root->snapshot_force_cow)) 1407 + goto out_check; 1411 1408 /* 1412 1409 * force cow if csum exists in the range. 1413 1410 * this ensure that csum for a given extent are ··· 1413 1416 ret = csum_exist_in_range(fs_info, disk_bytenr, 1414 1417 num_bytes); 1415 1418 if (ret) { 1416 - if (!nolock) 1417 - btrfs_end_write_no_snapshotting(root); 1418 - 1419 1419 /* 1420 1420 * ret could be -EIO if the above fails to read 1421 1421 * metadata. ··· 1425 1431 WARN_ON_ONCE(nolock); 1426 1432 goto out_check; 1427 1433 } 1428 - if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) { 1429 - if (!nolock) 1430 - btrfs_end_write_no_snapshotting(root); 1434 + if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) 1431 1435 goto out_check; 1432 - } 1433 1436 nocow = 1; 1434 1437 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1435 1438 extent_end = found_key.offset + ··· 1439 1448 out_check: 1440 1449 if (extent_end <= start) { 1441 1450 path->slots[0]++; 1442 - if (!nolock && nocow) 1443 - btrfs_end_write_no_snapshotting(root); 1444 1451 if (nocow) 1445 1452 btrfs_dec_nocow_writers(fs_info, disk_bytenr); 1446 1453 goto next_slot; ··· 1460 1471 end, page_started, nr_written, 1, 1461 1472 NULL); 1462 1473 if (ret) { 1463 - if (!nolock && nocow) 1464 - btrfs_end_write_no_snapshotting(root); 1465 1474 if (nocow) 1466 1475 btrfs_dec_nocow_writers(fs_info, 1467 1476 disk_bytenr); ··· 1479 1492 ram_bytes, BTRFS_COMPRESS_NONE, 1480 1493 BTRFS_ORDERED_PREALLOC); 1481 1494 if (IS_ERR(em)) { 1482 - if (!nolock && nocow) 1483 - 
btrfs_end_write_no_snapshotting(root); 1484 1495 if (nocow) 1485 1496 btrfs_dec_nocow_writers(fs_info, 1486 1497 disk_bytenr); ··· 1517 1532 EXTENT_CLEAR_DATA_RESV, 1518 1533 PAGE_UNLOCK | PAGE_SET_PRIVATE2); 1519 1534 1520 - if (!nolock && nocow) 1521 - btrfs_end_write_no_snapshotting(root); 1522 1535 cur_offset = extent_end; 1523 1536 1524 1537 /* ··· 6622 6639 drop_inode = 1; 6623 6640 } else { 6624 6641 struct dentry *parent = dentry->d_parent; 6642 + int ret; 6643 + 6625 6644 err = btrfs_update_inode(trans, root, inode); 6626 6645 if (err) 6627 6646 goto fail; ··· 6637 6652 goto fail; 6638 6653 } 6639 6654 d_instantiate(dentry, inode); 6640 - btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent); 6655 + ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent, 6656 + true, NULL); 6657 + if (ret == BTRFS_NEED_TRANS_COMMIT) { 6658 + err = btrfs_commit_transaction(trans); 6659 + trans = NULL; 6660 + } 6641 6661 } 6642 6662 6643 6663 fail: ··· 9378 9388 u64 new_idx = 0; 9379 9389 u64 root_objectid; 9380 9390 int ret; 9381 - int ret2; 9382 9391 bool root_log_pinned = false; 9383 9392 bool dest_log_pinned = false; 9393 + struct btrfs_log_ctx ctx_root; 9394 + struct btrfs_log_ctx ctx_dest; 9395 + bool sync_log_root = false; 9396 + bool sync_log_dest = false; 9397 + bool commit_transaction = false; 9384 9398 9385 9399 /* we only allow rename subvolume link between subvolumes */ 9386 9400 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9387 9401 return -EXDEV; 9402 + 9403 + btrfs_init_log_ctx(&ctx_root, old_inode); 9404 + btrfs_init_log_ctx(&ctx_dest, new_inode); 9388 9405 9389 9406 /* close the race window with snapshot create/destroy ioctl */ 9390 9407 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) ··· 9539 9542 9540 9543 if (root_log_pinned) { 9541 9544 parent = new_dentry->d_parent; 9542 - btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), 9543 - parent); 9545 + ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), 9546 + BTRFS_I(old_dir), 
parent, 9547 + false, &ctx_root); 9548 + if (ret == BTRFS_NEED_LOG_SYNC) 9549 + sync_log_root = true; 9550 + else if (ret == BTRFS_NEED_TRANS_COMMIT) 9551 + commit_transaction = true; 9552 + ret = 0; 9544 9553 btrfs_end_log_trans(root); 9545 9554 root_log_pinned = false; 9546 9555 } 9547 9556 if (dest_log_pinned) { 9548 - parent = old_dentry->d_parent; 9549 - btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir), 9550 - parent); 9557 + if (!commit_transaction) { 9558 + parent = old_dentry->d_parent; 9559 + ret = btrfs_log_new_name(trans, BTRFS_I(new_inode), 9560 + BTRFS_I(new_dir), parent, 9561 + false, &ctx_dest); 9562 + if (ret == BTRFS_NEED_LOG_SYNC) 9563 + sync_log_dest = true; 9564 + else if (ret == BTRFS_NEED_TRANS_COMMIT) 9565 + commit_transaction = true; 9566 + ret = 0; 9567 + } 9551 9568 btrfs_end_log_trans(dest); 9552 9569 dest_log_pinned = false; 9553 9570 } ··· 9594 9583 dest_log_pinned = false; 9595 9584 } 9596 9585 } 9597 - ret2 = btrfs_end_transaction(trans); 9598 - ret = ret ? ret : ret2; 9586 + if (!ret && sync_log_root && !commit_transaction) { 9587 + ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, 9588 + &ctx_root); 9589 + if (ret) 9590 + commit_transaction = true; 9591 + } 9592 + if (!ret && sync_log_dest && !commit_transaction) { 9593 + ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root, 9594 + &ctx_dest); 9595 + if (ret) 9596 + commit_transaction = true; 9597 + } 9598 + if (commit_transaction) { 9599 + ret = btrfs_commit_transaction(trans); 9600 + } else { 9601 + int ret2; 9602 + 9603 + ret2 = btrfs_end_transaction(trans); 9604 + ret = ret ? 
ret : ret2; 9605 + } 9599 9606 out_notrans: 9600 9607 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 9601 9608 up_read(&fs_info->subvol_sem); ··· 9690 9661 int ret; 9691 9662 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9692 9663 bool log_pinned = false; 9664 + struct btrfs_log_ctx ctx; 9665 + bool sync_log = false; 9666 + bool commit_transaction = false; 9693 9667 9694 9668 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9695 9669 return -EPERM; ··· 9850 9818 if (log_pinned) { 9851 9819 struct dentry *parent = new_dentry->d_parent; 9852 9820 9853 - btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), 9854 - parent); 9821 + btrfs_init_log_ctx(&ctx, old_inode); 9822 + ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), 9823 + BTRFS_I(old_dir), parent, 9824 + false, &ctx); 9825 + if (ret == BTRFS_NEED_LOG_SYNC) 9826 + sync_log = true; 9827 + else if (ret == BTRFS_NEED_TRANS_COMMIT) 9828 + commit_transaction = true; 9829 + ret = 0; 9855 9830 btrfs_end_log_trans(root); 9856 9831 log_pinned = false; 9857 9832 } ··· 9895 9856 btrfs_end_log_trans(root); 9896 9857 log_pinned = false; 9897 9858 } 9898 - btrfs_end_transaction(trans); 9859 + if (!ret && sync_log) { 9860 + ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx); 9861 + if (ret) 9862 + commit_transaction = true; 9863 + } 9864 + if (commit_transaction) { 9865 + ret = btrfs_commit_transaction(trans); 9866 + } else { 9867 + int ret2; 9868 + 9869 + ret2 = btrfs_end_transaction(trans); 9870 + ret = ret ? ret : ret2; 9871 + } 9899 9872 out_notrans: 9900 9873 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9901 9874 up_read(&fs_info->subvol_sem);
+35
fs/btrfs/ioctl.c
··· 747 747 struct btrfs_pending_snapshot *pending_snapshot; 748 748 struct btrfs_trans_handle *trans; 749 749 int ret; 750 + bool snapshot_force_cow = false; 750 751 751 752 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 752 753 return -EINVAL; ··· 764 763 goto free_pending; 765 764 } 766 765 766 + /* 767 + * Force new buffered writes to reserve space even when NOCOW is 768 + * possible. This is to avoid later writeback (running dealloc) to 769 + * fallback to COW mode and unexpectedly fail with ENOSPC. 770 + */ 767 771 atomic_inc(&root->will_be_snapshotted); 768 772 smp_mb__after_atomic(); 769 773 /* wait for no snapshot writes */ ··· 778 772 ret = btrfs_start_delalloc_inodes(root); 779 773 if (ret) 780 774 goto dec_and_free; 775 + 776 + /* 777 + * All previous writes have started writeback in NOCOW mode, so now 778 + * we force future writes to fallback to COW mode during snapshot 779 + * creation. 780 + */ 781 + atomic_inc(&root->snapshot_force_cow); 782 + snapshot_force_cow = true; 781 783 782 784 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); 783 785 ··· 851 837 fail: 852 838 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); 853 839 dec_and_free: 840 + if (snapshot_force_cow) 841 + atomic_dec(&root->snapshot_force_cow); 854 842 if (atomic_dec_and_test(&root->will_be_snapshotted)) 855 843 wake_up_var(&root->will_be_snapshotted); 856 844 free_pending: ··· 3469 3453 3470 3454 same_lock_start = min_t(u64, loff, dst_loff); 3471 3455 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start; 3456 + } else { 3457 + /* 3458 + * If the source and destination inodes are different, the 3459 + * source's range end offset matches the source's i_size, that 3460 + * i_size is not a multiple of the sector size, and the 3461 + * destination range does not go past the destination's i_size, 3462 + * we must round down the length to the nearest sector size 3463 + * multiple. 
If we don't do this adjustment we end replacing 3464 + * with zeroes the bytes in the range that starts at the 3465 + * deduplication range's end offset and ends at the next sector 3466 + * size multiple. 3467 + */ 3468 + if (loff + olen == i_size_read(src) && 3469 + dst_loff + len < i_size_read(dst)) { 3470 + const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize; 3471 + 3472 + len = round_down(i_size_read(src), sz) - loff; 3473 + olen = len; 3474 + } 3472 3475 } 3473 3476 3474 3477 again:
+2 -3
fs/btrfs/qgroup.c
··· 1019 1019 spin_unlock(&fs_info->qgroup_lock); 1020 1020 1021 1021 ret = btrfs_commit_transaction(trans); 1022 - if (ret) { 1023 - trans = NULL; 1022 + trans = NULL; 1023 + if (ret) 1024 1024 goto out_free_path; 1025 - } 1026 1025 1027 1026 ret = qgroup_rescan_init(fs_info, 0, 1); 1028 1027 if (!ret) {
+42 -6
fs/btrfs/tree-log.c
··· 6025 6025 * Call this after adding a new name for a file and it will properly 6026 6026 * update the log to reflect the new name. 6027 6027 * 6028 - * It will return zero if all goes well, and it will return 1 if a 6029 - * full transaction commit is required. 6028 + * @ctx can not be NULL when @sync_log is false, and should be NULL when it's 6029 + * true (because it's not used). 6030 + * 6031 + * Return value depends on whether @sync_log is true or false. 6032 + * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be 6033 + * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT 6034 + * otherwise. 6035 + * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to 6036 + * to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log, 6037 + * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be 6038 + * committed (without attempting to sync the log). 6030 6039 */ 6031 6040 int btrfs_log_new_name(struct btrfs_trans_handle *trans, 6032 6041 struct btrfs_inode *inode, struct btrfs_inode *old_dir, 6033 - struct dentry *parent) 6042 + struct dentry *parent, 6043 + bool sync_log, struct btrfs_log_ctx *ctx) 6034 6044 { 6035 6045 struct btrfs_fs_info *fs_info = trans->fs_info; 6046 + int ret; 6036 6047 6037 6048 /* 6038 6049 * this will force the logging code to walk the dentry chain ··· 6058 6047 */ 6059 6048 if (inode->logged_trans <= fs_info->last_trans_committed && 6060 6049 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed)) 6061 - return 0; 6050 + return sync_log ? 
BTRFS_DONT_NEED_TRANS_COMMIT : 6051 + BTRFS_DONT_NEED_LOG_SYNC; 6062 6052 6063 - return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, 6064 - LOG_INODE_EXISTS, NULL); 6053 + if (sync_log) { 6054 + struct btrfs_log_ctx ctx2; 6055 + 6056 + btrfs_init_log_ctx(&ctx2, &inode->vfs_inode); 6057 + ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, 6058 + LOG_INODE_EXISTS, &ctx2); 6059 + if (ret == BTRFS_NO_LOG_SYNC) 6060 + return BTRFS_DONT_NEED_TRANS_COMMIT; 6061 + else if (ret) 6062 + return BTRFS_NEED_TRANS_COMMIT; 6063 + 6064 + ret = btrfs_sync_log(trans, inode->root, &ctx2); 6065 + if (ret) 6066 + return BTRFS_NEED_TRANS_COMMIT; 6067 + return BTRFS_DONT_NEED_TRANS_COMMIT; 6068 + } 6069 + 6070 + ASSERT(ctx); 6071 + ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, 6072 + LOG_INODE_EXISTS, ctx); 6073 + if (ret == BTRFS_NO_LOG_SYNC) 6074 + return BTRFS_DONT_NEED_LOG_SYNC; 6075 + else if (ret) 6076 + return BTRFS_NEED_TRANS_COMMIT; 6077 + 6078 + return BTRFS_NEED_LOG_SYNC; 6065 6079 } 6066 6080
+9 -1
fs/btrfs/tree-log.h
··· 71 71 int for_rename); 72 72 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, 73 73 struct btrfs_inode *dir); 74 + /* Return values for btrfs_log_new_name() */ 75 + enum { 76 + BTRFS_DONT_NEED_TRANS_COMMIT, 77 + BTRFS_NEED_TRANS_COMMIT, 78 + BTRFS_DONT_NEED_LOG_SYNC, 79 + BTRFS_NEED_LOG_SYNC, 80 + }; 74 81 int btrfs_log_new_name(struct btrfs_trans_handle *trans, 75 82 struct btrfs_inode *inode, struct btrfs_inode *old_dir, 76 - struct dentry *parent); 83 + struct dentry *parent, 84 + bool sync_log, struct btrfs_log_ctx *ctx); 77 85 78 86 #endif
+6 -1
fs/btrfs/volumes.c
··· 4491 4491 4492 4492 /* Now btrfs_update_device() will change the on-disk size. */ 4493 4493 ret = btrfs_update_device(trans, device); 4494 - btrfs_end_transaction(trans); 4494 + if (ret < 0) { 4495 + btrfs_abort_transaction(trans, ret); 4496 + btrfs_end_transaction(trans); 4497 + } else { 4498 + ret = btrfs_commit_transaction(trans); 4499 + } 4495 4500 done: 4496 4501 btrfs_free_path(path); 4497 4502 if (ret) {
+11 -5
fs/ceph/super.c
··· 602 602 603 603 /* 604 604 * create a new fs client 605 + * 606 + * Success or not, this function consumes @fsopt and @opt. 605 607 */ 606 608 static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, 607 609 struct ceph_options *opt) ··· 611 609 struct ceph_fs_client *fsc; 612 610 int page_count; 613 611 size_t size; 614 - int err = -ENOMEM; 612 + int err; 615 613 616 614 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); 617 - if (!fsc) 618 - return ERR_PTR(-ENOMEM); 615 + if (!fsc) { 616 + err = -ENOMEM; 617 + goto fail; 618 + } 619 619 620 620 fsc->client = ceph_create_client(opt, fsc); 621 621 if (IS_ERR(fsc->client)) { 622 622 err = PTR_ERR(fsc->client); 623 623 goto fail; 624 624 } 625 + opt = NULL; /* fsc->client now owns this */ 625 626 626 627 fsc->client->extra_mon_dispatch = extra_mon_dispatch; 627 628 fsc->client->osdc.abort_on_full = true; ··· 682 677 ceph_destroy_client(fsc->client); 683 678 fail: 684 679 kfree(fsc); 680 + if (opt) 681 + ceph_destroy_options(opt); 682 + destroy_mount_options(fsopt); 685 683 return ERR_PTR(err); 686 684 } 687 685 ··· 1050 1042 fsc = create_fs_client(fsopt, opt); 1051 1043 if (IS_ERR(fsc)) { 1052 1044 res = ERR_CAST(fsc); 1053 - destroy_mount_options(fsopt); 1054 - ceph_destroy_options(opt); 1055 1045 goto out_final; 1056 1046 } 1057 1047
-3
fs/cifs/cifs_unicode.c
··· 105 105 case SFM_LESSTHAN: 106 106 *target = '<'; 107 107 break; 108 - case SFM_SLASH: 109 - *target = '\\'; 110 - break; 111 108 case SFM_SPACE: 112 109 *target = ' '; 113 110 break;
+1 -1
fs/cifs/connect.c
··· 2547 2547 if (tcon == NULL) 2548 2548 return -ENOMEM; 2549 2549 2550 - snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName); 2550 + snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname); 2551 2551 2552 2552 /* cannot fail */ 2553 2553 nls_codepage = load_nls_default();
+2
fs/cifs/inode.c
··· 467 467 oparms.cifs_sb = cifs_sb; 468 468 oparms.desired_access = GENERIC_READ; 469 469 oparms.create_options = CREATE_NOT_DIR; 470 + if (backup_cred(cifs_sb)) 471 + oparms.create_options |= CREATE_OPEN_BACKUP_INTENT; 470 472 oparms.disposition = FILE_OPEN; 471 473 oparms.path = path; 472 474 oparms.fid = &fid;
+9 -5
fs/cifs/smb2misc.c
··· 248 248 * MacOS server pads after SMB2.1 write response with 3 bytes 249 249 * of junk. Other servers match RFC1001 len to actual 250 250 * SMB2/SMB3 frame length (header + smb2 response specific data) 251 - * Some windows servers do too when compounding is used. 252 - * Log the server error (once), but allow it and continue 251 + * Some windows servers also pad up to 8 bytes when compounding. 252 + * If pad is longer than eight bytes, log the server behavior 253 + * (once), since it may indicate a problem but allow it and continue 253 254 * since the frame is parseable. 254 255 */ 255 256 if (clc_len < len) { 256 - printk_once(KERN_WARNING 257 - "SMB2 server sent bad RFC1001 len %d not %d\n", 258 - len, clc_len); 257 + pr_warn_once( 258 + "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n", 259 + len, clc_len, command, mid); 259 260 return 0; 260 261 } 262 + pr_warn_once( 263 + "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n", 264 + len, clc_len, command, mid); 261 265 262 266 return 1; 263 267 }
+25 -10
fs/cifs/smb2ops.c
··· 630 630 oparms.tcon = tcon; 631 631 oparms.desired_access = FILE_READ_ATTRIBUTES; 632 632 oparms.disposition = FILE_OPEN; 633 - oparms.create_options = 0; 633 + if (backup_cred(cifs_sb)) 634 + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; 635 + else 636 + oparms.create_options = 0; 634 637 oparms.fid = &fid; 635 638 oparms.reconnect = false; 636 639 ··· 782 779 oparms.tcon = tcon; 783 780 oparms.desired_access = FILE_READ_EA; 784 781 oparms.disposition = FILE_OPEN; 785 - oparms.create_options = 0; 782 + if (backup_cred(cifs_sb)) 783 + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; 784 + else 785 + oparms.create_options = 0; 786 786 oparms.fid = &fid; 787 787 oparms.reconnect = false; 788 788 ··· 864 858 oparms.tcon = tcon; 865 859 oparms.desired_access = FILE_WRITE_EA; 866 860 oparms.disposition = FILE_OPEN; 867 - oparms.create_options = 0; 861 + if (backup_cred(cifs_sb)) 862 + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; 863 + else 864 + oparms.create_options = 0; 868 865 oparms.fid = &fid; 869 866 oparms.reconnect = false; 870 867 ··· 1462 1453 oparms.tcon = tcon; 1463 1454 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; 1464 1455 oparms.disposition = FILE_OPEN; 1465 - oparms.create_options = 0; 1456 + if (backup_cred(cifs_sb)) 1457 + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; 1458 + else 1459 + oparms.create_options = 0; 1466 1460 oparms.fid = fid; 1467 1461 oparms.reconnect = false; 1468 1462 ··· 1869 1857 oparms.tcon = tcon; 1870 1858 oparms.desired_access = FILE_READ_ATTRIBUTES; 1871 1859 oparms.disposition = FILE_OPEN; 1872 - oparms.create_options = 0; 1860 + if (backup_cred(cifs_sb)) 1861 + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; 1862 + else 1863 + oparms.create_options = 0; 1873 1864 oparms.fid = &fid; 1874 1865 oparms.reconnect = false; 1875 1866 ··· 3654 3639 struct smb_version_values smb3any_values = { 3655 3640 .version_string = SMB3ANY_VERSION_STRING, 3656 3641 .protocol_id = SMB302_PROT_ID, /* 
doesn't matter, send protocol array */ 3657 - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3642 + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, 3658 3643 .large_lock_type = 0, 3659 3644 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3660 3645 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, ··· 3675 3660 struct smb_version_values smbdefault_values = { 3676 3661 .version_string = SMBDEFAULT_VERSION_STRING, 3677 3662 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ 3678 - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3663 + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, 3679 3664 .large_lock_type = 0, 3680 3665 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3681 3666 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, ··· 3696 3681 struct smb_version_values smb30_values = { 3697 3682 .version_string = SMB30_VERSION_STRING, 3698 3683 .protocol_id = SMB30_PROT_ID, 3699 - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3684 + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, 3700 3685 .large_lock_type = 0, 3701 3686 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3702 3687 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, ··· 3717 3702 struct smb_version_values 
smb302_values = { 3718 3703 .version_string = SMB302_VERSION_STRING, 3719 3704 .protocol_id = SMB302_PROT_ID, 3720 - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3705 + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, 3721 3706 .large_lock_type = 0, 3722 3707 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3723 3708 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, ··· 3738 3723 struct smb_version_values smb311_values = { 3739 3724 .version_string = SMB311_VERSION_STRING, 3740 3725 .protocol_id = SMB311_PROT_ID, 3741 - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3726 + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, 3742 3727 .large_lock_type = 0, 3743 3728 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3744 3729 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+3
fs/cifs/smb2pdu.c
··· 2178 2178 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || 2179 2179 *oplock == SMB2_OPLOCK_LEVEL_NONE) 2180 2180 req->RequestedOplockLevel = *oplock; 2181 + else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) && 2182 + (oparms->create_options & CREATE_NOT_FILE)) 2183 + req->RequestedOplockLevel = *oplock; /* no srv lease support */ 2181 2184 else { 2182 2185 rc = add_lease_context(server, iov, &n_iov, 2183 2186 oparms->fid->lease_key, oplock);
+1 -10
fs/nilfs2/alloc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * alloc.c - NILFS dat/inode allocator 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Originally written by Koji Sato. 17 8 * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
+1 -10
fs/nilfs2/alloc.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Originally written by Koji Sato. 17 8 * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
+1 -10
fs/nilfs2/bmap.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * bmap.c - NILFS block mapping. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/bmap.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * bmap.h - NILFS block mapping. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/btnode.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * btnode.c - NILFS B-tree node cache 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Originally written by Seiji Kihara. 17 8 * Fully revised by Ryusuke Konishi for stabilization and simplification.
+1 -10
fs/nilfs2/btnode.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * btnode.h - NILFS B-tree node cache 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Seiji Kihara. 17 8 * Revised by Ryusuke Konishi.
+1 -10
fs/nilfs2/btree.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * btree.c - NILFS B-tree. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/btree.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * btree.h - NILFS B-tree. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/cpfile.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * cpfile.c - NILFS checkpoint file. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/cpfile.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * cpfile.h - NILFS checkpoint file. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/dat.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * dat.c - NILFS disk address translation. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/dat.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * dat.h - NILFS disk address translation. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/dir.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * dir.c - NILFS directory entry operations 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Modified for NILFS by Amagai Yoshiji. 17 8 */
+1 -10
fs/nilfs2/direct.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * direct.c - NILFS direct block pointer. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/direct.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * direct.h - NILFS direct block pointer. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/file.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * file.c - NILFS regular file handling primitives including fsync(). 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Amagai Yoshiji and Ryusuke Konishi. 17 8 */
+1 -10
fs/nilfs2/gcinode.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * gcinode.c - dummy inodes to buffer blocks for garbage collection 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi. 17 8 * Revised by Ryusuke Konishi.
+1 -10
fs/nilfs2/ifile.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * ifile.c - NILFS inode file 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Amagai Yoshiji. 17 8 * Revised by Ryusuke Konishi.
+1 -10
fs/nilfs2/ifile.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * ifile.h - NILFS inode file 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Amagai Yoshiji. 17 8 * Revised by Ryusuke Konishi.
+1 -10
fs/nilfs2/inode.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * inode.c - NILFS inode operations. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 *
+1 -10
fs/nilfs2/ioctl.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * ioctl.c - NILFS ioctl operations. 3 4 * 4 5 * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/mdt.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * mdt.c - meta data file for NILFS 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 */
+1 -10
fs/nilfs2/mdt.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * mdt.h - NILFS meta data file prototype and definitions 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 */
+1 -10
fs/nilfs2/namei.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * namei.c - NILFS pathname lookup operations. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi. 17 8 */
+1 -10
fs/nilfs2/nilfs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * nilfs.h - NILFS local header file. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato and Ryusuke Konishi. 17 8 */
+1 -10
fs/nilfs2/page.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * page.c - buffer/page management specific to NILFS 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi and Seiji Kihara. 17 8 */
+1 -10
fs/nilfs2/page.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * page.h - buffer/page management specific to NILFS 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi and Seiji Kihara. 17 8 */
+1 -10
fs/nilfs2/recovery.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * recovery.c - NILFS recovery logic 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 */
+1 -10
fs/nilfs2/segbuf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * segbuf.c - NILFS segment buffer 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 *
+1 -10
fs/nilfs2/segbuf.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * segbuf.h - NILFS Segment buffer prototypes and definitions 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 *
+1 -10
fs/nilfs2/segment.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * segment.c - NILFS segment constructor. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 *
+1 -10
fs/nilfs2/segment.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * segment.h - NILFS Segment constructor prototypes and definitions 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 *
+1 -10
fs/nilfs2/sufile.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * sufile.c - NILFS segment usage file. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 * Revised by Ryusuke Konishi.
+1 -10
fs/nilfs2/sufile.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * sufile.h - NILFS segment usage file. 3 4 * 4 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Koji Sato. 17 8 */
+1 -10
fs/nilfs2/super.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * super.c - NILFS module and super block management. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 */
+1 -10
fs/nilfs2/sysfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * sysfs.c - sysfs support implementation. 3 4 * 4 5 * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. 5 6 * Copyright (C) 2014 HGST, Inc., a Western Digital Company. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 7 * 17 8 * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com> 18 9 */
+1 -10
fs/nilfs2/sysfs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * sysfs.h - sysfs support declarations. 3 4 * 4 5 * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. 5 6 * Copyright (C) 2014 HGST, Inc., a Western Digital Company. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 7 * 17 8 * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com> 18 9 */
+1 -10
fs/nilfs2/the_nilfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * the_nilfs.c - the_nilfs shared structure. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 *
+1 -10
fs/nilfs2/the_nilfs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 1 2 /* 2 3 * the_nilfs.h - the_nilfs shared structure. 3 4 * 4 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 6 * 16 7 * Written by Ryusuke Konishi. 17 8 *
+3 -10
fs/notify/fsnotify.c
··· 351 351 352 352 iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); 353 353 354 - if ((mask & FS_MODIFY) || 355 - (test_mask & to_tell->i_fsnotify_mask)) { 356 - iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = 357 - fsnotify_first_mark(&to_tell->i_fsnotify_marks); 358 - } 359 - 360 - if (mnt && ((mask & FS_MODIFY) || 361 - (test_mask & mnt->mnt_fsnotify_mask))) { 362 - iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = 363 - fsnotify_first_mark(&to_tell->i_fsnotify_marks); 354 + iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = 355 + fsnotify_first_mark(&to_tell->i_fsnotify_marks); 356 + if (mnt) { 364 357 iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] = 365 358 fsnotify_first_mark(&mnt->mnt_fsnotify_marks); 366 359 }
+44 -1
include/linux/blk-cgroup.h
··· 56 56 struct list_head all_blkcgs_node; 57 57 #ifdef CONFIG_CGROUP_WRITEBACK 58 58 struct list_head cgwb_list; 59 + refcount_t cgwb_refcnt; 59 60 #endif 60 61 }; 61 62 ··· 90 89 /* the blkg and policy id this per-policy data belongs to */ 91 90 struct blkcg_gq *blkg; 92 91 int plid; 93 - bool offline; 94 92 }; 95 93 96 94 /* ··· 386 386 { 387 387 return cpd ? cpd->blkcg : NULL; 388 388 } 389 + 390 + extern void blkcg_destroy_blkgs(struct blkcg *blkcg); 391 + 392 + #ifdef CONFIG_CGROUP_WRITEBACK 393 + 394 + /** 395 + * blkcg_cgwb_get - get a reference for blkcg->cgwb_list 396 + * @blkcg: blkcg of interest 397 + * 398 + * This is used to track the number of active wb's related to a blkcg. 399 + */ 400 + static inline void blkcg_cgwb_get(struct blkcg *blkcg) 401 + { 402 + refcount_inc(&blkcg->cgwb_refcnt); 403 + } 404 + 405 + /** 406 + * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list 407 + * @blkcg: blkcg of interest 408 + * 409 + * This is used to track the number of active wb's related to a blkcg. 410 + * When this count goes to zero, all active wb has finished so the 411 + * blkcg can continue destruction by calling blkcg_destroy_blkgs(). 412 + * This work may occur in cgwb_release_workfn() on the cgwb_release 413 + * workqueue. 414 + */ 415 + static inline void blkcg_cgwb_put(struct blkcg *blkcg) 416 + { 417 + if (refcount_dec_and_test(&blkcg->cgwb_refcnt)) 418 + blkcg_destroy_blkgs(blkcg); 419 + } 420 + 421 + #else 422 + 423 + static inline void blkcg_cgwb_get(struct blkcg *blkcg) { } 424 + 425 + static inline void blkcg_cgwb_put(struct blkcg *blkcg) 426 + { 427 + /* wb isn't being accounted, so trigger destruction right away */ 428 + blkcg_destroy_blkgs(blkcg); 429 + } 430 + 431 + #endif 389 432 390 433 /** 391 434 * blkg_path - format cgroup path of blkg
+1
include/linux/hid.h
··· 526 526 const char *name; 527 527 bool registered; 528 528 struct list_head reports; /* the list of reports */ 529 + unsigned int application; /* application usage for this input */ 529 530 }; 530 531 531 532 enum hid_type {
+4 -4
include/linux/mlx5/driver.h
··· 362 362 struct mlx5_frag_buf_ctrl { 363 363 struct mlx5_frag_buf frag_buf; 364 364 u32 sz_m1; 365 - u32 frag_sz_m1; 366 - u32 strides_offset; 365 + u16 frag_sz_m1; 366 + u16 strides_offset; 367 367 u8 log_sz; 368 368 u8 log_stride; 369 369 u8 log_frag_strides; ··· 996 996 } 997 997 998 998 static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, 999 - u32 strides_offset, 999 + u16 strides_offset, 1000 1000 struct mlx5_frag_buf_ctrl *fbc) 1001 1001 { 1002 1002 fbc->log_stride = log_stride; ··· 1053 1053 void mlx5_health_cleanup(struct mlx5_core_dev *dev); 1054 1054 int mlx5_health_init(struct mlx5_core_dev *dev); 1055 1055 void mlx5_start_health_poll(struct mlx5_core_dev *dev); 1056 - void mlx5_stop_health_poll(struct mlx5_core_dev *dev); 1056 + void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); 1057 1057 void mlx5_drain_health_wq(struct mlx5_core_dev *dev); 1058 1058 void mlx5_trigger_health_work(struct mlx5_core_dev *dev); 1059 1059 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
+2 -2
include/linux/timekeeping.h
··· 258 258 extern int persistent_clock_is_local; 259 259 260 260 extern void read_persistent_clock64(struct timespec64 *ts); 261 - void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock, 262 - struct timespec64 *boot_offset); 261 + void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock, 262 + struct timespec64 *boot_offset); 263 263 extern int update_persistent_clock64(struct timespec64 now); 264 264 265 265 /*
+6 -2
include/linux/tracepoint.h
··· 158 158 * For rcuidle callers, use srcu since sched-rcu \ 159 159 * doesn't work from the idle path. \ 160 160 */ \ 161 - if (rcuidle) \ 161 + if (rcuidle) { \ 162 162 idx = srcu_read_lock_notrace(&tracepoint_srcu); \ 163 + rcu_irq_enter_irqson(); \ 164 + } \ 163 165 \ 164 166 it_func_ptr = rcu_dereference_raw((tp)->funcs); \ 165 167 \ ··· 173 171 } while ((++it_func_ptr)->func); \ 174 172 } \ 175 173 \ 176 - if (rcuidle) \ 174 + if (rcuidle) { \ 175 + rcu_irq_exit_irqson(); \ 177 176 srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ 177 + } \ 178 178 \ 179 179 preempt_enable_notrace(); \ 180 180 } while (0)
+1 -1
include/net/netfilter/nf_conntrack_timeout.h
··· 30 30 }; 31 31 32 32 static inline unsigned int * 33 - nf_ct_timeout_data(struct nf_conn_timeout *t) 33 + nf_ct_timeout_data(const struct nf_conn_timeout *t) 34 34 { 35 35 struct nf_ct_timeout *timeout; 36 36
+1 -1
include/uapi/linux/keyctl.h
··· 65 65 66 66 /* keyctl structures */ 67 67 struct keyctl_dh_params { 68 - __s32 private; 68 + __s32 dh_private; 69 69 __s32 prime; 70 70 __s32 base; 71 71 };
+1
ipc/shm.c
··· 199 199 } 200 200 201 201 ipc_unlock_object(ipcp); 202 + ipcp = ERR_PTR(-EIDRM); 202 203 err: 203 204 rcu_read_unlock(); 204 205 /*
+6 -5
kernel/cpu.c
··· 607 607 bool bringup = st->bringup; 608 608 enum cpuhp_state state; 609 609 610 + if (WARN_ON_ONCE(!st->should_run)) 611 + return; 612 + 610 613 /* 611 614 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures 612 615 * that if we see ->should_run we also see the rest of the state. 613 616 */ 614 617 smp_mb(); 615 - 616 - if (WARN_ON_ONCE(!st->should_run)) 617 - return; 618 618 619 619 cpuhp_lock_acquire(bringup); 620 620 ··· 916 916 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); 917 917 if (ret) { 918 918 st->target = prev_state; 919 - undo_cpu_down(cpu, st); 919 + if (st->state < prev_state) 920 + undo_cpu_down(cpu, st); 920 921 break; 921 922 } 922 923 } ··· 970 969 * to do the further cleanups. 971 970 */ 972 971 ret = cpuhp_down_callbacks(cpu, st, target); 973 - if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { 972 + if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) { 974 973 cpuhp_reset_state(st, prev_state); 975 974 __cpuhp_kick_ap(st); 976 975 }
+1 -2
kernel/fork.c
··· 550 550 goto out; 551 551 } 552 552 /* a new mm has just been created */ 553 - arch_dup_mmap(oldmm, mm); 554 - retval = 0; 553 + retval = arch_dup_mmap(oldmm, mm); 555 554 out: 556 555 up_write(&mm->mmap_sem); 557 556 flush_tlb_mm(oldmm);
+2 -2
kernel/printk/printk_safe.c
··· 306 306 return printk_safe_log_store(s, fmt, args); 307 307 } 308 308 309 - void printk_nmi_enter(void) 309 + void notrace printk_nmi_enter(void) 310 310 { 311 311 this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); 312 312 } 313 313 314 - void printk_nmi_exit(void) 314 + void notrace printk_nmi_exit(void) 315 315 { 316 316 this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); 317 317 }
+30 -10
kernel/time/clocksource.c
··· 133 133 spin_unlock_irqrestore(&watchdog_lock, *flags); 134 134 } 135 135 136 + static int clocksource_watchdog_kthread(void *data); 137 + static void __clocksource_change_rating(struct clocksource *cs, int rating); 138 + 136 139 /* 137 140 * Interval: 0.5sec Threshold: 0.0625s 138 141 */ 139 142 #define WATCHDOG_INTERVAL (HZ >> 1) 140 143 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) 144 + 145 + static void clocksource_watchdog_work(struct work_struct *work) 146 + { 147 + /* 148 + * We cannot directly run clocksource_watchdog_kthread() here, because 149 + * clocksource_select() calls timekeeping_notify() which uses 150 + * stop_machine(). One cannot use stop_machine() from a workqueue() due 151 + * lock inversions wrt CPU hotplug. 152 + * 153 + * Also, we only ever run this work once or twice during the lifetime 154 + * of the kernel, so there is no point in creating a more permanent 155 + * kthread for this. 156 + * 157 + * If kthread_run fails the next watchdog scan over the 158 + * watchdog_list will find the unstable clock again. 159 + */ 160 + kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog"); 161 + } 141 162 142 163 static void __clocksource_unstable(struct clocksource *cs) 143 164 { ··· 166 145 cs->flags |= CLOCK_SOURCE_UNSTABLE; 167 146 168 147 /* 169 - * If the clocksource is registered clocksource_watchdog_work() will 148 + * If the clocksource is registered clocksource_watchdog_kthread() will 170 149 * re-rate and re-select. 171 150 */ 172 151 if (list_empty(&cs->list)) { ··· 177 156 if (cs->mark_unstable) 178 157 cs->mark_unstable(cs); 179 158 180 - /* kick clocksource_watchdog_work() */ 159 + /* kick clocksource_watchdog_kthread() */ 181 160 if (finished_booting) 182 161 schedule_work(&watchdog_work); 183 162 } ··· 187 166 * @cs: clocksource to be marked unstable 188 167 * 189 168 * This function is called by the x86 TSC code to mark clocksources as unstable; 190 - * it defers demotion and re-selection to a work. 
169 + * it defers demotion and re-selection to a kthread. 191 170 */ 192 171 void clocksource_mark_unstable(struct clocksource *cs) 193 172 { ··· 412 391 } 413 392 } 414 393 415 - static void __clocksource_change_rating(struct clocksource *cs, int rating); 416 - 417 - static int __clocksource_watchdog_work(void) 394 + static int __clocksource_watchdog_kthread(void) 418 395 { 419 396 struct clocksource *cs, *tmp; 420 397 unsigned long flags; ··· 437 418 return select; 438 419 } 439 420 440 - static void clocksource_watchdog_work(struct work_struct *work) 421 + static int clocksource_watchdog_kthread(void *data) 441 422 { 442 423 mutex_lock(&clocksource_mutex); 443 - if (__clocksource_watchdog_work()) 424 + if (__clocksource_watchdog_kthread()) 444 425 clocksource_select(); 445 426 mutex_unlock(&clocksource_mutex); 427 + return 0; 446 428 } 447 429 448 430 static bool clocksource_is_watchdog(struct clocksource *cs) ··· 462 442 static void clocksource_select_watchdog(bool fallback) { } 463 443 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } 464 444 static inline void clocksource_resume_watchdog(void) { } 465 - static inline int __clocksource_watchdog_work(void) { return 0; } 445 + static inline int __clocksource_watchdog_kthread(void) { return 0; } 466 446 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } 467 447 void clocksource_mark_unstable(struct clocksource *cs) { } 468 448 ··· 830 810 /* 831 811 * Run the watchdog first to eliminate unstable clock sources 832 812 */ 833 - __clocksource_watchdog_work(); 813 + __clocksource_watchdog_kthread(); 834 814 clocksource_select(); 835 815 mutex_unlock(&clocksource_mutex); 836 816 return 0;
+2 -2
lib/Kconfig.debug
··· 1277 1277 time. This is really bad from a security perspective, and 1278 1278 so architecture maintainers really need to do what they can 1279 1279 to get the CRNG seeded sooner after the system is booted. 1280 - However, since users can not do anything actionble to 1280 + However, since users cannot do anything actionable to 1281 1281 address this, by default the kernel will issue only a single 1282 1282 warning for the first use of unseeded randomness. 1283 1283 1284 1284 Say Y here if you want to receive warnings for all uses of 1285 1285 unseeded randomness. This will be of use primarily for 1286 - those developers interersted in improving the security of 1286 + those developers interested in improving the security of 1287 1287 Linux kernels running on their architecture (or 1288 1288 subarchitecture). 1289 1289
+5
mm/backing-dev.c
··· 491 491 { 492 492 struct bdi_writeback *wb = container_of(work, struct bdi_writeback, 493 493 release_work); 494 + struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css); 494 495 495 496 mutex_lock(&wb->bdi->cgwb_release_mutex); 496 497 wb_shutdown(wb); ··· 499 498 css_put(wb->memcg_css); 500 499 css_put(wb->blkcg_css); 501 500 mutex_unlock(&wb->bdi->cgwb_release_mutex); 501 + 502 + /* triggers blkg destruction if cgwb_refcnt becomes zero */ 503 + blkcg_cgwb_put(blkcg); 502 504 503 505 fprop_local_destroy_percpu(&wb->memcg_completions); 504 506 percpu_ref_exit(&wb->refcnt); ··· 601 597 list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); 602 598 list_add(&wb->memcg_node, memcg_cgwb_list); 603 599 list_add(&wb->blkcg_node, blkcg_cgwb_list); 600 + blkcg_cgwb_get(blkcg); 604 601 css_get(memcg_css); 605 602 css_get(blkcg_css); 606 603 }
+2 -2
mm/huge_memory.c
··· 821 821 * but we need to be consistent with PTEs and architectures that 822 822 * can't support a 'special' bit. 823 823 */ 824 - BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 824 + BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && 825 + !pfn_t_devmap(pfn)); 825 826 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 826 827 (VM_PFNMAP|VM_MIXEDMAP)); 827 828 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 828 - BUG_ON(!pfn_t_devmap(pfn)); 829 829 830 830 if (addr < vma->vm_start || addr >= vma->vm_end) 831 831 return VM_FAULT_SIGBUS;
+5 -4
mm/kmemleak.c
··· 2097 2097 2098 2098 kmemleak_initialized = 1; 2099 2099 2100 + dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL, 2101 + &kmemleak_fops); 2102 + if (!dentry) 2103 + pr_warn("Failed to create the debugfs kmemleak file\n"); 2104 + 2100 2105 if (kmemleak_error) { 2101 2106 /* 2102 2107 * Some error occurred and kmemleak was disabled. There is a ··· 2113 2108 return -ENOMEM; 2114 2109 } 2115 2110 2116 - dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL, 2117 - &kmemleak_fops); 2118 - if (!dentry) 2119 - pr_warn("Failed to create the debugfs kmemleak file\n"); 2120 2111 mutex_lock(&scan_mutex); 2121 2112 start_scan_thread(); 2122 2113 mutex_unlock(&scan_mutex);
-2
mm/memcontrol.c
··· 1701 1701 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1702 1702 return OOM_SUCCESS; 1703 1703 1704 - WARN(1,"Memory cgroup charge failed because of no reclaimable memory! " 1705 - "This looks like a misconfiguration or a kernel bug."); 1706 1704 return OOM_FAILED; 1707 1705 } 1708 1706
+2 -1
mm/memory_hotplug.c
··· 1333 1333 if (__PageMovable(page)) 1334 1334 return pfn; 1335 1335 if (PageHuge(page)) { 1336 - if (page_huge_active(page)) 1336 + if (hugepage_migration_supported(page_hstate(page)) && 1337 + page_huge_active(page)) 1337 1338 return pfn; 1338 1339 else 1339 1340 pfn = round_up(pfn + 1,
+11 -3
mm/oom_kill.c
··· 522 522 523 523 tlb_gather_mmu(&tlb, mm, start, end); 524 524 if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) { 525 + tlb_finish_mmu(&tlb, start, end); 525 526 ret = false; 526 527 continue; 527 528 } ··· 1104 1103 } 1105 1104 1106 1105 select_bad_process(oc); 1107 - /* Found nothing?!?! Either we hang forever, or we panic. */ 1108 - if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) { 1106 + /* Found nothing?!?! */ 1107 + if (!oc->chosen) { 1109 1108 dump_header(oc, NULL); 1110 - panic("Out of memory and no killable processes...\n"); 1109 + pr_warn("Out of memory and no killable processes...\n"); 1110 + /* 1111 + * If we got here due to an actual allocation at the 1112 + * system level, we cannot survive this and will enter 1113 + * an endless loop in the allocator. Bail out now. 1114 + */ 1115 + if (!is_sysrq_oom(oc) && !is_memcg_oom(oc)) 1116 + panic("System is deadlocked on memory\n"); 1111 1117 } 1112 1118 if (oc->chosen && oc->chosen != (void *)-1UL) 1113 1119 oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
+4
mm/page_alloc.c
··· 7708 7708 * handle each tail page individually in migration. 7709 7709 */ 7710 7710 if (PageHuge(page)) { 7711 + 7712 + if (!hugepage_migration_supported(page_hstate(page))) 7713 + goto unmovable; 7714 + 7711 7715 iter = round_up(iter + 1, 1<<compound_order(page)) - 1; 7712 7716 continue; 7713 7717 }
+7 -4
mm/util.c
··· 435 435 EXPORT_SYMBOL(kvmalloc_node); 436 436 437 437 /** 438 - * kvfree - free memory allocated with kvmalloc 439 - * @addr: pointer returned by kvmalloc 438 + * kvfree() - Free memory. 439 + * @addr: Pointer to allocated memory. 440 440 * 441 - * If the memory is allocated from vmalloc area it is freed with vfree(). 442 - * Otherwise kfree() is used. 441 + * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc(). 442 + * It is slightly more efficient to use kfree() or vfree() if you are certain 443 + * that you know which one to use. 444 + * 445 + * Context: Any context except NMI. 443 446 */ 444 447 void kvfree(const void *addr) 445 448 {
-3
net/core/skbuff.c
··· 939 939 940 940 WARN_ON_ONCE(!in_task()); 941 941 942 - if (!sock_flag(sk, SOCK_ZEROCOPY)) 943 - return NULL; 944 - 945 942 skb = sock_omalloc(sk, 0, GFP_KERNEL); 946 943 if (!skb) 947 944 return NULL;
+1
net/ipv4/ip_fragment.c
··· 602 602 nextp = &fp->next; 603 603 fp->prev = NULL; 604 604 memset(&fp->rbnode, 0, sizeof(fp->rbnode)); 605 + fp->sk = NULL; 605 606 head->data_len += fp->len; 606 607 head->len += fp->len; 607 608 if (head->ip_summed != fp->ip_summed)
+5
net/ipv4/ip_gre.c
··· 178 178 179 179 if (tpi->proto == htons(ETH_P_TEB)) 180 180 itn = net_generic(net, gre_tap_net_id); 181 + else if (tpi->proto == htons(ETH_P_ERSPAN) || 182 + tpi->proto == htons(ETH_P_ERSPAN2)) 183 + itn = net_generic(net, erspan_net_id); 181 184 else 182 185 itn = net_generic(net, ipgre_net_id); 183 186 ··· 331 328 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); 332 329 return PACKET_RCVD; 333 330 } 331 + return PACKET_REJECT; 332 + 334 333 drop: 335 334 kfree_skb(skb); 336 335 return PACKET_RCVD;
+5 -3
net/ipv4/netfilter/Kconfig
··· 106 106 107 107 if NF_NAT_IPV4 108 108 109 + config NF_NAT_MASQUERADE_IPV4 110 + bool 111 + 112 + if NF_TABLES 109 113 config NFT_CHAIN_NAT_IPV4 110 114 depends on NF_TABLES_IPV4 111 115 tristate "IPv4 nf_tables nat chain support" ··· 118 114 chain type is used to perform Network Address Translation (NAT) 119 115 packet transformations such as the source, destination address and 120 116 source and destination ports. 121 - 122 - config NF_NAT_MASQUERADE_IPV4 123 - bool 124 117 125 118 config NFT_MASQ_IPV4 126 119 tristate "IPv4 masquerading support for nf_tables" ··· 136 135 help 137 136 This is the expression that provides IPv4 redirect support for 138 137 nf_tables. 138 + endif # NF_TABLES 139 139 140 140 config NF_NAT_SNMP_BASIC 141 141 tristate "Basic SNMP-ALG support"
+1 -1
net/ipv4/tcp.c
··· 1185 1185 1186 1186 flags = msg->msg_flags; 1187 1187 1188 - if (flags & MSG_ZEROCOPY && size) { 1188 + if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { 1189 1189 if (sk->sk_state != TCP_ESTABLISHED) { 1190 1190 err = -EINVAL; 1191 1191 goto out_err;
+2 -2
net/ipv4/tcp_input.c
··· 6380 6380 if (!queue->synflood_warned && 6381 6381 net->ipv4.sysctl_tcp_syncookies != 2 && 6382 6382 xchg(&queue->synflood_warned, 1) == 0) 6383 - pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", 6384 - proto, ntohs(tcp_hdr(skb)->dest), msg); 6383 + net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", 6384 + proto, ntohs(tcp_hdr(skb)->dest), msg); 6385 6385 6386 6386 return want_cookie; 6387 6387 }
+1
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 445 445 else if (head->ip_summed == CHECKSUM_COMPLETE) 446 446 head->csum = csum_add(head->csum, fp->csum); 447 447 head->truesize += fp->truesize; 448 + fp->sk = NULL; 448 449 } 449 450 sub_frag_mem_limit(fq->q.net, head->truesize); 450 451
+25 -13
net/iucv/af_iucv.c
··· 351 351 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); 352 352 353 353 skb->dev = iucv->hs_dev; 354 - if (!skb->dev) 355 - return -ENODEV; 356 - if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) 357 - return -ENETDOWN; 354 + if (!skb->dev) { 355 + err = -ENODEV; 356 + goto err_free; 357 + } 358 + if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) { 359 + err = -ENETDOWN; 360 + goto err_free; 361 + } 358 362 if (skb->len > skb->dev->mtu) { 359 - if (sock->sk_type == SOCK_SEQPACKET) 360 - return -EMSGSIZE; 361 - else 362 - skb_trim(skb, skb->dev->mtu); 363 + if (sock->sk_type == SOCK_SEQPACKET) { 364 + err = -EMSGSIZE; 365 + goto err_free; 366 + } 367 + skb_trim(skb, skb->dev->mtu); 363 368 } 364 369 skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); 365 370 nskb = skb_clone(skb, GFP_ATOMIC); 366 - if (!nskb) 367 - return -ENOMEM; 371 + if (!nskb) { 372 + err = -ENOMEM; 373 + goto err_free; 374 + } 375 + 368 376 skb_queue_tail(&iucv->send_skb_q, nskb); 369 377 err = dev_queue_xmit(skb); 370 378 if (net_xmit_eval(err)) { ··· 383 375 WARN_ON(atomic_read(&iucv->msg_recv) < 0); 384 376 } 385 377 return net_xmit_eval(err); 378 + 379 + err_free: 380 + kfree_skb(skb); 381 + return err; 386 382 } 387 383 388 384 static struct sock *__iucv_get_sock_by_name(char *nm) ··· 1179 1167 err = afiucv_hs_send(&txmsg, sk, skb, 0); 1180 1168 if (err) { 1181 1169 atomic_dec(&iucv->msg_sent); 1182 - goto fail; 1170 + goto out; 1183 1171 } 1184 1172 } else { /* Classic VM IUCV transport */ 1185 1173 skb_queue_tail(&iucv->send_skb_q, skb); ··· 2167 2155 struct sock *sk; 2168 2156 struct iucv_sock *iucv; 2169 2157 struct af_iucv_trans_hdr *trans_hdr; 2158 + int err = NET_RX_SUCCESS; 2170 2159 char nullstring[8]; 2171 - int err = 0; 2172 2160 2173 2161 if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) { 2174 2162 WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d", ··· 2266 2254 err = afiucv_hs_callback_rx(sk, skb); 2267 2255 break; 2268 
2256 default: 2269 - ; 2257 + kfree_skb(skb); 2270 2258 } 2271 2259 2272 2260 return err;
+1 -1
net/iucv/iucv.c
··· 1874 1874 * Returns 0 if there are still iucv pathes defined 1875 1875 * 1 if there are no iucv pathes defined 1876 1876 */ 1877 - int iucv_path_table_empty(void) 1877 + static int iucv_path_table_empty(void) 1878 1878 { 1879 1879 int i; 1880 1880
+6 -6
net/netfilter/Kconfig
··· 771 771 depends on NETFILTER_ADVANCED 772 772 ---help--- 773 773 This option adds a `CHECKSUM' target, which can be used in the iptables mangle 774 - table. 774 + table to work around buggy DHCP clients in virtualized environments. 775 775 776 - You can use this target to compute and fill in the checksum in 777 - a packet that lacks a checksum. This is particularly useful, 778 - if you need to work around old applications such as dhcp clients, 779 - that do not work well with checksum offloads, but don't want to disable 780 - checksum offload in your device. 776 + Some old DHCP clients drop packets because they are not aware 777 + that the checksum would normally be offloaded to hardware and 778 + thus should be considered valid. 779 + This target can be used to fill in the checksum using iptables 780 + when such packets are sent via a virtual network device. 781 781 782 782 To compile it as a module, choose M here. If unsure, say N. 783 783
+26
net/netfilter/nf_conntrack_proto.c
··· 776 776 }; 777 777 #endif 778 778 779 + static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto) 780 + { 781 + u8 nfproto = (unsigned long)_nfproto; 782 + 783 + if (nf_ct_l3num(ct) != nfproto) 784 + return 0; 785 + 786 + if (nf_ct_protonum(ct) == IPPROTO_TCP && 787 + ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) { 788 + ct->proto.tcp.seen[0].td_maxwin = 0; 789 + ct->proto.tcp.seen[1].td_maxwin = 0; 790 + } 791 + 792 + return 0; 793 + } 794 + 779 795 static int nf_ct_netns_do_get(struct net *net, u8 nfproto) 780 796 { 781 797 struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); 798 + bool fixup_needed = false; 782 799 int err = 0; 783 800 784 801 mutex_lock(&nf_ct_proto_mutex); ··· 815 798 ARRAY_SIZE(ipv4_conntrack_ops)); 816 799 if (err) 817 800 cnet->users4 = 0; 801 + else 802 + fixup_needed = true; 818 803 break; 819 804 #if IS_ENABLED(CONFIG_IPV6) 820 805 case NFPROTO_IPV6: ··· 833 814 ARRAY_SIZE(ipv6_conntrack_ops)); 834 815 if (err) 835 816 cnet->users6 = 0; 817 + else 818 + fixup_needed = true; 836 819 break; 837 820 #endif 838 821 default: ··· 843 822 } 844 823 out_unlock: 845 824 mutex_unlock(&nf_ct_proto_mutex); 825 + 826 + if (fixup_needed) 827 + nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup, 828 + (void *)(unsigned long)nfproto, 0, 0); 829 + 846 830 return err; 847 831 } 848 832
+13 -6
net/netfilter/nf_conntrack_proto_dccp.c
··· 675 675 } 676 676 #endif 677 677 678 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 678 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 679 679 680 680 #include <linux/netfilter/nfnetlink.h> 681 681 #include <linux/netfilter/nfnetlink_cttimeout.h> ··· 697 697 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; 698 698 } 699 699 } 700 + 701 + timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST]; 700 702 return 0; 701 703 } 702 704 ··· 728 726 [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, 729 727 [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, 730 728 }; 731 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 729 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 732 730 733 731 #ifdef CONFIG_SYSCTL 734 732 /* template, data assigned later */ ··· 829 827 dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; 830 828 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; 831 829 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 830 + 831 + /* timeouts[0] is unused, make it same as SYN_SENT so 832 + * ->timeouts[0] contains 'new' timeout, like udp or icmp. 
833 + */ 834 + dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST]; 832 835 } 833 836 834 837 return dccp_kmemdup_sysctl_table(net, pn, dn); ··· 863 856 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 864 857 .nla_policy = nf_ct_port_nla_policy, 865 858 #endif 866 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 859 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 867 860 .ctnl_timeout = { 868 861 .nlattr_to_obj = dccp_timeout_nlattr_to_obj, 869 862 .obj_to_nlattr = dccp_timeout_obj_to_nlattr, ··· 871 864 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, 872 865 .nla_policy = dccp_timeout_nla_policy, 873 866 }, 874 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 867 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 875 868 .init_net = dccp_init_net, 876 869 .get_net_proto = dccp_get_net_proto, 877 870 }; ··· 896 889 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 897 890 .nla_policy = nf_ct_port_nla_policy, 898 891 #endif 899 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 892 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 900 893 .ctnl_timeout = { 901 894 .nlattr_to_obj = dccp_timeout_nlattr_to_obj, 902 895 .obj_to_nlattr = dccp_timeout_obj_to_nlattr, ··· 904 897 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, 905 898 .nla_policy = dccp_timeout_nla_policy, 906 899 }, 907 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 900 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 908 901 .init_net = dccp_init_net, 909 902 .get_net_proto = dccp_get_net_proto, 910 903 };
+4 -4
net/netfilter/nf_conntrack_proto_generic.c
··· 70 70 return ret; 71 71 } 72 72 73 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 73 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 74 74 75 75 #include <linux/netfilter/nfnetlink.h> 76 76 #include <linux/netfilter/nfnetlink_cttimeout.h> ··· 113 113 generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { 114 114 [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, 115 115 }; 116 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 116 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 117 117 118 118 #ifdef CONFIG_SYSCTL 119 119 static struct ctl_table generic_sysctl_table[] = { ··· 164 164 .pkt_to_tuple = generic_pkt_to_tuple, 165 165 .packet = generic_packet, 166 166 .new = generic_new, 167 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 167 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 168 168 .ctnl_timeout = { 169 169 .nlattr_to_obj = generic_timeout_nlattr_to_obj, 170 170 .obj_to_nlattr = generic_timeout_obj_to_nlattr, ··· 172 172 .obj_size = sizeof(unsigned int), 173 173 .nla_policy = generic_timeout_nla_policy, 174 174 }, 175 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 175 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 176 176 .init_net = generic_init_net, 177 177 .get_net_proto = generic_get_net_proto, 178 178 };
+4 -4
net/netfilter/nf_conntrack_proto_gre.c
··· 285 285 nf_ct_gre_keymap_destroy(master); 286 286 } 287 287 288 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 288 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 289 289 290 290 #include <linux/netfilter/nfnetlink.h> 291 291 #include <linux/netfilter/nfnetlink_cttimeout.h> ··· 334 334 [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 }, 335 335 [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 }, 336 336 }; 337 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 337 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 338 338 339 339 static int gre_init_net(struct net *net, u_int16_t proto) 340 340 { ··· 367 367 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 368 368 .nla_policy = nf_ct_port_nla_policy, 369 369 #endif 370 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 370 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 371 371 .ctnl_timeout = { 372 372 .nlattr_to_obj = gre_timeout_nlattr_to_obj, 373 373 .obj_to_nlattr = gre_timeout_obj_to_nlattr, ··· 375 375 .obj_size = sizeof(unsigned int) * GRE_CT_MAX, 376 376 .nla_policy = gre_timeout_nla_policy, 377 377 }, 378 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 378 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 379 379 .net_id = &proto_gre_net_id, 380 380 .init_net = gre_init_net, 381 381 };
+4 -4
net/netfilter/nf_conntrack_proto_icmp.c
··· 273 273 } 274 274 #endif 275 275 276 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 276 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 277 277 278 278 #include <linux/netfilter/nfnetlink.h> 279 279 #include <linux/netfilter/nfnetlink_cttimeout.h> ··· 313 313 icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { 314 314 [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, 315 315 }; 316 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 316 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 317 317 318 318 #ifdef CONFIG_SYSCTL 319 319 static struct ctl_table icmp_sysctl_table[] = { ··· 374 374 .nlattr_to_tuple = icmp_nlattr_to_tuple, 375 375 .nla_policy = icmp_nla_policy, 376 376 #endif 377 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 377 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 378 378 .ctnl_timeout = { 379 379 .nlattr_to_obj = icmp_timeout_nlattr_to_obj, 380 380 .obj_to_nlattr = icmp_timeout_obj_to_nlattr, ··· 382 382 .obj_size = sizeof(unsigned int), 383 383 .nla_policy = icmp_timeout_nla_policy, 384 384 }, 385 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 385 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 386 386 .init_net = icmp_init_net, 387 387 .get_net_proto = icmp_get_net_proto, 388 388 };
+4 -4
net/netfilter/nf_conntrack_proto_icmpv6.c
··· 274 274 } 275 275 #endif 276 276 277 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 277 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 278 278 279 279 #include <linux/netfilter/nfnetlink.h> 280 280 #include <linux/netfilter/nfnetlink_cttimeout.h> ··· 314 314 icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = { 315 315 [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 }, 316 316 }; 317 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 317 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 318 318 319 319 #ifdef CONFIG_SYSCTL 320 320 static struct ctl_table icmpv6_sysctl_table[] = { ··· 373 373 .nlattr_to_tuple = icmpv6_nlattr_to_tuple, 374 374 .nla_policy = icmpv6_nla_policy, 375 375 #endif 376 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 376 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 377 377 .ctnl_timeout = { 378 378 .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj, 379 379 .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr, ··· 381 381 .obj_size = sizeof(unsigned int), 382 382 .nla_policy = icmpv6_timeout_nla_policy, 383 383 }, 384 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 384 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 385 385 .init_net = icmpv6_init_net, 386 386 .get_net_proto = icmpv6_get_net_proto, 387 387 };
+14 -7
net/netfilter/nf_conntrack_proto_sctp.c
··· 591 591 } 592 592 #endif 593 593 594 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 594 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 595 595 596 596 #include <linux/netfilter/nfnetlink.h> 597 597 #include <linux/netfilter/nfnetlink_cttimeout.h> ··· 613 613 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; 614 614 } 615 615 } 616 + 617 + timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED]; 616 618 return 0; 617 619 } 618 620 ··· 646 644 [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 }, 647 645 [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 }, 648 646 }; 649 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 647 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 650 648 651 649 652 650 #ifdef CONFIG_SYSCTL ··· 745 743 746 744 for (i = 0; i < SCTP_CONNTRACK_MAX; i++) 747 745 sn->timeouts[i] = sctp_timeouts[i]; 746 + 747 + /* timeouts[0] is unused, init it so ->timeouts[0] contains 748 + * 'new' timeout, like udp or icmp. 749 + */ 750 + sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED]; 748 751 } 749 752 750 753 return sctp_kmemdup_sysctl_table(pn, sn); ··· 780 773 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 781 774 .nla_policy = nf_ct_port_nla_policy, 782 775 #endif 783 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 776 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 784 777 .ctnl_timeout = { 785 778 .nlattr_to_obj = sctp_timeout_nlattr_to_obj, 786 779 .obj_to_nlattr = sctp_timeout_obj_to_nlattr, ··· 788 781 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, 789 782 .nla_policy = sctp_timeout_nla_policy, 790 783 }, 791 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 784 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 792 785 .init_net = sctp_init_net, 793 786 .get_net_proto = sctp_get_net_proto, 794 787 }; ··· 813 806 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 814 807 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 815 808 .nla_policy = nf_ct_port_nla_policy, 816 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 809 + #endif 810 + #ifdef 
CONFIG_NF_CONNTRACK_TIMEOUT 817 811 .ctnl_timeout = { 818 812 .nlattr_to_obj = sctp_timeout_nlattr_to_obj, 819 813 .obj_to_nlattr = sctp_timeout_obj_to_nlattr, ··· 822 814 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, 823 815 .nla_policy = sctp_timeout_nla_policy, 824 816 }, 825 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 826 - #endif 817 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 827 818 .init_net = sctp_init_net, 828 819 .get_net_proto = sctp_get_net_proto, 829 820 };
+13 -6
net/netfilter/nf_conntrack_proto_tcp.c
··· 1279 1279 } 1280 1280 #endif 1281 1281 1282 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 1282 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 1283 1283 1284 1284 #include <linux/netfilter/nfnetlink.h> 1285 1285 #include <linux/netfilter/nfnetlink_cttimeout.h> ··· 1301 1301 timeouts[TCP_CONNTRACK_SYN_SENT] = 1302 1302 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ; 1303 1303 } 1304 + 1304 1305 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) { 1305 1306 timeouts[TCP_CONNTRACK_SYN_RECV] = 1306 1307 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ; ··· 1342 1341 timeouts[TCP_CONNTRACK_UNACK] = 1343 1342 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ; 1344 1343 } 1344 + 1345 + timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT]; 1345 1346 return 0; 1346 1347 } 1347 1348 ··· 1394 1391 [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 }, 1395 1392 [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 }, 1396 1393 }; 1397 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1394 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 1398 1395 1399 1396 #ifdef CONFIG_SYSCTL 1400 1397 static struct ctl_table tcp_sysctl_table[] = { ··· 1521 1518 for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) 1522 1519 tn->timeouts[i] = tcp_timeouts[i]; 1523 1520 1521 + /* timeouts[0] is unused, make it same as SYN_SENT so 1522 + * ->timeouts[0] contains 'new' timeout, like udp or icmp. 
1523 + */ 1524 + tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT]; 1524 1525 tn->tcp_loose = nf_ct_tcp_loose; 1525 1526 tn->tcp_be_liberal = nf_ct_tcp_be_liberal; 1526 1527 tn->tcp_max_retrans = nf_ct_tcp_max_retrans; ··· 1558 1551 .nlattr_size = TCP_NLATTR_SIZE, 1559 1552 .nla_policy = nf_ct_port_nla_policy, 1560 1553 #endif 1561 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 1554 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 1562 1555 .ctnl_timeout = { 1563 1556 .nlattr_to_obj = tcp_timeout_nlattr_to_obj, 1564 1557 .obj_to_nlattr = tcp_timeout_obj_to_nlattr, ··· 1567 1560 TCP_CONNTRACK_TIMEOUT_MAX, 1568 1561 .nla_policy = tcp_timeout_nla_policy, 1569 1562 }, 1570 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1563 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 1571 1564 .init_net = tcp_init_net, 1572 1565 .get_net_proto = tcp_get_net_proto, 1573 1566 }; ··· 1593 1586 .nlattr_tuple_size = tcp_nlattr_tuple_size, 1594 1587 .nla_policy = nf_ct_port_nla_policy, 1595 1588 #endif 1596 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 1589 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 1597 1590 .ctnl_timeout = { 1598 1591 .nlattr_to_obj = tcp_timeout_nlattr_to_obj, 1599 1592 .obj_to_nlattr = tcp_timeout_obj_to_nlattr, ··· 1602 1595 TCP_CONNTRACK_TIMEOUT_MAX, 1603 1596 .nla_policy = tcp_timeout_nla_policy, 1604 1597 }, 1605 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1598 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 1606 1599 .init_net = tcp_init_net, 1607 1600 .get_net_proto = tcp_get_net_proto, 1608 1601 };
+10 -11
net/netfilter/nf_conntrack_proto_udp.c
··· 171 171 return NF_ACCEPT; 172 172 } 173 173 174 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 174 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 175 175 176 176 #include <linux/netfilter/nfnetlink.h> 177 177 #include <linux/netfilter/nfnetlink_cttimeout.h> ··· 221 221 [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 }, 222 222 [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 }, 223 223 }; 224 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 224 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 225 225 226 226 #ifdef CONFIG_SYSCTL 227 227 static struct ctl_table udp_sysctl_table[] = { ··· 292 292 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 293 293 .nla_policy = nf_ct_port_nla_policy, 294 294 #endif 295 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 295 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 296 296 .ctnl_timeout = { 297 297 .nlattr_to_obj = udp_timeout_nlattr_to_obj, 298 298 .obj_to_nlattr = udp_timeout_obj_to_nlattr, ··· 300 300 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 301 301 .nla_policy = udp_timeout_nla_policy, 302 302 }, 303 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 303 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 304 304 .init_net = udp_init_net, 305 305 .get_net_proto = udp_get_net_proto, 306 306 }; ··· 321 321 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 322 322 .nla_policy = nf_ct_port_nla_policy, 323 323 #endif 324 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 324 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 325 325 .ctnl_timeout = { 326 326 .nlattr_to_obj = udp_timeout_nlattr_to_obj, 327 327 .obj_to_nlattr = udp_timeout_obj_to_nlattr, ··· 329 329 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 330 330 .nla_policy = udp_timeout_nla_policy, 331 331 }, 332 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 332 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 333 333 .init_net = udp_init_net, 334 334 .get_net_proto = udp_get_net_proto, 335 335 }; ··· 350 350 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 351 351 .nla_policy = nf_ct_port_nla_policy, 352 352 
#endif 353 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 353 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 354 354 .ctnl_timeout = { 355 355 .nlattr_to_obj = udp_timeout_nlattr_to_obj, 356 356 .obj_to_nlattr = udp_timeout_obj_to_nlattr, ··· 358 358 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 359 359 .nla_policy = udp_timeout_nla_policy, 360 360 }, 361 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 361 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 362 362 .init_net = udp_init_net, 363 363 .get_net_proto = udp_get_net_proto, 364 364 }; ··· 379 379 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 380 380 .nla_policy = nf_ct_port_nla_policy, 381 381 #endif 382 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 382 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 383 383 .ctnl_timeout = { 384 384 .nlattr_to_obj = udp_timeout_nlattr_to_obj, 385 385 .obj_to_nlattr = udp_timeout_obj_to_nlattr, ··· 387 387 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 388 388 .nla_policy = udp_timeout_nla_policy, 389 389 }, 390 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 390 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 391 391 .init_net = udp_init_net, 392 392 .get_net_proto = udp_get_net_proto, 393 393 }; 394 394 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); 395 395 #endif 396 - #include <net/netfilter/nf_conntrack_timeout.h>
+1
net/netfilter/nf_tables_api.c
··· 4637 4637 } 4638 4638 set->ndeact++; 4639 4639 4640 + nft_set_elem_deactivate(ctx->net, set, elem); 4640 4641 nft_trans_elem_set(trans) = set; 4641 4642 nft_trans_elem(trans) = *elem; 4642 4643 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+29 -30
net/netfilter/nft_ct.c
··· 799 799 } 800 800 801 801 struct nft_ct_timeout_obj { 802 - struct nf_conn *tmpl; 802 + struct nf_ct_timeout *timeout; 803 803 u8 l4proto; 804 804 }; 805 805 ··· 809 809 { 810 810 const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 811 811 struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); 812 - struct sk_buff *skb = pkt->skb; 812 + struct nf_conn_timeout *timeout; 813 + const unsigned int *values; 813 814 814 - if (ct || 815 - priv->l4proto != pkt->tprot) 815 + if (priv->l4proto != pkt->tprot) 816 816 return; 817 817 818 - nf_ct_set(skb, priv->tmpl, IP_CT_NEW); 818 + if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct)) 819 + return; 820 + 821 + timeout = nf_ct_timeout_find(ct); 822 + if (!timeout) { 823 + timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC); 824 + if (!timeout) { 825 + regs->verdict.code = NF_DROP; 826 + return; 827 + } 828 + } 829 + 830 + rcu_assign_pointer(timeout->timeout, priv->timeout); 831 + 832 + /* adjust the timeout as per 'new' state. ct is unconfirmed, 833 + * so the current timestamp must not be added. 
834 + */ 835 + values = nf_ct_timeout_data(timeout); 836 + if (values) 837 + nf_ct_refresh(ct, pkt->skb, values[0]); 819 838 } 820 839 821 840 static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, 822 841 const struct nlattr * const tb[], 823 842 struct nft_object *obj) 824 843 { 825 - const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; 826 844 struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 827 845 const struct nf_conntrack_l4proto *l4proto; 828 - struct nf_conn_timeout *timeout_ext; 829 846 struct nf_ct_timeout *timeout; 830 847 int l3num = ctx->family; 831 - struct nf_conn *tmpl; 832 848 __u8 l4num; 833 849 int ret; 834 850 ··· 879 863 880 864 timeout->l3num = l3num; 881 865 timeout->l4proto = l4proto; 882 - tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC); 883 - if (!tmpl) { 884 - ret = -ENOMEM; 885 - goto err_free_timeout; 886 - } 887 - 888 - timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC); 889 - if (!timeout_ext) { 890 - ret = -ENOMEM; 891 - goto err_free_tmpl; 892 - } 893 866 894 867 ret = nf_ct_netns_get(ctx->net, ctx->family); 895 868 if (ret < 0) 896 - goto err_free_tmpl; 869 + goto err_free_timeout; 897 870 898 - priv->tmpl = tmpl; 899 - 871 + priv->timeout = timeout; 900 872 return 0; 901 873 902 - err_free_tmpl: 903 - nf_ct_tmpl_free(tmpl); 904 874 err_free_timeout: 905 875 kfree(timeout); 906 876 err_proto_put: ··· 898 896 struct nft_object *obj) 899 897 { 900 898 struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 901 - struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); 902 - struct nf_ct_timeout *timeout; 899 + struct nf_ct_timeout *timeout = priv->timeout; 903 900 904 - timeout = rcu_dereference_raw(t->timeout); 905 901 nf_ct_untimeout(ctx->net, timeout); 906 902 nf_ct_l4proto_put(timeout->l4proto); 907 903 nf_ct_netns_put(ctx->net, ctx->family); 908 - nf_ct_tmpl_free(priv->tmpl); 904 + kfree(priv->timeout); 909 905 } 910 906 911 907 static int nft_ct_timeout_obj_dump(struct sk_buff *skb, 912 908 struct 
nft_object *obj, bool reset) 913 909 { 914 910 const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 915 - const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); 916 - const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout); 911 + const struct nf_ct_timeout *timeout = priv->timeout; 917 912 struct nlattr *nest_params; 918 913 int ret; 919 914
+21 -1
net/netfilter/xt_CHECKSUM.c
··· 16 16 #include <linux/netfilter/x_tables.h> 17 17 #include <linux/netfilter/xt_CHECKSUM.h> 18 18 19 + #include <linux/netfilter_ipv4/ip_tables.h> 20 + #include <linux/netfilter_ipv6/ip6_tables.h> 21 + 19 22 MODULE_LICENSE("GPL"); 20 23 MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>"); 21 24 MODULE_DESCRIPTION("Xtables: checksum modification"); ··· 28 25 static unsigned int 29 26 checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) 30 27 { 31 - if (skb->ip_summed == CHECKSUM_PARTIAL) 28 + if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb)) 32 29 skb_checksum_help(skb); 33 30 34 31 return XT_CONTINUE; ··· 37 34 static int checksum_tg_check(const struct xt_tgchk_param *par) 38 35 { 39 36 const struct xt_CHECKSUM_info *einfo = par->targinfo; 37 + const struct ip6t_ip6 *i6 = par->entryinfo; 38 + const struct ipt_ip *i4 = par->entryinfo; 40 39 41 40 if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { 42 41 pr_info_ratelimited("unsupported CHECKSUM operation %x\n", ··· 48 43 if (!einfo->operation) 49 44 return -EINVAL; 50 45 46 + switch (par->family) { 47 + case NFPROTO_IPV4: 48 + if (i4->proto == IPPROTO_UDP && 49 + (i4->invflags & XT_INV_PROTO) == 0) 50 + return 0; 51 + break; 52 + case NFPROTO_IPV6: 53 + if ((i6->flags & IP6T_F_PROTO) && 54 + i6->proto == IPPROTO_UDP && 55 + (i6->invflags & XT_INV_PROTO) == 0) 56 + return 0; 57 + break; 58 + } 59 + 60 + pr_warn_once("CHECKSUM should be avoided. If really needed, restrict with \"-p udp\" and only use in OUTPUT\n"); 51 61 return 0; 52 62 } 53 63
+13 -1
net/netfilter/xt_cluster.c
··· 125 125 static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) 126 126 { 127 127 struct xt_cluster_match_info *info = par->matchinfo; 128 + int ret; 128 129 129 130 if (info->total_nodes > XT_CLUSTER_NODES_MAX) { 130 131 pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", ··· 136 135 pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); 137 136 return -EDOM; 138 137 } 139 - return 0; 138 + 139 + ret = nf_ct_netns_get(par->net, par->family); 140 + if (ret < 0) 141 + pr_info_ratelimited("cannot load conntrack support for proto=%u\n", 142 + par->family); 143 + return ret; 144 + } 145 + 146 + static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par) 147 + { 148 + nf_ct_netns_put(par->net, par->family); 140 149 } 141 150 142 151 static struct xt_match xt_cluster_match __read_mostly = { ··· 155 144 .match = xt_cluster_mt, 156 145 .checkentry = xt_cluster_mt_checkentry, 157 146 .matchsize = sizeof(struct xt_cluster_match_info), 147 + .destroy = xt_cluster_mt_destroy, 158 148 .me = THIS_MODULE, 159 149 }; 160 150
+9 -9
net/netfilter/xt_hashlimit.c
··· 1057 1057 static void *dl_seq_start(struct seq_file *s, loff_t *pos) 1058 1058 __acquires(htable->lock) 1059 1059 { 1060 - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1060 + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); 1061 1061 unsigned int *bucket; 1062 1062 1063 1063 spin_lock_bh(&htable->lock); ··· 1074 1074 1075 1075 static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) 1076 1076 { 1077 - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1077 + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); 1078 1078 unsigned int *bucket = v; 1079 1079 1080 1080 *pos = ++(*bucket); ··· 1088 1088 static void dl_seq_stop(struct seq_file *s, void *v) 1089 1089 __releases(htable->lock) 1090 1090 { 1091 - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1091 + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); 1092 1092 unsigned int *bucket = v; 1093 1093 1094 1094 if (!IS_ERR(bucket)) ··· 1130 1130 static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, 1131 1131 struct seq_file *s) 1132 1132 { 1133 - struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); 1133 + struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); 1134 1134 1135 1135 spin_lock(&ent->lock); 1136 1136 /* recalculate to show accurate numbers */ ··· 1145 1145 static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, 1146 1146 struct seq_file *s) 1147 1147 { 1148 - struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); 1148 + struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); 1149 1149 1150 1150 spin_lock(&ent->lock); 1151 1151 /* recalculate to show accurate numbers */ ··· 1160 1160 static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, 1161 1161 struct seq_file *s) 1162 1162 { 1163 - struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); 1163 + struct 
xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); 1164 1164 1165 1165 spin_lock(&ent->lock); 1166 1166 /* recalculate to show accurate numbers */ ··· 1174 1174 1175 1175 static int dl_seq_show_v2(struct seq_file *s, void *v) 1176 1176 { 1177 - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1177 + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); 1178 1178 unsigned int *bucket = (unsigned int *)v; 1179 1179 struct dsthash_ent *ent; 1180 1180 ··· 1188 1188 1189 1189 static int dl_seq_show_v1(struct seq_file *s, void *v) 1190 1190 { 1191 - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1191 + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); 1192 1192 unsigned int *bucket = v; 1193 1193 struct dsthash_ent *ent; 1194 1194 ··· 1202 1202 1203 1203 static int dl_seq_show(struct seq_file *s, void *v) 1204 1204 { 1205 - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1205 + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); 1206 1206 unsigned int *bucket = v; 1207 1207 struct dsthash_ent *ent; 1208 1208
+4 -1
net/rds/bind.c
··· 76 76 struct rds_sock *rs; 77 77 78 78 __rds_create_bind_key(key, addr, port, scope_id); 79 - rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms); 79 + rcu_read_lock(); 80 + rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); 80 81 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) 81 82 rds_sock_addref(rs); 82 83 else 83 84 rs = NULL; 85 + rcu_read_unlock(); 84 86 85 87 rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, 86 88 ntohs(port)); ··· 237 235 goto out; 238 236 } 239 237 238 + sock_set_flag(sk, SOCK_RCU_FREE); 240 239 ret = rds_add_bound(rs, binding_addr, &port, scope_id); 241 240 if (ret) 242 241 goto out;
+18 -10
net/sched/act_tunnel_key.c
··· 317 317 &metadata->u.tun_info, 318 318 opts_len, extack); 319 319 if (ret < 0) 320 - goto err_out; 320 + goto release_tun_meta; 321 321 } 322 322 323 323 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; ··· 333 333 &act_tunnel_key_ops, bind, true); 334 334 if (ret) { 335 335 NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); 336 - goto err_out; 336 + goto release_tun_meta; 337 337 } 338 338 339 339 ret = ACT_P_CREATED; 340 340 } else if (!ovr) { 341 - tcf_idr_release(*a, bind); 342 341 NL_SET_ERR_MSG(extack, "TC IDR already exists"); 343 - return -EEXIST; 342 + ret = -EEXIST; 343 + goto release_tun_meta; 344 344 } 345 345 346 346 t = to_tunnel_key(*a); 347 347 348 348 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); 349 349 if (unlikely(!params_new)) { 350 - tcf_idr_release(*a, bind); 351 350 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); 352 - return -ENOMEM; 351 + ret = -ENOMEM; 352 + exists = true; 353 + goto release_tun_meta; 353 354 } 354 355 params_new->tcft_action = parm->t_action; 355 356 params_new->tcft_enc_metadata = metadata; ··· 367 366 tcf_idr_insert(tn, *a); 368 367 369 368 return ret; 369 + 370 + release_tun_meta: 371 + dst_release(&metadata->dst); 370 372 371 373 err_out: 372 374 if (exists) ··· 412 408 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, 413 409 opt->type) || 414 410 nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, 415 - opt->length * 4, opt + 1)) 411 + opt->length * 4, opt + 1)) { 412 + nla_nest_cancel(skb, start); 416 413 return -EMSGSIZE; 414 + } 417 415 418 416 len -= sizeof(struct geneve_opt) + opt->length * 4; 419 417 src += sizeof(struct geneve_opt) + opt->length * 4; ··· 429 423 const struct ip_tunnel_info *info) 430 424 { 431 425 struct nlattr *start; 432 - int err; 426 + int err = -EINVAL; 433 427 434 428 if (!info->options_len) 435 429 return 0; ··· 441 435 if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { 442 436 err = tunnel_key_geneve_opts_dump(skb, info); 443 437 if (err) 444 - return err; 438 + 
goto err_out; 445 439 } else { 446 - return -EINVAL; 440 + err_out: 441 + nla_nest_cancel(skb, start); 442 + return err; 447 443 } 448 444 449 445 nla_nest_end(skb, start);
+12 -6
net/tipc/socket.c
··· 576 576 sk_stop_timer(sk, &sk->sk_timer); 577 577 tipc_sk_remove(tsk); 578 578 579 + sock_orphan(sk); 579 580 /* Reject any messages that accumulated in backlog queue */ 580 581 release_sock(sk); 581 582 tipc_dest_list_purge(&tsk->cong_links); ··· 3230 3229 struct netlink_callback *cb, 3231 3230 struct tipc_sock *tsk)) 3232 3231 { 3233 - struct rhashtable_iter *iter = (void *)cb->args[0]; 3232 + struct rhashtable_iter *iter = (void *)cb->args[4]; 3234 3233 struct tipc_sock *tsk; 3235 3234 int err; 3236 3235 ··· 3266 3265 3267 3266 int tipc_dump_start(struct netlink_callback *cb) 3268 3267 { 3269 - struct rhashtable_iter *iter = (void *)cb->args[0]; 3270 - struct net *net = sock_net(cb->skb->sk); 3268 + return __tipc_dump_start(cb, sock_net(cb->skb->sk)); 3269 + } 3270 + EXPORT_SYMBOL(tipc_dump_start); 3271 + 3272 + int __tipc_dump_start(struct netlink_callback *cb, struct net *net) 3273 + { 3274 + /* tipc_nl_name_table_dump() uses cb->args[0...3]. */ 3275 + struct rhashtable_iter *iter = (void *)cb->args[4]; 3271 3276 struct tipc_net *tn = tipc_net(net); 3272 3277 3273 3278 if (!iter) { ··· 3281 3274 if (!iter) 3282 3275 return -ENOMEM; 3283 3276 3284 - cb->args[0] = (long)iter; 3277 + cb->args[4] = (long)iter; 3285 3278 } 3286 3279 3287 3280 rhashtable_walk_enter(&tn->sk_rht, iter); 3288 3281 return 0; 3289 3282 } 3290 - EXPORT_SYMBOL(tipc_dump_start); 3291 3283 3292 3284 int tipc_dump_done(struct netlink_callback *cb) 3293 3285 { 3294 - struct rhashtable_iter *hti = (void *)cb->args[0]; 3286 + struct rhashtable_iter *hti = (void *)cb->args[4]; 3295 3287 3296 3288 rhashtable_walk_exit(hti); 3297 3289 kfree(hti);
+1
net/tipc/socket.h
··· 69 69 struct netlink_callback *cb, 70 70 struct tipc_sock *tsk)); 71 71 int tipc_dump_start(struct netlink_callback *cb); 72 + int __tipc_dump_start(struct netlink_callback *cb, struct net *net); 72 73 int tipc_dump_done(struct netlink_callback *cb); 73 74 #endif
+6
net/tls/tls_sw.c
··· 263 263 &ctx->sg_encrypted_num_elem, 264 264 &ctx->sg_encrypted_size, 0); 265 265 266 + if (rc == -ENOSPC) 267 + ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data); 268 + 266 269 return rc; 267 270 } 268 271 ··· 278 275 rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0, 279 276 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, 280 277 tls_ctx->pending_open_record_frags); 278 + 279 + if (rc == -ENOSPC) 280 + ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data); 281 281 282 282 return rc; 283 283 }
+2 -1
scripts/checkpatch.pl
··· 380 380 __noclone| 381 381 __deprecated| 382 382 __read_mostly| 383 + __ro_after_init| 383 384 __kprobes| 384 385 $InitAttribute| 385 386 ____cacheline_aligned| ··· 3312 3311 # known declaration macros 3313 3312 $sline =~ /^\+\s+$declaration_macros/ || 3314 3313 # start of struct or union or enum 3315 - $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ || 3314 + $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ || 3316 3315 # start or end of block or continuation of declaration 3317 3316 $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ || 3318 3317 # bitfield continuation
+3 -2
scripts/depmod.sh
··· 11 11 KERNELRELEASE=$2 12 12 13 13 if ! test -r System.map ; then 14 + echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2 14 15 exit 0 15 16 fi 16 17 17 18 if [ -z $(command -v $DEPMOD) ]; then 18 - echo "'make modules_install' requires $DEPMOD. Please install it." >&2 19 + echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 19 20 echo "This is probably in the kmod package." >&2 20 - exit 1 21 + exit 0 21 22 fi 22 23 23 24 # older versions of depmod require the version string to start with three
-1
scripts/kconfig/Makefile
··· 221 221 222 222 # check if necessary packages are available, and configure build flags 223 223 define filechk_conf_cfg 224 - $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \ 225 224 $(CONFIG_SHELL) $< 226 225 endef 227 226
-8
scripts/kconfig/check-pkgconfig.sh
··· 1 - #!/bin/sh 2 - # SPDX-License-Identifier: GPL-2.0 3 - # Check for pkg-config presence 4 - 5 - if [ -z $(command -v pkg-config) ]; then 6 - echo "'make *config' requires 'pkg-config'. Please install it." 1>&2 7 - exit 1 8 - fi
+7
scripts/kconfig/gconf-cfg.sh
··· 3 3 4 4 PKG="gtk+-2.0 gmodule-2.0 libglade-2.0" 5 5 6 + if [ -z "$(command -v pkg-config)" ]; then 7 + echo >&2 "*" 8 + echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it." 9 + echo >&2 "*" 10 + exit 1 11 + fi 12 + 6 13 if ! pkg-config --exists $PKG; then 7 14 echo >&2 "*" 8 15 echo >&2 "* Unable to find the GTK+ installation. Please make sure that"
+15 -12
scripts/kconfig/mconf-cfg.sh
··· 4 4 PKG="ncursesw" 5 5 PKG2="ncurses" 6 6 7 - if pkg-config --exists $PKG; then 8 - echo cflags=\"$(pkg-config --cflags $PKG)\" 9 - echo libs=\"$(pkg-config --libs $PKG)\" 10 - exit 0 7 + if [ -n "$(command -v pkg-config)" ]; then 8 + if pkg-config --exists $PKG; then 9 + echo cflags=\"$(pkg-config --cflags $PKG)\" 10 + echo libs=\"$(pkg-config --libs $PKG)\" 11 + exit 0 12 + fi 13 + 14 + if pkg-config --exists $PKG2; then 15 + echo cflags=\"$(pkg-config --cflags $PKG2)\" 16 + echo libs=\"$(pkg-config --libs $PKG2)\" 17 + exit 0 18 + fi 11 19 fi 12 20 13 - if pkg-config --exists $PKG2; then 14 - echo cflags=\"$(pkg-config --cflags $PKG2)\" 15 - echo libs=\"$(pkg-config --libs $PKG2)\" 16 - exit 0 17 - fi 18 - 19 - # Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses 20 - # by pkg-config. 21 + # Check the default paths in case pkg-config is not installed. 22 + # (Even if it is installed, some distributions such as openSUSE cannot 23 + # find ncurses by pkg-config.) 21 24 if [ -f /usr/include/ncursesw/ncurses.h ]; then 22 25 echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" 23 26 echo libs=\"-lncursesw\"
-1
scripts/kconfig/mconf.c
··· 490 490 switch (prop->type) { 491 491 case P_MENU: 492 492 child_count++; 493 - prompt = prompt; 494 493 if (single_menu_mode) { 495 494 item_make("%s%*c%s", 496 495 menu->data ? "-->" : "++>",
+15 -12
scripts/kconfig/nconf-cfg.sh
··· 4 4 PKG="ncursesw menuw panelw" 5 5 PKG2="ncurses menu panel" 6 6 7 - if pkg-config --exists $PKG; then 8 - echo cflags=\"$(pkg-config --cflags $PKG)\" 9 - echo libs=\"$(pkg-config --libs $PKG)\" 10 - exit 0 7 + if [ -n "$(command -v pkg-config)" ]; then 8 + if pkg-config --exists $PKG; then 9 + echo cflags=\"$(pkg-config --cflags $PKG)\" 10 + echo libs=\"$(pkg-config --libs $PKG)\" 11 + exit 0 12 + fi 13 + 14 + if pkg-config --exists $PKG2; then 15 + echo cflags=\"$(pkg-config --cflags $PKG2)\" 16 + echo libs=\"$(pkg-config --libs $PKG2)\" 17 + exit 0 18 + fi 11 19 fi 12 20 13 - if pkg-config --exists $PKG2; then 14 - echo cflags=\"$(pkg-config --cflags $PKG2)\" 15 - echo libs=\"$(pkg-config --libs $PKG2)\" 16 - exit 0 17 - fi 18 - 19 - # Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses 20 - # by pkg-config. 21 + # Check the default paths in case pkg-config is not installed. 22 + # (Even if it is installed, some distributions such as openSUSE cannot 23 + # find ncurses by pkg-config.) 21 24 if [ -f /usr/include/ncursesw/ncurses.h ]; then 22 25 echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" 23 26 echo libs=\"-lncursesw -lmenuw -lpanelw\"
+7
scripts/kconfig/qconf-cfg.sh
··· 4 4 PKG="Qt5Core Qt5Gui Qt5Widgets" 5 5 PKG2="QtCore QtGui" 6 6 7 + if [ -z "$(command -v pkg-config)" ]; then 8 + echo >&2 "*" 9 + echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it." 10 + echo >&2 "*" 11 + exit 1 12 + fi 13 + 7 14 if pkg-config --exists $PKG; then 8 15 echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\" 9 16 echo libs=\"$(pkg-config --libs $PKG)\"
+3
scripts/recordmcount.pl
··· 389 389 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$"; 390 390 $type = ".quad"; 391 391 $alignment = 2; 392 + } elsif ($arch eq "nds32") { 393 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$"; 394 + $alignment = 2; 392 395 } else { 393 396 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; 394 397 }
+1 -1
scripts/setlocalversion
··· 74 74 fi 75 75 76 76 # Check for uncommitted changes 77 - if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then 77 + if git status -uno --porcelain | grep -qv '^.. scripts/package'; then 78 78 printf '%s' -dirty 79 79 fi 80 80
-1
security/apparmor/secid.c
··· 79 79 struct aa_label *label = aa_secid_to_label(secid); 80 80 int len; 81 81 82 - AA_BUG(!secdata); 83 82 AA_BUG(!seclen); 84 83 85 84 if (!label)
+1 -1
security/keys/dh.c
··· 300 300 } 301 301 dh_inputs.g_size = dlen; 302 302 303 - dlen = dh_data_from_key(pcopy.private, &dh_inputs.key); 303 + dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key); 304 304 if (dlen < 0) { 305 305 ret = dlen; 306 306 goto out2;
+2 -2
sound/core/rawmidi.c
··· 129 129 runtime->avail = 0; 130 130 else 131 131 runtime->avail = runtime->buffer_size; 132 - runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL); 132 + runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL); 133 133 if (!runtime->buffer) { 134 134 kfree(runtime); 135 135 return -ENOMEM; ··· 655 655 if (params->avail_min < 1 || params->avail_min > params->buffer_size) 656 656 return -EINVAL; 657 657 if (params->buffer_size != runtime->buffer_size) { 658 - newbuf = kvmalloc(params->buffer_size, GFP_KERNEL); 658 + newbuf = kvzalloc(params->buffer_size, GFP_KERNEL); 659 659 if (!newbuf) 660 660 return -ENOMEM; 661 661 spin_lock_irq(&runtime->lock);
+7 -15
sound/hda/ext/hdac_ext_stream.c
··· 146 146 */ 147 147 void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream) 148 148 { 149 - snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN); 149 + snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 150 + AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN); 150 151 } 151 152 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start); 152 153 ··· 172 171 173 172 snd_hdac_ext_link_stream_clear(stream); 174 173 175 - snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST); 174 + snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 175 + AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST); 176 176 udelay(3); 177 177 timeout = 50; 178 178 do { ··· 244 242 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, 245 243 int stream) 246 244 { 247 - snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream)); 245 + snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0); 248 246 } 249 247 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id); 250 248 ··· 417 415 bool enable, int index) 418 416 { 419 417 u32 mask = 0; 420 - u32 register_mask = 0; 421 418 422 419 if (!bus->spbcap) { 423 420 dev_err(bus->dev, "Address of SPB capability is NULL\n"); ··· 425 424 426 425 mask |= (1 << index); 427 426 428 - register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL); 429 - 430 - mask |= register_mask; 431 - 432 427 if (enable) 433 - snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask); 428 + snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask); 434 429 else 435 430 snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0); 436 431 } ··· 500 503 bool enable, int index) 501 504 { 502 505 u32 mask = 0; 503 - u32 register_mask = 0; 504 506 505 507 if (!bus->drsmcap) { 506 508 dev_err(bus->dev, "Address of DRSM capability is NULL\n"); ··· 508 512 509 513 mask |= (1 << index); 510 514 511 - register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL); 512 - 513 - mask |= register_mask; 514 - 515 515 if (enable) 516 - 
snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask); 516 + snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask); 517 517 else 518 518 snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0); 519 519 }
+2 -1
sound/pci/hda/hda_codec.c
··· 3935 3935 3936 3936 list_for_each_codec(codec, bus) { 3937 3937 /* FIXME: maybe a better way needed for forced reset */ 3938 - cancel_delayed_work_sync(&codec->jackpoll_work); 3938 + if (current_work() != &codec->jackpoll_work.work) 3939 + cancel_delayed_work_sync(&codec->jackpoll_work); 3939 3940 #ifdef CONFIG_PM 3940 3941 if (hda_codec_is_power_on(codec)) { 3941 3942 hda_call_codec_suspend(codec);
+49 -10
tools/kvm/kvm_stat/kvm_stat
··· 759 759 if len(vms) == 0: 760 760 self.do_read = False 761 761 762 - self.paths = filter(lambda x: "{}-".format(pid) in x, vms) 762 + self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms)) 763 763 764 764 else: 765 765 self.paths = [] 766 766 self.do_read = True 767 - self.reset() 767 + 768 + def _verify_paths(self): 769 + """Remove invalid paths""" 770 + for path in self.paths: 771 + if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)): 772 + self.paths.remove(path) 773 + continue 768 774 769 775 def read(self, reset=0, by_guest=0): 770 776 """Returns a dict with format:'file name / field -> current value'. ··· 786 780 # If no debugfs filtering support is available, then don't read. 787 781 if not self.do_read: 788 782 return results 783 + self._verify_paths() 789 784 790 785 paths = self.paths 791 786 if self._pid == 0: ··· 1103 1096 pid = self.stats.pid_filter 1104 1097 self.screen.erase() 1105 1098 gname = self.get_gname_from_pid(pid) 1099 + self._gname = gname 1106 1100 if gname: 1107 1101 gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...' 
1108 1102 if len(gname) > MAX_GUEST_NAME_LEN 1109 1103 else gname)) 1110 1104 if pid > 0: 1111 - self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}' 1112 - .format(pid, gname), curses.A_BOLD) 1105 + self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname) 1113 1106 else: 1114 - self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) 1107 + self._headline = 'kvm statistics - summary' 1108 + self.screen.addstr(0, 0, self._headline, curses.A_BOLD) 1115 1109 if self.stats.fields_filter: 1116 1110 regex = self.stats.fields_filter 1117 1111 if len(regex) > MAX_REGEX_LEN: ··· 1170 1162 1171 1163 return sorted_items 1172 1164 1165 + if not self._is_running_guest(self.stats.pid_filter): 1166 + if self._gname: 1167 + try: # ...to identify the guest by name in case it's back 1168 + pids = self.get_pid_from_gname(self._gname) 1169 + if len(pids) == 1: 1170 + self._refresh_header(pids[0]) 1171 + self._update_pid(pids[0]) 1172 + return 1173 + except: 1174 + pass 1175 + self._display_guest_dead() 1176 + # leave final data on screen 1177 + return 1173 1178 row = 3 1174 1179 self.screen.move(row, 0) 1175 1180 self.screen.clrtobot() ··· 1205 1184 # print events 1206 1185 tavg = 0 1207 1186 tcur = 0 1187 + guest_removed = False 1208 1188 for key, values in get_sorted_events(self, stats): 1209 1189 if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): 1210 1190 break ··· 1213 1191 key = self.get_gname_from_pid(key) 1214 1192 if not key: 1215 1193 continue 1216 - cur = int(round(values.delta / sleeptime)) if values.delta else '' 1194 + cur = int(round(values.delta / sleeptime)) if values.delta else 0 1195 + if cur < 0: 1196 + guest_removed = True 1197 + continue 1217 1198 if key[0] != ' ': 1218 1199 if values.delta: 1219 1200 tcur += values.delta ··· 1229 1204 values.value * 100 / float(ltotal), cur)) 1230 1205 row += 1 1231 1206 if row == 3: 1232 - self.screen.addstr(4, 1, 'No matching events reported yet') 1207 + if guest_removed: 1208 + 
self.screen.addstr(4, 1, 'Guest removed, updating...') 1209 + else: 1210 + self.screen.addstr(4, 1, 'No matching events reported yet') 1233 1211 if row > 4: 1234 1212 tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' 1235 1213 self.screen.addstr(row, 1, '%-40s %10d %8s' % 1236 1214 ('Total', total, tavg), curses.A_BOLD) 1237 1215 self.screen.refresh() 1216 + 1217 + def _display_guest_dead(self): 1218 + marker = ' Guest is DEAD ' 1219 + y = min(len(self._headline), 80 - len(marker)) 1220 + self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT) 1238 1221 1239 1222 def _show_msg(self, text): 1240 1223 """Display message centered text and exit on key press""" ··· 1252 1219 (x, term_width) = self.screen.getmaxyx() 1253 1220 row = 2 1254 1221 for line in text: 1255 - start = (term_width - len(line)) / 2 1222 + start = (term_width - len(line)) // 2 1256 1223 self.screen.addstr(row, start, line) 1257 1224 row += 1 1258 - self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint, 1225 + self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint, 1259 1226 curses.A_STANDOUT) 1260 1227 self.screen.getkey() 1261 1228 ··· 1352 1319 msg = '"' + str(val) + '": Invalid value' 1353 1320 self._refresh_header() 1354 1321 1322 + def _is_running_guest(self, pid): 1323 + """Check if pid is still a running process.""" 1324 + if not pid: 1325 + return True 1326 + return os.path.isdir(os.path.join('/proc/', str(pid))) 1327 + 1355 1328 def _show_vm_selection_by_guest(self): 1356 1329 """Draws guest selection mask. 1357 1330 ··· 1385 1346 if not guest or guest == '0': 1386 1347 break 1387 1348 if guest.isdigit(): 1388 - if not os.path.isdir(os.path.join('/proc/', guest)): 1349 + if not self._is_running_guest(guest): 1389 1350 msg = '"' + guest + '": Not a running process' 1390 1351 continue 1391 1352 pid = int(guest)
-6
tools/vm/page-types.c
··· 159 159 }; 160 160 161 161 162 - static const char * const debugfs_known_mountpoints[] = { 163 - "/sys/kernel/debug", 164 - "/debug", 165 - 0, 166 - }; 167 - 168 162 /* 169 163 * data structures 170 164 */
+2 -2
tools/vm/slabinfo.c
··· 30 30 int alias; 31 31 int refs; 32 32 int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu; 33 - int hwcache_align, object_size, objs_per_slab; 34 - int sanity_checks, slab_size, store_user, trace; 33 + unsigned int hwcache_align, object_size, objs_per_slab; 34 + unsigned int sanity_checks, slab_size, store_user, trace; 35 35 int order, poison, reclaim_account, red_zone; 36 36 unsigned long partial, objects, slabs, objects_partial, objects_total; 37 37 unsigned long alloc_fastpath, alloc_slowpath;
+8 -13
virt/kvm/arm/mmu.c
··· 1817 1817 return 0; 1818 1818 } 1819 1819 1820 - int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 1821 - { 1822 - unsigned long end = hva + PAGE_SIZE; 1823 - 1824 - if (!kvm->arch.pgd) 1825 - return 0; 1826 - 1827 - trace_kvm_unmap_hva(hva); 1828 - handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); 1829 - return 0; 1830 - } 1831 - 1832 1820 int kvm_unmap_hva_range(struct kvm *kvm, 1833 1821 unsigned long start, unsigned long end) 1834 1822 { ··· 1848 1860 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) 1849 1861 { 1850 1862 unsigned long end = hva + PAGE_SIZE; 1863 + kvm_pfn_t pfn = pte_pfn(pte); 1851 1864 pte_t stage2_pte; 1852 1865 1853 1866 if (!kvm->arch.pgd) 1854 1867 return; 1855 1868 1856 1869 trace_kvm_set_spte_hva(hva); 1857 - stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); 1870 + 1871 + /* 1872 + * We've moved a page around, probably through CoW, so let's treat it 1873 + * just like a translation fault and clean the cache to the PoC. 1874 + */ 1875 + clean_dcache_guest_page(pfn, PAGE_SIZE); 1876 + stage2_pte = pfn_pte(pfn, PAGE_S2); 1858 1877 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); 1859 1878 } 1860 1879
-15
virt/kvm/arm/trace.h
··· 134 134 __entry->vcpu_pc, __entry->instr, __entry->cpsr) 135 135 ); 136 136 137 - TRACE_EVENT(kvm_unmap_hva, 138 - TP_PROTO(unsigned long hva), 139 - TP_ARGS(hva), 140 - 141 - TP_STRUCT__entry( 142 - __field( unsigned long, hva ) 143 - ), 144 - 145 - TP_fast_assign( 146 - __entry->hva = hva; 147 - ), 148 - 149 - TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) 150 - ); 151 - 152 137 TRACE_EVENT(kvm_unmap_hva_range, 153 138 TP_PROTO(unsigned long start, unsigned long end), 154 139 TP_ARGS(start, end),