Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 5.3-rc5 into char-misc-next

We need the char/misc fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+1440 -813
+3 -1
Documentation/devicetree/bindings/Makefile
··· 19 19 20 20 DT_DOCS = $(shell \ 21 21 cd $(srctree)/$(src) && \ 22 - find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \ 22 + find * \( -name '*.yaml' ! \ 23 + -name $(DT_TMP_SCHEMA) ! \ 24 + -name '*.example.dt.yaml' \) \ 23 25 ) 24 26 25 27 DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
+17 -13
Documentation/devicetree/bindings/net/fsl-fec.txt
··· 7 7 - phy-mode : See ethernet.txt file in the same directory 8 8 9 9 Optional properties: 10 - - phy-reset-gpios : Should specify the gpio for phy reset 11 - - phy-reset-duration : Reset duration in milliseconds. Should present 12 - only if property "phy-reset-gpios" is available. Missing the property 13 - will have the duration be 1 millisecond. Numbers greater than 1000 are 14 - invalid and 1 millisecond will be used instead. 15 - - phy-reset-active-high : If present then the reset sequence using the GPIO 16 - specified in the "phy-reset-gpios" property is reversed (H=reset state, 17 - L=operation state). 18 - - phy-reset-post-delay : Post reset delay in milliseconds. If present then 19 - a delay of phy-reset-post-delay milliseconds will be observed after the 20 - phy-reset-gpios has been toggled. Can be omitted thus no delay is 21 - observed. Delay is in range of 1ms to 1000ms. Other delays are invalid. 22 10 - phy-supply : regulator that powers the Ethernet PHY. 23 11 - phy-handle : phandle to the PHY device connected to this device. 24 12 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. ··· 35 47 For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse 36 48 per second interrupt associated with 1588 precision time protocol(PTP). 37 49 38 - 39 50 Optional subnodes: 40 51 - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes 41 52 according to phy.txt in the same directory 53 + 54 + Deprecated optional properties: 55 + To avoid these, create a phy node according to phy.txt in the same 56 + directory, and point the fec's "phy-handle" property to it. Then use 57 + the phy's reset binding, again described by phy.txt. 58 + - phy-reset-gpios : Should specify the gpio for phy reset 59 + - phy-reset-duration : Reset duration in milliseconds. Should present 60 + only if property "phy-reset-gpios" is available. Missing the property 61 + will have the duration be 1 millisecond. 
Numbers greater than 1000 are 62 + invalid and 1 millisecond will be used instead. 63 + - phy-reset-active-high : If present then the reset sequence using the GPIO 64 + specified in the "phy-reset-gpios" property is reversed (H=reset state, 65 + L=operation state). 66 + - phy-reset-post-delay : Post reset delay in milliseconds. If present then 67 + a delay of phy-reset-post-delay milliseconds will be observed after the 68 + phy-reset-gpios has been toggled. Can be omitted thus no delay is 69 + observed. Delay is in range of 1ms to 1000ms. Other delays are invalid. 42 70 43 71 Example: 44 72
+2 -1
Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
··· 37 37 hwlocks: true 38 38 39 39 st,syscfg: 40 - $ref: "/schemas/types.yaml#/definitions/phandle-array" 40 + allOf: 41 + - $ref: "/schemas/types.yaml#/definitions/phandle-array" 41 42 description: Should be phandle/offset/mask 42 43 items: 43 44 - description: Phandle to the syscon node which includes IRQ mux selection.
+16 -2
MAINTAINERS
··· 6441 6441 F: drivers/perf/fsl_imx8_ddr_perf.c 6442 6442 F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt 6443 6443 6444 + FREESCALE IMX I2C DRIVER 6445 + M: Oleksij Rempel <o.rempel@pengutronix.de> 6446 + R: Pengutronix Kernel Team <kernel@pengutronix.de> 6447 + L: linux-i2c@vger.kernel.org 6448 + S: Maintained 6449 + F: drivers/i2c/busses/i2c-imx.c 6450 + F: Documentation/devicetree/bindings/i2c/i2c-imx.txt 6451 + 6444 6452 FREESCALE IMX LPI2C DRIVER 6445 6453 M: Dong Aisheng <aisheng.dong@nxp.com> 6446 6454 L: linux-i2c@vger.kernel.org ··· 7460 7452 F: drivers/scsi/storvsc_drv.c 7461 7453 F: drivers/uio/uio_hv_generic.c 7462 7454 F: drivers/video/fbdev/hyperv_fb.c 7463 - F: drivers/iommu/hyperv_iommu.c 7455 + F: drivers/iommu/hyperv-iommu.c 7464 7456 F: net/vmw_vsock/hyperv_transport.c 7465 7457 F: include/clocksource/hyperv_timer.h 7466 7458 F: include/linux/hyperv.h ··· 8072 8064 S: Supported 8073 8065 F: drivers/scsi/isci/ 8074 8066 8067 + INTEL CPU family model numbers 8068 + M: Tony Luck <tony.luck@intel.com> 8069 + M: x86@kernel.org 8070 + L: linux-kernel@vger.kernel.org 8071 + S: Supported 8072 + F: arch/x86/include/asm/intel-family.h 8073 + 8075 8074 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) 8076 8075 M: Jani Nikula <jani.nikula@linux.intel.com> 8077 8076 M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> ··· 8431 8416 L: linux-fsdevel@vger.kernel.org 8432 8417 T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git 8433 8418 S: Supported 8434 - F: fs/iomap.c 8435 8419 F: fs/iomap/ 8436 8420 F: include/linux/iomap.h 8437 8421
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 3 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Bobtail Squid 7 7 8 8 # *DOCUMENTATION*
+1 -3
arch/arm/mm/dma-mapping.c
··· 2405 2405 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 2406 2406 unsigned long attrs) 2407 2407 { 2408 - if (!dev_is_dma_coherent(dev)) 2409 - return __get_dma_pgprot(attrs, prot); 2410 - return prot; 2408 + return __get_dma_pgprot(attrs, prot); 2411 2409 } 2412 2410 2413 2411 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+11 -3
arch/arm64/kernel/cpufeature.c
··· 184 184 }; 185 185 186 186 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { 187 - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), 188 - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), 189 - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), 187 + /* 188 + * We already refuse to boot CPUs that don't support our configured 189 + * page size, so we can only detect mismatches for a page size other 190 + * than the one we're currently using. Unfortunately, SoCs like this 191 + * exist in the wild so, even though we don't like it, we'll have to go 192 + * along with it and treat them as non-strict. 193 + */ 194 + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), 195 + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), 196 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), 197 + 190 198 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), 191 199 /* Linux shouldn't care about secure memory */ 192 200 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
+13 -9
arch/arm64/kernel/ftrace.c
··· 73 73 74 74 if (offset < -SZ_128M || offset >= SZ_128M) { 75 75 #ifdef CONFIG_ARM64_MODULE_PLTS 76 - struct plt_entry trampoline; 76 + struct plt_entry trampoline, *dst; 77 77 struct module *mod; 78 78 79 79 /* ··· 106 106 * to check if the actual opcodes are in fact identical, 107 107 * regardless of the offset in memory so use memcmp() instead. 108 108 */ 109 - trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); 110 - if (memcmp(mod->arch.ftrace_trampoline, &trampoline, 111 - sizeof(trampoline))) { 112 - if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) { 109 + dst = mod->arch.ftrace_trampoline; 110 + trampoline = get_plt_entry(addr, dst); 111 + if (memcmp(dst, &trampoline, sizeof(trampoline))) { 112 + if (plt_entry_is_initialized(dst)) { 113 113 pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); 114 114 return -EINVAL; 115 115 } 116 116 117 117 /* point the trampoline to our ftrace entry point */ 118 118 module_disable_ro(mod); 119 - *mod->arch.ftrace_trampoline = trampoline; 119 + *dst = trampoline; 120 120 module_enable_ro(mod, true); 121 121 122 - /* update trampoline before patching in the branch */ 123 - smp_wmb(); 122 + /* 123 + * Ensure updated trampoline is visible to instruction 124 + * fetch before we patch in the branch. 125 + */ 126 + __flush_icache_range((unsigned long)&dst[0], 127 + (unsigned long)&dst[1]); 124 128 } 125 - addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; 129 + addr = (unsigned long)dst; 126 130 #else /* CONFIG_ARM64_MODULE_PLTS */ 127 131 return -EINVAL; 128 132 #endif /* CONFIG_ARM64_MODULE_PLTS */
+1 -3
arch/arm64/mm/dma-mapping.c
··· 14 14 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 15 15 unsigned long attrs) 16 16 { 17 - if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE)) 18 - return pgprot_writecombine(prot); 19 - return prot; 17 + return pgprot_writecombine(prot); 20 18 } 21 19 22 20 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-1
arch/powerpc/Kconfig
··· 121 121 select ARCH_32BIT_OFF_T if PPC32 122 122 select ARCH_HAS_DEBUG_VIRTUAL 123 123 select ARCH_HAS_DEVMEM_IS_ALLOWED 124 - select ARCH_HAS_DMA_MMAP_PGPROT 125 124 select ARCH_HAS_ELF_RANDOMIZE 126 125 select ARCH_HAS_FORTIFY_SOURCE 127 126 select ARCH_HAS_GCOV_PROFILE_ALL
+1 -2
arch/powerpc/kernel/Makefile
··· 49 49 signal.o sysfs.o cacheinfo.o time.o \ 50 50 prom.o traps.o setup-common.o \ 51 51 udbg.o misc.o io.o misc_$(BITS).o \ 52 - of_platform.o prom_parse.o \ 53 - dma-common.o 52 + of_platform.o prom_parse.o 54 53 obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ 55 54 signal_64.o ptrace32.o \ 56 55 paca.o nvram_64.o firmware.o
-17
arch/powerpc/kernel/dma-common.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * Contains common dma routines for all powerpc platforms. 4 - * 5 - * Copyright (C) 2019 Shawn Anastasio. 6 - */ 7 - 8 - #include <linux/mm.h> 9 - #include <linux/dma-noncoherent.h> 10 - 11 - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 12 - unsigned long attrs) 13 - { 14 - if (!dev_is_dma_coherent(dev)) 15 - return pgprot_noncached(prot); 16 - return prot; 17 - }
+2
arch/riscv/configs/defconfig
··· 54 54 CONFIG_SERIAL_OF_PLATFORM=y 55 55 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y 56 56 CONFIG_HVC_RISCV_SBI=y 57 + CONFIG_HW_RANDOM=y 58 + CONFIG_HW_RANDOM_VIRTIO=y 57 59 CONFIG_SPI=y 58 60 CONFIG_SPI_SIFIVE=y 59 61 # CONFIG_PTP_1588_CLOCK is not set
+3
arch/riscv/configs/rv32_defconfig
··· 34 34 CONFIG_PCI_HOST_GENERIC=y 35 35 CONFIG_PCIE_XILINX=y 36 36 CONFIG_DEVTMPFS=y 37 + CONFIG_DEVTMPFS_MOUNT=y 37 38 CONFIG_BLK_DEV_LOOP=y 38 39 CONFIG_VIRTIO_BLK=y 39 40 CONFIG_BLK_DEV_SD=y ··· 54 53 CONFIG_SERIAL_OF_PLATFORM=y 55 54 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y 56 55 CONFIG_HVC_RISCV_SBI=y 56 + CONFIG_HW_RANDOM=y 57 + CONFIG_HW_RANDOM_VIRTIO=y 57 58 # CONFIG_PTP_1588_CLOCK is not set 58 59 CONFIG_DRM=y 59 60 CONFIG_DRM_RADEON=y
+7 -1
arch/riscv/include/asm/switch_to.h
··· 16 16 17 17 static inline void __fstate_clean(struct pt_regs *regs) 18 18 { 19 - regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN; 19 + regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN; 20 + } 21 + 22 + static inline void fstate_off(struct task_struct *task, 23 + struct pt_regs *regs) 24 + { 25 + regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF; 20 26 } 21 27 22 28 static inline void fstate_save(struct task_struct *task,
+9 -2
arch/riscv/include/asm/tlbflush.h
··· 53 53 } 54 54 55 55 #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1) 56 - #define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0) 56 + 57 57 #define flush_tlb_range(vma, start, end) \ 58 58 remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start)) 59 - #define flush_tlb_mm(mm) \ 59 + 60 + static inline void flush_tlb_page(struct vm_area_struct *vma, 61 + unsigned long addr) 62 + { 63 + flush_tlb_range(vma, addr, addr + PAGE_SIZE); 64 + } 65 + 66 + #define flush_tlb_mm(mm) \ 60 67 remote_sfence_vma(mm_cpumask(mm), 0, -1) 61 68 62 69 #endif /* CONFIG_SMP */
+9 -2
arch/riscv/kernel/process.c
··· 64 64 unsigned long sp) 65 65 { 66 66 regs->sstatus = SR_SPIE; 67 - if (has_fpu) 67 + if (has_fpu) { 68 68 regs->sstatus |= SR_FS_INITIAL; 69 + /* 70 + * Restore the initial value to the FP register 71 + * before starting the user program. 72 + */ 73 + fstate_restore(current, regs); 74 + } 69 75 regs->sepc = pc; 70 76 regs->sp = sp; 71 77 set_fs(USER_DS); ··· 81 75 { 82 76 #ifdef CONFIG_FPU 83 77 /* 84 - * Reset FPU context 78 + * Reset FPU state and context 85 79 * frm: round to nearest, ties to even (IEEE default) 86 80 * fflags: accrued exceptions cleared 87 81 */ 82 + fstate_off(current, task_pt_regs(current)); 88 83 memset(&current->thread.fstate, 0, sizeof(current->thread.fstate)); 89 84 #endif 90 85 }
+2 -3
arch/sh/kernel/disassemble.c
··· 475 475 printk("dbr"); 476 476 break; 477 477 case FD_REG_N: 478 - if (0) 479 - goto d_reg_n; 480 478 case F_REG_N: 481 479 printk("fr%d", rn); 482 480 break; ··· 486 488 printk("xd%d", rn & ~1); 487 489 break; 488 490 } 489 - d_reg_n: 491 + /* else, fall through */ 490 492 case D_REG_N: 491 493 printk("dr%d", rn); 492 494 break; ··· 495 497 printk("xd%d", rm & ~1); 496 498 break; 497 499 } 500 + /* else, fall through */ 498 501 case D_REG_M: 499 502 printk("dr%d", rm); 500 503 break;
+1
arch/sh/kernel/hw_breakpoint.c
··· 157 157 switch (sh_type) { 158 158 case SH_BREAKPOINT_READ: 159 159 *gen_type = HW_BREAKPOINT_R; 160 + break; 160 161 case SH_BREAKPOINT_WRITE: 161 162 *gen_type = HW_BREAKPOINT_W; 162 163 break;
+48 -15
arch/x86/include/asm/bootparam_utils.h
··· 18 18 * Note: efi_info is commonly left uninitialized, but that field has a 19 19 * private magic, so it is better to leave it unchanged. 20 20 */ 21 + 22 + #define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); }) 23 + 24 + #define BOOT_PARAM_PRESERVE(struct_member) \ 25 + { \ 26 + .start = offsetof(struct boot_params, struct_member), \ 27 + .len = sizeof_mbr(struct boot_params, struct_member), \ 28 + } 29 + 30 + struct boot_params_to_save { 31 + unsigned int start; 32 + unsigned int len; 33 + }; 34 + 21 35 static void sanitize_boot_params(struct boot_params *boot_params) 22 36 { 23 37 /* ··· 49 35 * problems again. 50 36 */ 51 37 if (boot_params->sentinel) { 52 - /* fields in boot_params are left uninitialized, clear them */ 53 - boot_params->acpi_rsdp_addr = 0; 54 - memset(&boot_params->ext_ramdisk_image, 0, 55 - (char *)&boot_params->efi_info - 56 - (char *)&boot_params->ext_ramdisk_image); 57 - memset(&boot_params->kbd_status, 0, 58 - (char *)&boot_params->hdr - 59 - (char *)&boot_params->kbd_status); 60 - memset(&boot_params->_pad7[0], 0, 61 - (char *)&boot_params->edd_mbr_sig_buffer[0] - 62 - (char *)&boot_params->_pad7[0]); 63 - memset(&boot_params->_pad8[0], 0, 64 - (char *)&boot_params->eddbuf[0] - 65 - (char *)&boot_params->_pad8[0]); 66 - memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9)); 38 + static struct boot_params scratch; 39 + char *bp_base = (char *)boot_params; 40 + char *save_base = (char *)&scratch; 41 + int i; 42 + 43 + const struct boot_params_to_save to_save[] = { 44 + BOOT_PARAM_PRESERVE(screen_info), 45 + BOOT_PARAM_PRESERVE(apm_bios_info), 46 + BOOT_PARAM_PRESERVE(tboot_addr), 47 + BOOT_PARAM_PRESERVE(ist_info), 48 + BOOT_PARAM_PRESERVE(acpi_rsdp_addr), 49 + BOOT_PARAM_PRESERVE(hd0_info), 50 + BOOT_PARAM_PRESERVE(hd1_info), 51 + BOOT_PARAM_PRESERVE(sys_desc_table), 52 + BOOT_PARAM_PRESERVE(olpc_ofw_header), 53 + BOOT_PARAM_PRESERVE(efi_info), 54 + BOOT_PARAM_PRESERVE(alt_mem_k), 55 + 
BOOT_PARAM_PRESERVE(scratch), 56 + BOOT_PARAM_PRESERVE(e820_entries), 57 + BOOT_PARAM_PRESERVE(eddbuf_entries), 58 + BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries), 59 + BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer), 60 + BOOT_PARAM_PRESERVE(e820_table), 61 + BOOT_PARAM_PRESERVE(eddbuf), 62 + }; 63 + 64 + memset(&scratch, 0, sizeof(scratch)); 65 + 66 + for (i = 0; i < ARRAY_SIZE(to_save); i++) { 67 + memcpy(save_base + to_save[i].start, 68 + bp_base + to_save[i].start, to_save[i].len); 69 + } 70 + 71 + memcpy(boot_params, save_base, sizeof(*boot_params)); 67 72 } 68 73 } 69 74
+2 -1
arch/x86/kernel/apic/probe_32.c
··· 184 184 def_to_bigsmp = 0; 185 185 break; 186 186 } 187 - /* If P4 and above fall through */ 187 + /* P4 and above */ 188 + /* fall through */ 188 189 case X86_VENDOR_HYGON: 189 190 case X86_VENDOR_AMD: 190 191 def_to_bigsmp = 1;
+38 -1
arch/x86/kernel/cpu/umwait.c
··· 18 18 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE); 19 19 20 20 /* 21 + * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by 22 + * hardware or BIOS before kernel boot. 23 + */ 24 + static u32 orig_umwait_control_cached __ro_after_init; 25 + 26 + /* 21 27 * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in 22 28 * the sysfs write functions. 23 29 */ ··· 55 49 local_irq_disable(); 56 50 umwait_update_control_msr(NULL); 57 51 local_irq_enable(); 52 + return 0; 53 + } 54 + 55 + /* 56 + * The CPU hotplug callback sets the control MSR to the original control 57 + * value. 58 + */ 59 + static int umwait_cpu_offline(unsigned int cpu) 60 + { 61 + /* 62 + * This code is protected by the CPU hotplug already and 63 + * orig_umwait_control_cached is never changed after it caches 64 + * the original control MSR value in umwait_init(). So there 65 + * is no race condition here. 66 + */ 67 + wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0); 68 + 58 69 return 0; 59 70 } 60 71 ··· 208 185 if (!boot_cpu_has(X86_FEATURE_WAITPKG)) 209 186 return -ENODEV; 210 187 188 + /* 189 + * Cache the original control MSR value before the control MSR is 190 + * changed. This is the only place where orig_umwait_control_cached 191 + * is modified. 192 + */ 193 + rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); 194 + 211 195 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online", 212 - umwait_cpu_online, NULL); 196 + umwait_cpu_online, umwait_cpu_offline); 197 + if (ret < 0) { 198 + /* 199 + * On failure, the control MSR on all CPUs has the 200 + * original control value. 201 + */ 202 + return ret; 203 + } 213 204 214 205 register_syscore_ops(&umwait_syscore_ops); 215 206
+3 -2
arch/x86/math-emu/errors.c
··· 178 178 for (i = 0; i < 8; i++) { 179 179 FPU_REG *r = &st(i); 180 180 u_char tagi = FPU_gettagi(i); 181 + 181 182 switch (tagi) { 182 183 case TAG_Empty: 183 184 continue; 184 - break; 185 185 case TAG_Zero: 186 186 case TAG_Special: 187 + /* Update tagi for the printk below */ 187 188 tagi = FPU_Special(r); 189 + /* fall through */ 188 190 case TAG_Valid: 189 191 printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i, 190 192 getsign(r) ? '-' : '+', ··· 200 198 printk("Whoops! Error in errors.c: tag%d is %d ", i, 201 199 tagi); 202 200 continue; 203 - break; 204 201 } 205 202 printk("%s\n", tag_desc[(int)(unsigned)tagi]); 206 203 }
+1 -1
arch/x86/math-emu/fpu_trig.c
··· 1352 1352 case TW_Denormal: 1353 1353 if (denormal_operand() < 0) 1354 1354 return; 1355 - 1355 + /* fall through */ 1356 1356 case TAG_Zero: 1357 1357 case TAG_Valid: 1358 1358 setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
+1
arch/xtensa/kernel/setup.c
··· 511 511 "add %2, %2, %7\n\t" 512 512 "addi %0, %0, -1\n\t" 513 513 "bnez %0, 1b\n\t" 514 + "isync\n\t" 514 515 /* Jump to identity mapping */ 515 516 "jx %3\n" 516 517 "2:\n\t"
+2 -8
block/blk-mq.c
··· 1958 1958 rq = blk_mq_get_request(q, bio, &data); 1959 1959 if (unlikely(!rq)) { 1960 1960 rq_qos_cleanup(q, bio); 1961 - 1962 - cookie = BLK_QC_T_NONE; 1963 - if (bio->bi_opf & REQ_NOWAIT_INLINE) 1964 - cookie = BLK_QC_T_EAGAIN; 1965 - else if (bio->bi_opf & REQ_NOWAIT) 1961 + if (bio->bi_opf & REQ_NOWAIT) 1966 1962 bio_wouldblock_error(bio); 1967 - return cookie; 1963 + return BLK_QC_T_NONE; 1968 1964 } 1969 1965 1970 1966 trace_block_getrq(q, bio, bio->bi_opf); ··· 2661 2665 { 2662 2666 struct blk_mq_hw_ctx *hctx, *next; 2663 2667 int i; 2664 - 2665 - cancel_delayed_work_sync(&q->requeue_work); 2666 2668 2667 2669 queue_for_each_hw_ctx(q, hctx, i) 2668 2670 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
+3
block/blk-sysfs.c
··· 892 892 893 893 blk_free_queue_stats(q->stats); 894 894 895 + if (queue_is_mq(q)) 896 + cancel_delayed_work_sync(&q->requeue_work); 897 + 895 898 blk_exit_queue(q); 896 899 897 900 blk_queue_free_zone_bitmaps(q);
+5
drivers/auxdisplay/Kconfig
··· 448 448 choice 449 449 prompt "Backlight initial state" 450 450 default CHARLCD_BL_FLASH 451 + ---help--- 452 + Select the initial backlight state on boot or module load. 453 + 454 + Previously, there was no option for this: the backlight flashed 455 + briefly on init. Now you can also turn it off/on. 451 456 452 457 config CHARLCD_BL_OFF 453 458 bool "Off"
+1 -1
drivers/auxdisplay/charlcd.c
··· 20 20 21 21 #include <generated/utsrelease.h> 22 22 23 - #include <misc/charlcd.h> 23 + #include "charlcd.h" 24 24 25 25 #define LCD_MINOR 156 26 26
+1 -2
drivers/auxdisplay/hd44780.c
··· 14 14 #include <linux/property.h> 15 15 #include <linux/slab.h> 16 16 17 - #include <misc/charlcd.h> 18 - 17 + #include "charlcd.h" 19 18 20 19 enum hd44780_pin { 21 20 /* Order does matter due to writing to GPIO array subsets! */
+3 -1
drivers/auxdisplay/panel.c
··· 55 55 #include <linux/io.h> 56 56 #include <linux/uaccess.h> 57 57 58 - #include <misc/charlcd.h> 58 + #include "charlcd.h" 59 59 60 60 #define KEYPAD_MINOR 185 61 61 ··· 1617 1617 return; 1618 1618 1619 1619 err_lcd_unreg: 1620 + if (scan_timer.function) 1621 + del_timer_sync(&scan_timer); 1620 1622 if (lcd.enabled) 1621 1623 charlcd_unregister(lcd.charlcd); 1622 1624 err_unreg_device:
+1 -1
drivers/base/regmap/Kconfig
··· 44 44 45 45 config REGMAP_SOUNDWIRE 46 46 tristate 47 - depends on SOUNDWIRE_BUS 47 + depends on SOUNDWIRE 48 48 49 49 config REGMAP_SCCB 50 50 tristate
+3 -3
drivers/block/xen-blkback/xenbus.c
··· 965 965 } 966 966 } 967 967 968 + err = -ENOMEM; 968 969 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { 969 970 req = kzalloc(sizeof(*req), GFP_KERNEL); 970 971 if (!req) ··· 988 987 err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn); 989 988 if (err) { 990 989 xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn); 991 - return err; 990 + goto fail; 992 991 } 993 992 994 993 return 0; ··· 1008 1007 } 1009 1008 kfree(req); 1010 1009 } 1011 - return -ENOMEM; 1012 - 1010 + return err; 1013 1011 } 1014 1012 1015 1013 static int connect_ring(struct backend_info *be)
+1 -1
drivers/cpufreq/cpufreq.c
··· 2528 2528 } 2529 2529 2530 2530 ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max); 2531 - if (ret) 2531 + if (ret < 0) 2532 2532 break; 2533 2533 } 2534 2534
+1 -1
drivers/dma/dw-edma/dw-edma-core.h
··· 50 50 51 51 struct dw_edma_region { 52 52 phys_addr_t paddr; 53 - dma_addr_t vaddr; 53 + void __iomem *vaddr; 54 54 size_t sz; 55 55 }; 56 56
+9 -9
drivers/dma/dw-edma/dw-edma-pcie.c
··· 130 130 chip->id = pdev->devfn; 131 131 chip->irq = pdev->irq; 132 132 133 - dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar]; 133 + dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar]; 134 134 dw->rg_region.vaddr += pdata->rg_off; 135 135 dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; 136 136 dw->rg_region.paddr += pdata->rg_off; 137 137 dw->rg_region.sz = pdata->rg_sz; 138 138 139 - dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar]; 139 + dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar]; 140 140 dw->ll_region.vaddr += pdata->ll_off; 141 141 dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; 142 142 dw->ll_region.paddr += pdata->ll_off; 143 143 dw->ll_region.sz = pdata->ll_sz; 144 144 145 - dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar]; 145 + dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar]; 146 146 dw->dt_region.vaddr += pdata->dt_off; 147 147 dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; 148 148 dw->dt_region.paddr += pdata->dt_off; ··· 158 158 pci_dbg(pdev, "Mode:\t%s\n", 159 159 dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll"); 160 160 161 - pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 161 + pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", 162 162 pdata->rg_bar, pdata->rg_off, pdata->rg_sz, 163 - &dw->rg_region.vaddr, &dw->rg_region.paddr); 163 + dw->rg_region.vaddr, &dw->rg_region.paddr); 164 164 165 - pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 165 + pci_dbg(pdev, "L. 
List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", 166 166 pdata->ll_bar, pdata->ll_off, pdata->ll_sz, 167 - &dw->ll_region.vaddr, &dw->ll_region.paddr); 167 + dw->ll_region.vaddr, &dw->ll_region.paddr); 168 168 169 - pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 169 + pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", 170 170 pdata->dt_bar, pdata->dt_off, pdata->dt_sz, 171 - &dw->dt_region.vaddr, &dw->dt_region.paddr); 171 + dw->dt_region.vaddr, &dw->dt_region.paddr); 172 172 173 173 pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs); 174 174
+15 -19
drivers/dma/dw-edma/dw-edma-v0-core.c
··· 25 25 26 26 static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) 27 27 { 28 - return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr; 28 + return dw->rg_region.vaddr; 29 29 } 30 30 31 31 #define SET(dw, name, value) \ ··· 192 192 static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) 193 193 { 194 194 struct dw_edma_burst *child; 195 - struct dw_edma_v0_lli *lli; 196 - struct dw_edma_v0_llp *llp; 195 + struct dw_edma_v0_lli __iomem *lli; 196 + struct dw_edma_v0_llp __iomem *llp; 197 197 u32 control = 0, i = 0; 198 - u64 sar, dar, addr; 199 198 int j; 200 199 201 - lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr; 200 + lli = chunk->ll_region.vaddr; 202 201 203 202 if (chunk->cb) 204 203 control = DW_EDMA_V0_CB; ··· 213 214 /* Transfer size */ 214 215 SET_LL(&lli[i].transfer_size, child->sz); 215 216 /* SAR - low, high */ 216 - sar = cpu_to_le64(child->sar); 217 - SET_LL(&lli[i].sar_low, lower_32_bits(sar)); 218 - SET_LL(&lli[i].sar_high, upper_32_bits(sar)); 217 + SET_LL(&lli[i].sar_low, lower_32_bits(child->sar)); 218 + SET_LL(&lli[i].sar_high, upper_32_bits(child->sar)); 219 219 /* DAR - low, high */ 220 - dar = cpu_to_le64(child->dar); 221 - SET_LL(&lli[i].dar_low, lower_32_bits(dar)); 222 - SET_LL(&lli[i].dar_high, upper_32_bits(dar)); 220 + SET_LL(&lli[i].dar_low, lower_32_bits(child->dar)); 221 + SET_LL(&lli[i].dar_high, upper_32_bits(child->dar)); 223 222 i++; 224 223 } 225 224 226 - llp = (struct dw_edma_v0_llp *)&lli[i]; 225 + llp = (void __iomem *)&lli[i]; 227 226 control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; 228 227 if (!chunk->cb) 229 228 control |= DW_EDMA_V0_CB; ··· 229 232 /* Channel control */ 230 233 SET_LL(&llp->control, control); 231 234 /* Linked list - low, high */ 232 - addr = cpu_to_le64(chunk->ll_region.paddr); 233 - SET_LL(&llp->llp_low, lower_32_bits(addr)); 234 - SET_LL(&llp->llp_high, upper_32_bits(addr)); 235 + SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr)); 236 + 
SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr)); 235 237 } 236 238 237 239 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) ··· 238 242 struct dw_edma_chan *chan = chunk->chan; 239 243 struct dw_edma *dw = chan->chip->dw; 240 244 u32 tmp; 241 - u64 llp; 242 245 243 246 dw_edma_v0_core_write_chunk(chunk); 244 247 ··· 257 262 SET_CH(dw, chan->dir, chan->id, ch_control1, 258 263 (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); 259 264 /* Linked list - low, high */ 260 - llp = cpu_to_le64(chunk->ll_region.paddr); 261 - SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp)); 262 - SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp)); 265 + SET_CH(dw, chan->dir, chan->id, llp_low, 266 + lower_32_bits(chunk->ll_region.paddr)); 267 + SET_CH(dw, chan->dir, chan->id, llp_high, 268 + upper_32_bits(chunk->ll_region.paddr)); 263 269 } 264 270 /* Doorbell */ 265 271 SET_RW(dw, chan->dir, doorbell,
+15 -14
drivers/dma/dw-edma/dw-edma-v0-debugfs.c
··· 14 14 #include "dw-edma-core.h" 15 15 16 16 #define REGS_ADDR(name) \ 17 - ((dma_addr_t *)&regs->name) 17 + ((void __force *)&regs->name) 18 18 #define REGISTER(name) \ 19 19 { #name, REGS_ADDR(name) } 20 20 ··· 40 40 41 41 static struct dentry *base_dir; 42 42 static struct dw_edma *dw; 43 - static struct dw_edma_v0_regs *regs; 43 + static struct dw_edma_v0_regs __iomem *regs; 44 44 45 45 static struct { 46 - void *start; 47 - void *end; 46 + void __iomem *start; 47 + void __iomem *end; 48 48 } lim[2][EDMA_V0_MAX_NR_CH]; 49 49 50 50 struct debugfs_entries { 51 - char name[24]; 51 + const char *name; 52 52 dma_addr_t *reg; 53 53 }; 54 54 55 55 static int dw_edma_debugfs_u32_get(void *data, u64 *val) 56 56 { 57 + void __iomem *reg = (void __force __iomem *)data; 57 58 if (dw->mode == EDMA_MODE_LEGACY && 58 - data >= (void *)&regs->type.legacy.ch) { 59 - void *ptr = (void *)&regs->type.legacy.ch; 59 + reg >= (void __iomem *)&regs->type.legacy.ch) { 60 + void __iomem *ptr = &regs->type.legacy.ch; 60 61 u32 viewport_sel = 0; 61 62 unsigned long flags; 62 63 u16 ch; 63 64 64 65 for (ch = 0; ch < dw->wr_ch_cnt; ch++) 65 - if (lim[0][ch].start >= data && data < lim[0][ch].end) { 66 - ptr += (data - lim[0][ch].start); 66 + if (lim[0][ch].start >= reg && reg < lim[0][ch].end) { 67 + ptr += (reg - lim[0][ch].start); 67 68 goto legacy_sel_wr; 68 69 } 69 70 70 71 for (ch = 0; ch < dw->rd_ch_cnt; ch++) 71 - if (lim[1][ch].start >= data && data < lim[1][ch].end) { 72 - ptr += (data - lim[1][ch].start); 72 + if (lim[1][ch].start >= reg && reg < lim[1][ch].end) { 73 + ptr += (reg - lim[1][ch].start); 73 74 goto legacy_sel_rd; 74 75 } 75 76 ··· 87 86 88 87 raw_spin_unlock_irqrestore(&dw->lock, flags); 89 88 } else { 90 - *val = readl(data); 89 + *val = readl(reg); 91 90 } 92 91 93 92 return 0; ··· 106 105 } 107 106 } 108 107 109 - static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs, 108 + static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem 
*regs, 110 109 struct dentry *dir) 111 110 { 112 111 int nr_entries; ··· 289 288 if (!dw) 290 289 return; 291 290 292 - regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr; 291 + regs = dw->rg_region.vaddr; 293 292 if (!regs) 294 293 return; 295 294
+2 -2
drivers/dma/ste_dma40.c
··· 142 142 * when the DMA hw is powered off. 143 143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. 144 144 */ 145 - static u32 d40_backup_regs[] = { 145 + static __maybe_unused u32 d40_backup_regs[] = { 146 146 D40_DREG_LCPA, 147 147 D40_DREG_LCLA, 148 148 D40_DREG_PRMSE, ··· 211 211 212 212 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) 213 213 214 - static u32 d40_backup_regs_chan[] = { 214 + static __maybe_unused u32 d40_backup_regs_chan[] = { 215 215 D40_CHAN_REG_SSCFG, 216 216 D40_CHAN_REG_SSELT, 217 217 D40_CHAN_REG_SSPTR,
+1 -1
drivers/dma/stm32-mdma.c
··· 1366 1366 1367 1367 chan = &dmadev->chan[id]; 1368 1368 if (!chan) { 1369 - dev_err(chan2dev(chan), "MDMA channel not initialized\n"); 1369 + dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n"); 1370 1370 goto exit; 1371 1371 } 1372 1372
+2 -2
drivers/dma/tegra210-adma.c
··· 712 712 return chan; 713 713 } 714 714 715 - static int tegra_adma_runtime_suspend(struct device *dev) 715 + static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev) 716 716 { 717 717 struct tegra_adma *tdma = dev_get_drvdata(dev); 718 718 struct tegra_adma_chan_regs *ch_reg; ··· 744 744 return 0; 745 745 } 746 746 747 - static int tegra_adma_runtime_resume(struct device *dev) 747 + static int __maybe_unused tegra_adma_runtime_resume(struct device *dev) 748 748 { 749 749 struct tegra_adma *tdma = dev_get_drvdata(dev); 750 750 struct tegra_adma_chan_regs *ch_reg;
+2 -2
drivers/dma/ti/omap-dma.c
··· 1234 1234 if (src_icg) { 1235 1235 d->ccr |= CCR_SRC_AMODE_DBLIDX; 1236 1236 d->ei = 1; 1237 - d->fi = src_icg; 1237 + d->fi = src_icg + 1; 1238 1238 } else if (xt->src_inc) { 1239 1239 d->ccr |= CCR_SRC_AMODE_POSTINC; 1240 1240 d->fi = 0; ··· 1249 1249 if (dst_icg) { 1250 1250 d->ccr |= CCR_DST_AMODE_DBLIDX; 1251 1251 sg->ei = 1; 1252 - sg->fi = dst_icg; 1252 + sg->fi = dst_icg + 1; 1253 1253 } else if (xt->dst_inc) { 1254 1254 d->ccr |= CCR_DST_AMODE_POSTINC; 1255 1255 sg->fi = 0;
+27 -11
drivers/firmware/efi/libstub/efi-stub-helper.c
··· 927 927 return status; 928 928 } 929 929 930 + #define GET_EFI_CONFIG_TABLE(bits) \ 931 + static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \ 932 + efi_guid_t guid) \ 933 + { \ 934 + efi_system_table_##bits##_t *sys_table; \ 935 + efi_config_table_##bits##_t *tables; \ 936 + int i; \ 937 + \ 938 + sys_table = (typeof(sys_table))_sys_table; \ 939 + tables = (typeof(tables))(unsigned long)sys_table->tables; \ 940 + \ 941 + for (i = 0; i < sys_table->nr_tables; i++) { \ 942 + if (efi_guidcmp(tables[i].guid, guid) != 0) \ 943 + continue; \ 944 + \ 945 + return (void *)(unsigned long)tables[i].table; \ 946 + } \ 947 + \ 948 + return NULL; \ 949 + } 950 + GET_EFI_CONFIG_TABLE(32) 951 + GET_EFI_CONFIG_TABLE(64) 952 + 930 953 void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid) 931 954 { 932 - efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables; 933 - int i; 934 - 935 - for (i = 0; i < sys_table->nr_tables; i++) { 936 - if (efi_guidcmp(tables[i].guid, guid) != 0) 937 - continue; 938 - 939 - return (void *)tables[i].table; 940 - } 941 - 942 - return NULL; 955 + if (efi_is_64bit()) 956 + return get_efi_config_table64(sys_table, guid); 957 + else 958 + return get_efi_config_table32(sys_table, guid); 943 959 }
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 4869 4869 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 4870 4870 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 4871 4871 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 4872 - WREG32(mmSQ_CMD, value); 4872 + WREG32_SOC15(GC, 0, mmSQ_CMD, value); 4873 4873 } 4874 4874 4875 4875 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+6 -5
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 23 23 */ 24 24 25 25 #include <linux/slab.h> 26 + #include <linux/mm.h> 26 27 27 28 #include "dm_services.h" 28 29 ··· 1172 1171 1173 1172 struct dc_state *dc_create_state(struct dc *dc) 1174 1173 { 1175 - struct dc_state *context = kzalloc(sizeof(struct dc_state), 1176 - GFP_KERNEL); 1174 + struct dc_state *context = kvzalloc(sizeof(struct dc_state), 1175 + GFP_KERNEL); 1177 1176 1178 1177 if (!context) 1179 1178 return NULL; ··· 1193 1192 struct dc_state *dc_copy_state(struct dc_state *src_ctx) 1194 1193 { 1195 1194 int i, j; 1196 - struct dc_state *new_ctx = kmemdup(src_ctx, 1197 - sizeof(struct dc_state), GFP_KERNEL); 1195 + struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 1198 1196 1199 1197 if (!new_ctx) 1200 1198 return NULL; 1199 + memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 1201 1200 1202 1201 for (i = 0; i < MAX_PIPES; i++) { 1203 1202 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; ··· 1231 1230 { 1232 1231 struct dc_state *context = container_of(kref, struct dc_state, refcount); 1233 1232 dc_resource_state_destruct(context); 1234 - kfree(context); 1233 + kvfree(context); 1235 1234 } 1236 1235 1237 1236 void dc_release_state(struct dc_state *context)
+4 -1
drivers/gpu/drm/ast/ast_main.c
··· 131 131 132 132 133 133 /* Enable extended register access */ 134 - ast_enable_mmio(dev); 135 134 ast_open_key(ast); 135 + ast_enable_mmio(dev); 136 136 137 137 /* Find out whether P2A works or whether to use device-tree */ 138 138 ast_detect_config_mode(dev, &scu_rev); ··· 575 575 void ast_driver_unload(struct drm_device *dev) 576 576 { 577 577 struct ast_private *ast = dev->dev_private; 578 + 579 + /* enable standard VGA decode */ 580 + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); 578 581 579 582 ast_release_firmware(dev); 580 583 kfree(ast->dp501_fw_addr);
+1 -1
drivers/gpu/drm/ast/ast_mode.c
··· 604 604 return -EINVAL; 605 605 ast_open_key(ast); 606 606 607 - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); 607 + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); 608 608 609 609 ast_set_std_reg(crtc, adjusted_mode, &vbios_mode); 610 610 ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+1 -1
drivers/gpu/drm/ast/ast_post.c
··· 46 46 { 47 47 struct ast_private *ast = dev->dev_private; 48 48 49 - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); 49 + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); 50 50 } 51 51 52 52
+2 -2
drivers/gpu/drm/i915/gvt/scheduler.c
··· 1528 1528 if (!intel_gvt_ggtt_validate_range(vgpu, 1529 1529 workload->wa_ctx.indirect_ctx.guest_gma, 1530 1530 workload->wa_ctx.indirect_ctx.size)) { 1531 - kmem_cache_free(s->workloads, workload); 1532 1531 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n", 1533 1532 workload->wa_ctx.indirect_ctx.guest_gma); 1533 + kmem_cache_free(s->workloads, workload); 1534 1534 return ERR_PTR(-EINVAL); 1535 1535 } 1536 1536 } ··· 1542 1542 if (!intel_gvt_ggtt_validate_range(vgpu, 1543 1543 workload->wa_ctx.per_ctx.guest_gma, 1544 1544 CACHELINE_BYTES)) { 1545 - kmem_cache_free(s->workloads, workload); 1546 1545 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n", 1547 1546 workload->wa_ctx.per_ctx.guest_gma); 1547 + kmem_cache_free(s->workloads, workload); 1548 1548 return ERR_PTR(-EINVAL); 1549 1549 } 1550 1550 }
+13 -9
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 771 771 struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); 772 772 int slots; 773 773 774 - /* When restoring duplicated states, we need to make sure that the 775 - * bw remains the same and avoid recalculating it, as the connector's 776 - * bpc may have changed after the state was duplicated 777 - */ 778 - if (!state->duplicated) 779 - asyh->dp.pbn = 780 - drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 781 - connector->display_info.bpc * 3); 774 + if (crtc_state->mode_changed || crtc_state->connectors_changed) { 775 + /* 776 + * When restoring duplicated states, we need to make sure that 777 + * the bw remains the same and avoid recalculating it, as the 778 + * connector's bpc may have changed after the state was 779 + * duplicated 780 + */ 781 + if (!state->duplicated) { 782 + const int bpp = connector->display_info.bpc * 3; 783 + const int clock = crtc_state->adjusted_mode.clock; 782 784 783 - if (crtc_state->mode_changed) { 785 + asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp); 786 + } 787 + 784 788 slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, 785 789 mstc->port, 786 790 asyh->dp.pbn);
+2 -2
drivers/gpu/drm/scheduler/sched_entity.c
··· 95 95 rmb(); /* for list_empty to work without lock */ 96 96 97 97 if (list_empty(&entity->list) || 98 - spsc_queue_peek(&entity->job_queue) == NULL) 98 + spsc_queue_count(&entity->job_queue) == 0) 99 99 return true; 100 100 101 101 return false; ··· 281 281 /* Consumption of existing IBs wasn't completed. Forcefully 282 282 * remove them here. 283 283 */ 284 - if (spsc_queue_peek(&entity->job_queue)) { 284 + if (spsc_queue_count(&entity->job_queue)) { 285 285 if (sched) { 286 286 /* Park the kernel for a moment to make sure it isn't processing 287 287 * our enity.
+1 -1
drivers/hv/hv_trace.h
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 3 3 #undef TRACE_SYSTEM 4 4 #define TRACE_SYSTEM hyperv
+1 -1
drivers/hwtracing/intel_th/msu.h
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 3 * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures 4 4 *
+1 -1
drivers/hwtracing/intel_th/pti.h
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 3 * Intel(R) Trace Hub PTI output data structures 4 4 *
+12 -4
drivers/i2c/busses/i2c-emev2.c
··· 69 69 struct completion msg_done; 70 70 struct clk *sclk; 71 71 struct i2c_client *slave; 72 + int irq; 72 73 }; 73 74 74 75 static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg) ··· 340 339 341 340 writeb(0, priv->base + I2C_OFS_SVA0); 342 341 342 + /* 343 + * Wait for interrupt to finish. New slave irqs cannot happen because we 344 + * cleared the slave address and, thus, only extension codes will be 345 + * detected which do not use the slave ptr. 346 + */ 347 + synchronize_irq(priv->irq); 343 348 priv->slave = NULL; 344 349 345 350 return 0; ··· 362 355 { 363 356 struct em_i2c_device *priv; 364 357 struct resource *r; 365 - int irq, ret; 358 + int ret; 366 359 367 360 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 368 361 if (!priv) ··· 397 390 398 391 em_i2c_reset(&priv->adap); 399 392 400 - irq = platform_get_irq(pdev, 0); 401 - ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0, 393 + priv->irq = platform_get_irq(pdev, 0); 394 + ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0, 402 395 "em_i2c", priv); 403 396 if (ret) 404 397 goto err_clk; ··· 408 401 if (ret) 409 402 goto err_clk; 410 403 411 - dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq); 404 + dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, 405 + priv->irq); 412 406 413 407 return 0; 414 408
+6 -12
drivers/i2c/busses/i2c-imx.c
··· 273 273 } 274 274 275 275 /* Functions for DMA support */ 276 - static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, 277 - dma_addr_t phy_addr) 276 + static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, 277 + dma_addr_t phy_addr) 278 278 { 279 279 struct imx_i2c_dma *dma; 280 280 struct dma_slave_config dma_sconfig; ··· 283 283 284 284 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); 285 285 if (!dma) 286 - return -ENOMEM; 286 + return; 287 287 288 288 dma->chan_tx = dma_request_chan(dev, "tx"); 289 289 if (IS_ERR(dma->chan_tx)) { ··· 328 328 dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", 329 329 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); 330 330 331 - return 0; 331 + return; 332 332 333 333 fail_rx: 334 334 dma_release_channel(dma->chan_rx); ··· 336 336 dma_release_channel(dma->chan_tx); 337 337 fail_al: 338 338 devm_kfree(dev, dma); 339 - /* return successfully if there is no dma support */ 340 - return ret == -ENODEV ? 0 : ret; 341 339 } 342 340 343 341 static void i2c_imx_dma_callback(void *arg) ··· 1163 1165 dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res); 1164 1166 dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", 1165 1167 i2c_imx->adapter.name); 1168 + dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); 1166 1169 1167 1170 /* Init DMA config if supported */ 1168 - ret = i2c_imx_dma_request(i2c_imx, phy_addr); 1169 - if (ret < 0) 1170 - goto del_adapter; 1171 + i2c_imx_dma_request(i2c_imx, phy_addr); 1171 1172 1172 - dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); 1173 1173 return 0; /* Return OK */ 1174 1174 1175 - del_adapter: 1176 - i2c_del_adapter(&i2c_imx->adapter); 1177 1175 clk_notifier_unregister: 1178 1176 clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); 1179 1177 rpm_disable:
+7 -4
drivers/i2c/busses/i2c-rcar.c
··· 139 139 enum dma_data_direction dma_direction; 140 140 141 141 struct reset_control *rstc; 142 + int irq; 142 143 }; 143 144 144 145 #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) ··· 862 861 863 862 WARN_ON(!priv->slave); 864 863 864 + /* disable irqs and ensure none is running before clearing ptr */ 865 865 rcar_i2c_write(priv, ICSIER, 0); 866 866 rcar_i2c_write(priv, ICSCR, 0); 867 867 868 + synchronize_irq(priv->irq); 868 869 priv->slave = NULL; 869 870 870 871 pm_runtime_put(rcar_i2c_priv_to_dev(priv)); ··· 921 918 struct i2c_adapter *adap; 922 919 struct device *dev = &pdev->dev; 923 920 struct i2c_timings i2c_t; 924 - int irq, ret; 921 + int ret; 925 922 926 923 /* Otherwise logic will break because some bytes must always use PIO */ 927 924 BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length"); ··· 987 984 pm_runtime_put(dev); 988 985 989 986 990 - irq = platform_get_irq(pdev, 0); 991 - ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv); 987 + priv->irq = platform_get_irq(pdev, 0); 988 + ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv); 992 989 if (ret < 0) { 993 - dev_err(dev, "cannot get irq %d\n", irq); 990 + dev_err(dev, "cannot get irq %d\n", priv->irq); 994 991 goto out_pm_disable; 995 992 } 996 993
+1 -1
drivers/i2c/busses/i2c-stm32.h
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 3 * i2c-stm32.h 4 4 *
+1 -1
drivers/iio/adc/max9611.c
··· 480 480 if (ret) 481 481 return ret; 482 482 483 - regval = ret & MAX9611_TEMP_MASK; 483 + regval &= MAX9611_TEMP_MASK; 484 484 485 485 if ((regval > MAX9611_TEMP_MAX_POS && 486 486 regval < MAX9611_TEMP_MIN_NEG) ||
+4 -4
drivers/iio/frequency/adf4371.c
··· 276 276 st->buf[0] = st->integer >> 8; 277 277 st->buf[1] = 0x40; /* REG12 default */ 278 278 st->buf[2] = 0x00; 279 - st->buf[3] = st->fract2 & 0xFF; 280 - st->buf[4] = st->fract2 >> 7; 281 - st->buf[5] = st->fract2 >> 15; 279 + st->buf[3] = st->fract1 & 0xFF; 280 + st->buf[4] = st->fract1 >> 8; 281 + st->buf[5] = st->fract1 >> 16; 282 282 st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) | 283 - ADF4371_FRAC1WORD(st->fract1 >> 23); 283 + ADF4371_FRAC1WORD(st->fract1 >> 24); 284 284 st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7); 285 285 st->buf[8] = st->mod2 & 0xFF; 286 286 st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);
+6
drivers/infiniband/core/counters.c
··· 38 38 int ret; 39 39 40 40 port_counter = &dev->port_data[port].port_counter; 41 + if (!port_counter->hstats) 42 + return -EOPNOTSUPP; 43 + 41 44 mutex_lock(&port_counter->lock); 42 45 if (on) { 43 46 ret = __counter_set_mode(&port_counter->mode, ··· 511 508 512 509 if (!rdma_is_port_valid(dev, port)) 513 510 return -EINVAL; 511 + 512 + if (!dev->port_data[port].port_counter.hstats) 513 + return -EOPNOTSUPP; 514 514 515 515 qp = rdma_counter_get_qp(dev, qp_num); 516 516 if (!qp)
+6 -2
drivers/infiniband/core/nldev.c
··· 1952 1952 1953 1953 if (fill_nldev_handle(msg, device) || 1954 1954 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 1955 - nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) 1955 + nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { 1956 + ret = -EMSGSIZE; 1956 1957 goto err_msg; 1958 + } 1957 1959 1958 1960 if ((mode == RDMA_COUNTER_MODE_AUTO) && 1959 - nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) 1961 + nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { 1962 + ret = -EMSGSIZE; 1960 1963 goto err_msg; 1964 + } 1961 1965 1962 1966 nlmsg_end(msg, nlh); 1963 1967 ib_device_put(device);
-4
drivers/infiniband/core/umem_odp.c
··· 112 112 * prevent any further fault handling on this MR. 113 113 */ 114 114 ib_umem_notifier_start_account(umem_odp); 115 - umem_odp->dying = 1; 116 - /* Make sure that the fact the umem is dying is out before we release 117 - * all pending page faults. */ 118 - smp_wmb(); 119 115 complete_all(&umem_odp->notifier_completion); 120 116 umem_odp->umem.context->invalidate_range( 121 117 umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
+6 -5
drivers/infiniband/hw/mlx5/devx.c
··· 2026 2026 event_sub->eventfd = 2027 2027 eventfd_ctx_fdget(redirect_fd); 2028 2028 2029 - if (IS_ERR(event_sub)) { 2029 + if (IS_ERR(event_sub->eventfd)) { 2030 2030 err = PTR_ERR(event_sub->eventfd); 2031 2031 event_sub->eventfd = NULL; 2032 2032 goto err; ··· 2644 2644 struct devx_async_event_file *ev_file = filp->private_data; 2645 2645 struct devx_event_subscription *event_sub, *event_sub_tmp; 2646 2646 struct devx_async_event_data *entry, *tmp; 2647 + struct mlx5_ib_dev *dev = ev_file->dev; 2647 2648 2648 - mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock); 2649 + mutex_lock(&dev->devx_event_table.event_xa_lock); 2649 2650 /* delete the subscriptions which are related to this FD */ 2650 2651 list_for_each_entry_safe(event_sub, event_sub_tmp, 2651 2652 &ev_file->subscribed_events_list, file_list) { 2652 - devx_cleanup_subscription(ev_file->dev, event_sub); 2653 + devx_cleanup_subscription(dev, event_sub); 2653 2654 if (event_sub->eventfd) 2654 2655 eventfd_ctx_put(event_sub->eventfd); 2655 2656 ··· 2659 2658 kfree_rcu(event_sub, rcu); 2660 2659 } 2661 2660 2662 - mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock); 2661 + mutex_unlock(&dev->devx_event_table.event_xa_lock); 2663 2662 2664 2663 /* free the pending events allocation */ 2665 2664 if (!ev_file->omit_data) { ··· 2671 2670 } 2672 2671 2673 2672 uverbs_close_fd(filp); 2674 - put_device(&ev_file->dev->ib_dev.dev); 2673 + put_device(&dev->ib_dev.dev); 2675 2674 return 0; 2676 2675 } 2677 2676
+8 -14
drivers/infiniband/hw/mlx5/odp.c
··· 579 579 u32 flags) 580 580 { 581 581 int npages = 0, current_seq, page_shift, ret, np; 582 - bool implicit = false; 583 582 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); 584 583 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; 585 584 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; ··· 593 594 if (IS_ERR(odp)) 594 595 return PTR_ERR(odp); 595 596 mr = odp->private; 596 - implicit = true; 597 597 } else { 598 598 odp = odp_mr; 599 599 } ··· 680 682 681 683 out: 682 684 if (ret == -EAGAIN) { 683 - if (implicit || !odp->dying) { 684 - unsigned long timeout = 685 - msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); 685 + unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); 686 686 687 - if (!wait_for_completion_timeout( 688 - &odp->notifier_completion, 689 - timeout)) { 690 - mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n", 691 - current_seq, odp->notifiers_seq, odp->notifiers_count); 692 - } 693 - } else { 694 - /* The MR is being killed, kill the QP as well. */ 695 - ret = -EFAULT; 687 + if (!wait_for_completion_timeout(&odp->notifier_completion, 688 + timeout)) { 689 + mlx5_ib_warn( 690 + dev, 691 + "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n", 692 + current_seq, odp->notifiers_seq, 693 + odp->notifiers_count); 696 694 } 697 695 } 698 696
+1 -1
drivers/infiniband/sw/siw/Kconfig
··· 1 1 config RDMA_SIW 2 2 tristate "Software RDMA over TCP/IP (iWARP) driver" 3 - depends on INET && INFINIBAND && LIBCRC32C && 64BIT 3 + depends on INET && INFINIBAND && LIBCRC32C 4 4 select DMA_VIRT_OPS 5 5 help 6 6 This driver implements the iWARP RDMA transport over
+1 -1
drivers/infiniband/sw/siw/siw.h
··· 214 214 struct siw_cq { 215 215 struct ib_cq base_cq; 216 216 spinlock_t lock; 217 - u64 *notify; 217 + struct siw_cq_ctrl *notify; 218 218 struct siw_cqe *queue; 219 219 u32 cq_put; 220 220 u32 cq_get;
+1 -3
drivers/infiniband/sw/siw/siw_main.c
··· 160 160 161 161 out_err: 162 162 siw_cpu_info.num_nodes = 0; 163 - while (i) { 163 + while (--i >= 0) 164 164 kfree(siw_cpu_info.tx_valid_cpus[i]); 165 - siw_cpu_info.tx_valid_cpus[i--] = NULL; 166 - } 167 165 kfree(siw_cpu_info.tx_valid_cpus); 168 166 siw_cpu_info.tx_valid_cpus = NULL; 169 167
+10 -4
drivers/infiniband/sw/siw/siw_qp.c
··· 1013 1013 */ 1014 1014 static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags) 1015 1015 { 1016 - u64 cq_notify; 1016 + u32 cq_notify; 1017 1017 1018 1018 if (!cq->base_cq.comp_handler) 1019 1019 return false; 1020 1020 1021 - cq_notify = READ_ONCE(*cq->notify); 1021 + /* Read application shared notification state */ 1022 + cq_notify = READ_ONCE(cq->notify->flags); 1022 1023 1023 1024 if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) || 1024 1025 ((cq_notify & SIW_NOTIFY_SOLICITED) && 1025 1026 (flags & SIW_WQE_SOLICITED))) { 1026 - /* dis-arm CQ */ 1027 - smp_store_mb(*cq->notify, SIW_NOTIFY_NOT); 1027 + /* 1028 + * CQ notification is one-shot: Since the 1029 + * current CQE causes user notification, 1030 + * the CQ gets dis-armed and must be re-armed 1031 + * by the user for a new notification. 1032 + */ 1033 + WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT); 1028 1034 1029 1035 return true; 1030 1036 }
+11 -5
drivers/infiniband/sw/siw/siw_verbs.c
··· 1049 1049 1050 1050 spin_lock_init(&cq->lock); 1051 1051 1052 - cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify; 1052 + cq->notify = (struct siw_cq_ctrl *)&cq->queue[size]; 1053 1053 1054 1054 if (udata) { 1055 1055 struct siw_uresp_create_cq uresp = {}; ··· 1141 1141 siw_dbg_cq(cq, "flags: 0x%02x\n", flags); 1142 1142 1143 1143 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) 1144 - /* CQ event for next solicited completion */ 1145 - smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED); 1144 + /* 1145 + * Enable CQ event for next solicited completion, 1146 + * and make it visible to all associated producers. 1147 + */ 1148 + smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED); 1146 1149 else 1147 - /* CQ event for any signalled completion */ 1148 - smp_store_mb(*cq->notify, SIW_NOTIFY_ALL); 1150 + /* 1151 + * Enable CQ event for any signalled completion, 1152 + * and make it visible to all associated producers. 1153 + */ 1154 + smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL); 1149 1155 1150 1156 if (flags & IB_CQ_REPORT_MISSED_EVENTS) 1151 1157 return cq->cq_put - cq->cq_get;
+2 -2
drivers/iommu/arm-smmu-v3.c
··· 1186 1186 ste_live = true; 1187 1187 break; 1188 1188 case STRTAB_STE_0_CFG_ABORT: 1189 - if (disable_bypass) 1190 - break; 1189 + BUG_ON(!disable_bypass); 1190 + break; 1191 1191 default: 1192 1192 BUG(); /* STE corruption */ 1193 1193 }
+14 -11
drivers/iommu/dma-iommu.c
··· 459 459 { 460 460 struct iommu_domain *domain = iommu_get_dma_domain(dev); 461 461 struct iommu_dma_cookie *cookie = domain->iova_cookie; 462 - size_t iova_off = 0; 462 + struct iova_domain *iovad = &cookie->iovad; 463 + size_t iova_off = iova_offset(iovad, phys); 463 464 dma_addr_t iova; 464 465 465 - if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { 466 - iova_off = iova_offset(&cookie->iovad, phys); 467 - size = iova_align(&cookie->iovad, size + iova_off); 468 - } 466 + size = iova_align(iovad, size + iova_off); 469 467 470 468 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); 471 469 if (!iova) ··· 572 574 struct iova_domain *iovad = &cookie->iovad; 573 575 bool coherent = dev_is_dma_coherent(dev); 574 576 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); 575 - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); 577 + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); 576 578 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; 577 579 struct page **pages; 578 580 struct sg_table sgt; ··· 762 764 * - and wouldn't make the resulting output segment too long 763 765 */ 764 766 if (cur_len && !s_iova_off && (dma_addr & seg_mask) && 765 - (cur_len + s_length <= max_len)) { 767 + (max_len - cur_len >= s_length)) { 766 768 /* ...then concatenate it with the previous one */ 767 769 cur_len += s_length; 768 770 } else { ··· 973 975 return NULL; 974 976 975 977 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 976 - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); 978 + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); 977 979 978 980 cpu_addr = dma_common_contiguous_remap(page, alloc_size, 979 981 VM_USERMAP, prot, __builtin_return_address(0)); ··· 1033 1035 unsigned long pfn, off = vma->vm_pgoff; 1034 1036 int ret; 1035 1037 1036 - vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 1038 + vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); 
1037 1039 1038 1040 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 1039 1041 return ret; ··· 1145 1147 if (!msi_page) 1146 1148 return NULL; 1147 1149 1148 - iova = __iommu_dma_map(dev, msi_addr, size, prot); 1149 - if (iova == DMA_MAPPING_ERROR) 1150 + iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); 1151 + if (!iova) 1150 1152 goto out_free_page; 1153 + 1154 + if (iommu_map(domain, iova, msi_addr, size, prot)) 1155 + goto out_free_iova; 1151 1156 1152 1157 INIT_LIST_HEAD(&msi_page->list); 1153 1158 msi_page->phys = msi_addr; ··· 1158 1157 list_add(&msi_page->list, &cookie->msi_page_list); 1159 1158 return msi_page; 1160 1159 1160 + out_free_iova: 1161 + iommu_dma_free_iova(cookie, iova, size); 1161 1162 out_free_page: 1162 1163 kfree(msi_page); 1163 1164 return NULL;
+1 -1
drivers/iommu/intel-iommu-debugfs.c
··· 235 235 tbl_wlk.ctx_entry = context; 236 236 m->private = &tbl_wlk; 237 237 238 - if (pasid_supported(iommu) && is_pasid_enabled(context)) { 238 + if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) { 239 239 pasid_dir_ptr = context->lo & VTD_PAGE_MASK; 240 240 pasid_dir_size = get_pasid_dir_size(context); 241 241 pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
+9 -2
drivers/iommu/intel-iommu.c
··· 3449 3449 dmar_domain = to_dmar_domain(domain); 3450 3450 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 3451 3451 } 3452 + dmar_remove_one_dev_info(dev); 3452 3453 get_private_domain_for_dev(dev); 3453 3454 } 3454 3455 ··· 4791 4790 4792 4791 /* free the private domain */ 4793 4792 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN && 4794 - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) 4793 + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && 4794 + list_empty(&domain->devices)) 4795 4795 domain_exit(info->domain); 4796 4796 4797 4797 free_devinfo_mem(info); ··· 4805 4803 4806 4804 spin_lock_irqsave(&device_domain_lock, flags); 4807 4805 info = dev->archdata.iommu; 4808 - __dmar_remove_one_dev_info(info); 4806 + if (info) 4807 + __dmar_remove_one_dev_info(info); 4809 4808 spin_unlock_irqrestore(&device_domain_lock, flags); 4810 4809 } 4811 4810 ··· 5284 5281 if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) { 5285 5282 ret = iommu_request_dm_for_dev(dev); 5286 5283 if (ret) { 5284 + dmar_remove_one_dev_info(dev); 5287 5285 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 5288 5286 domain_add_dev_info(si_domain, dev); 5289 5287 dev_info(dev, ··· 5295 5291 if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) { 5296 5292 ret = iommu_request_dma_domain_for_dev(dev); 5297 5293 if (ret) { 5294 + dmar_remove_one_dev_info(dev); 5298 5295 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 5299 5296 if (!get_private_domain_for_dev(dev)) { 5300 5297 dev_warn(dev, ··· 5320 5315 iommu = device_to_iommu(dev, &bus, &devfn); 5321 5316 if (!iommu) 5322 5317 return; 5318 + 5319 + dmar_remove_one_dev_info(dev); 5323 5320 5324 5321 iommu_group_remove_device(dev); 5325 5322
+1 -2
drivers/media/platform/omap/omap_vout_vrfb.c
··· 253 253 */ 254 254 255 255 pixsize = vout->bpp * vout->vrfb_bpp; 256 - dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) - 257 - (vout->pix.width * vout->bpp)) + 1; 256 + dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp; 258 257 259 258 xt->src_start = vout->buf_phy_addr[vb->i]; 260 259 xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
+1
drivers/misc/Kconfig
··· 456 456 457 457 config XILINX_SDFEC 458 458 tristate "Xilinx SDFEC 16" 459 + depends on HAS_IOMEM 459 460 help 460 461 This option enables support for the Xilinx SDFEC (Soft Decision 461 462 Forward Error Correction) driver. This enables a char driver
+2 -3
drivers/misc/habanalabs/device.c
··· 970 970 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); 971 971 if (rc) { 972 972 dev_err(hdev->dev, "failed to initialize kernel context\n"); 973 - goto free_ctx; 973 + kfree(hdev->kernel_ctx); 974 + goto mmu_fini; 974 975 } 975 976 976 977 rc = hl_cb_pool_init(hdev); ··· 1054 1053 if (hl_ctx_put(hdev->kernel_ctx) != 1) 1055 1054 dev_err(hdev->dev, 1056 1055 "kernel ctx is still alive on initialization failure\n"); 1057 - free_ctx: 1058 - kfree(hdev->kernel_ctx); 1059 1056 mmu_fini: 1060 1057 hl_mmu_fini(hdev); 1061 1058 eq_fini:
+47 -25
drivers/misc/habanalabs/goya/goya.c
··· 2729 2729 GOYA_ASYNC_EVENT_ID_PI_UPDATE); 2730 2730 } 2731 2731 2732 - void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val) 2732 + void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd) 2733 2733 { 2734 - /* Not needed in Goya */ 2734 + /* The QMANs are on the SRAM so need to copy to IO space */ 2735 + memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd)); 2735 2736 } 2736 2737 2737 2738 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, ··· 3314 3313 int rc; 3315 3314 3316 3315 dev_dbg(hdev->dev, "DMA packet details:\n"); 3317 - dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); 3318 - dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); 3319 - dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); 3316 + dev_dbg(hdev->dev, "source == 0x%llx\n", 3317 + le64_to_cpu(user_dma_pkt->src_addr)); 3318 + dev_dbg(hdev->dev, "destination == 0x%llx\n", 3319 + le64_to_cpu(user_dma_pkt->dst_addr)); 3320 + dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); 3320 3321 3321 3322 ctl = le32_to_cpu(user_dma_pkt->ctl); 3322 3323 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> ··· 3347 3344 struct packet_lin_dma *user_dma_pkt) 3348 3345 { 3349 3346 dev_dbg(hdev->dev, "DMA packet details:\n"); 3350 - dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); 3351 - dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); 3352 - dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); 3347 + dev_dbg(hdev->dev, "source == 0x%llx\n", 3348 + le64_to_cpu(user_dma_pkt->src_addr)); 3349 + dev_dbg(hdev->dev, "destination == 0x%llx\n", 3350 + le64_to_cpu(user_dma_pkt->dst_addr)); 3351 + dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); 3353 3352 3354 3353 /* 3355 3354 * WA for HW-23. 
··· 3391 3386 3392 3387 dev_dbg(hdev->dev, "WREG32 packet details:\n"); 3393 3388 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset); 3394 - dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value); 3389 + dev_dbg(hdev->dev, "value == 0x%x\n", 3390 + le32_to_cpu(wreg_pkt->value)); 3395 3391 3396 3392 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) { 3397 3393 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n", ··· 3434 3428 while (cb_parsed_length < parser->user_cb_size) { 3435 3429 enum packet_id pkt_id; 3436 3430 u16 pkt_size; 3437 - void *user_pkt; 3431 + struct goya_packet *user_pkt; 3438 3432 3439 - user_pkt = (void *) (uintptr_t) 3433 + user_pkt = (struct goya_packet *) (uintptr_t) 3440 3434 (parser->user_cb->kernel_address + cb_parsed_length); 3441 3435 3442 - pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & 3436 + pkt_id = (enum packet_id) ( 3437 + (le64_to_cpu(user_pkt->header) & 3443 3438 PACKET_HEADER_PACKET_ID_MASK) >> 3444 3439 PACKET_HEADER_PACKET_ID_SHIFT); 3445 3440 ··· 3460 3453 * need to validate here as well because patch_cb() is 3461 3454 * not called in MMU path while this function is called 3462 3455 */ 3463 - rc = goya_validate_wreg32(hdev, parser, user_pkt); 3456 + rc = goya_validate_wreg32(hdev, 3457 + parser, (struct packet_wreg32 *) user_pkt); 3464 3458 break; 3465 3459 3466 3460 case PACKET_WREG_BULK: ··· 3489 3481 case PACKET_LIN_DMA: 3490 3482 if (is_mmu) 3491 3483 rc = goya_validate_dma_pkt_mmu(hdev, parser, 3492 - user_pkt); 3484 + (struct packet_lin_dma *) user_pkt); 3493 3485 else 3494 3486 rc = goya_validate_dma_pkt_no_mmu(hdev, parser, 3495 - user_pkt); 3487 + (struct packet_lin_dma *) user_pkt); 3496 3488 break; 3497 3489 3498 3490 case PACKET_MSG_LONG: ··· 3665 3657 enum packet_id pkt_id; 3666 3658 u16 pkt_size; 3667 3659 u32 new_pkt_size = 0; 3668 - void *user_pkt, *kernel_pkt; 3660 + struct goya_packet *user_pkt, *kernel_pkt; 3669 3661 3670 - user_pkt = (void *) (uintptr_t) 3662 + user_pkt = (struct 
goya_packet *) (uintptr_t) 3671 3663 (parser->user_cb->kernel_address + cb_parsed_length); 3672 - kernel_pkt = (void *) (uintptr_t) 3664 + kernel_pkt = (struct goya_packet *) (uintptr_t) 3673 3665 (parser->patched_cb->kernel_address + 3674 3666 cb_patched_cur_length); 3675 3667 3676 - pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & 3668 + pkt_id = (enum packet_id) ( 3669 + (le64_to_cpu(user_pkt->header) & 3677 3670 PACKET_HEADER_PACKET_ID_MASK) >> 3678 3671 PACKET_HEADER_PACKET_ID_SHIFT); 3679 3672 ··· 3689 3680 3690 3681 switch (pkt_id) { 3691 3682 case PACKET_LIN_DMA: 3692 - rc = goya_patch_dma_packet(hdev, parser, user_pkt, 3693 - kernel_pkt, &new_pkt_size); 3683 + rc = goya_patch_dma_packet(hdev, parser, 3684 + (struct packet_lin_dma *) user_pkt, 3685 + (struct packet_lin_dma *) kernel_pkt, 3686 + &new_pkt_size); 3694 3687 cb_patched_cur_length += new_pkt_size; 3695 3688 break; 3696 3689 3697 3690 case PACKET_WREG_32: 3698 3691 memcpy(kernel_pkt, user_pkt, pkt_size); 3699 3692 cb_patched_cur_length += pkt_size; 3700 - rc = goya_validate_wreg32(hdev, parser, kernel_pkt); 3693 + rc = goya_validate_wreg32(hdev, parser, 3694 + (struct packet_wreg32 *) kernel_pkt); 3701 3695 break; 3702 3696 3703 3697 case PACKET_WREG_BULK: ··· 4364 4352 size_t total_pkt_size; 4365 4353 long result; 4366 4354 int rc; 4355 + int irq_num_entries, irq_arr_index; 4356 + __le32 *goya_irq_arr; 4367 4357 4368 4358 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) + 4369 4359 irq_arr_size; ··· 4383 4369 if (!pkt) 4384 4370 return -ENOMEM; 4385 4371 4386 - pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0])); 4387 - memcpy(&pkt->irqs, irq_arr, irq_arr_size); 4372 + irq_num_entries = irq_arr_size / sizeof(irq_arr[0]); 4373 + pkt->length = cpu_to_le32(irq_num_entries); 4374 + 4375 + /* We must perform any necessary endianness conversion on the irq 4376 + * array being passed to the goya hardware 4377 + */ 4378 + for (irq_arr_index = 0, goya_irq_arr = (__le32 *)
&pkt->irqs; 4379 + irq_arr_index < irq_num_entries ; irq_arr_index++) 4380 + goya_irq_arr[irq_arr_index] = 4381 + cpu_to_le32(irq_arr[irq_arr_index]); 4388 4382 4389 4383 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << 4390 4384 ARMCP_PKT_CTL_OPCODE_SHIFT); ··· 5064 5042 .resume = goya_resume, 5065 5043 .cb_mmap = goya_cb_mmap, 5066 5044 .ring_doorbell = goya_ring_doorbell, 5067 - .flush_pq_write = goya_flush_pq_write, 5045 + .pqe_write = goya_pqe_write, 5068 5046 .asic_dma_alloc_coherent = goya_dma_alloc_coherent, 5069 5047 .asic_dma_free_coherent = goya_dma_free_coherent, 5070 5048 .get_int_queue_base = goya_get_int_queue_base,
+1 -1
drivers/misc/habanalabs/goya/goyaP.h
··· 177 177 void goya_late_fini(struct hl_device *hdev); 178 178 179 179 void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 180 - void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val); 180 + void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd); 181 181 void goya_update_eq_ci(struct hl_device *hdev, u32 val); 182 182 void goya_restore_phase_topology(struct hl_device *hdev); 183 183 int goya_context_switch(struct hl_device *hdev, u32 asid);
+7 -2
drivers/misc/habanalabs/habanalabs.h
··· 441 441 * @resume: handles IP specific H/W or SW changes for resume. 442 442 * @cb_mmap: maps a CB. 443 443 * @ring_doorbell: increment PI on a given QMAN. 444 - * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed. 444 + * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific 445 + * function because the PQs are located in different memory areas 446 + * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of 447 + * writing the PQE must match the destination memory area 448 + * properties. 445 449 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling 446 450 * dma_alloc_coherent(). This is ASIC function because 447 451 * its implementation is not trivial when the driver ··· 514 510 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, 515 511 u64 kaddress, phys_addr_t paddress, u32 size); 516 512 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 517 - void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val); 513 + void (*pqe_write)(struct hl_device *hdev, __le64 *pqe, 514 + struct hl_bd *bd); 518 515 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size, 519 516 dma_addr_t *dma_handle, gfp_t flag); 520 517 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
+5 -9
drivers/misc/habanalabs/hw_queue.c
··· 290 290 struct hl_device *hdev = job->cs->ctx->hdev; 291 291 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; 292 292 struct hl_bd bd; 293 - u64 *pi, *pbd = (u64 *) &bd; 293 + __le64 *pi; 294 294 295 295 bd.ctl = 0; 296 - bd.len = __cpu_to_le32(job->job_cb_size); 297 - bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb); 296 + bd.len = cpu_to_le32(job->job_cb_size); 297 + bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); 298 298 299 - pi = (u64 *) (uintptr_t) (q->kernel_address + 299 + pi = (__le64 *) (uintptr_t) (q->kernel_address + 300 300 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd))); 301 - 302 - pi[0] = pbd[0]; 303 - pi[1] = pbd[1]; 304 301 305 302 q->pi++; 306 303 q->pi &= ((q->int_queue_len << 1) - 1); 307 304 308 - /* Flush PQ entry write. Relevant only for specific ASICs */ 309 - hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]); 305 + hdev->asic_funcs->pqe_write(hdev, pi, &bd); 310 306 311 307 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); 312 308 }
+13
drivers/misc/habanalabs/include/goya/goya_packets.h
··· 52 52 #define GOYA_PKT_CTL_MB_SHIFT 31 53 53 #define GOYA_PKT_CTL_MB_MASK 0x80000000 54 54 55 + /* All packets have, at least, an 8-byte header, which contains 56 + * the packet type. The kernel driver uses the packet header for packet 57 + * validation and to perform any necessary required preparation before 58 + * sending them off to the hardware. 59 + */ 60 + struct goya_packet { 61 + __le64 header; 62 + /* The rest of the packet data follows. Use the corresponding 63 + * packet_XXX struct to deference the data, based on packet type 64 + */ 65 + u8 contents[0]; 66 + }; 67 + 55 68 struct packet_nop { 56 69 __le32 reserved; 57 70 __le32 ctl;
+13 -14
drivers/misc/habanalabs/irq.c
··· 80 80 struct hl_cs_job *job; 81 81 bool shadow_index_valid; 82 82 u16 shadow_index; 83 - u32 *cq_entry; 84 - u32 *cq_base; 83 + struct hl_cq_entry *cq_entry, *cq_base; 85 84 86 85 if (hdev->disabled) { 87 86 dev_dbg(hdev->dev, ··· 89 90 return IRQ_HANDLED; 90 91 } 91 92 92 - cq_base = (u32 *) (uintptr_t) cq->kernel_address; 93 + cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address; 93 94 94 95 while (1) { 95 - bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK) 96 + bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) & 97 + CQ_ENTRY_READY_MASK) 96 98 >> CQ_ENTRY_READY_SHIFT); 97 99 98 100 if (!entry_ready) 99 101 break; 100 102 101 - cq_entry = (u32 *) &cq_base[cq->ci]; 103 + cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci]; 102 104 103 - /* 104 - * Make sure we read CQ entry contents after we've 105 + /* Make sure we read CQ entry contents after we've 105 106 * checked the ownership bit. 106 107 */ 107 108 dma_rmb(); 108 109 109 - shadow_index_valid = 110 - ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK) 110 + shadow_index_valid = ((le32_to_cpu(cq_entry->data) & 111 + CQ_ENTRY_SHADOW_INDEX_VALID_MASK) 111 112 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT); 112 113 113 - shadow_index = (u16) 114 - ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK) 114 + shadow_index = (u16) ((le32_to_cpu(cq_entry->data) & 115 + CQ_ENTRY_SHADOW_INDEX_MASK) 115 116 >> CQ_ENTRY_SHADOW_INDEX_SHIFT); 116 117 117 118 queue = &hdev->kernel_queues[cq->hw_queue_id]; ··· 121 122 queue_work(hdev->cq_wq, &job->finish_work); 122 123 } 123 124 124 - /* 125 - * Update ci of the context's queue. There is no 125 + /* Update ci of the context's queue. 
There is no 126 126 * need to protect it with spinlock because this update is 127 127 * done only inside IRQ and there is a different IRQ per 128 128 * queue ··· 129 131 queue->ci = hl_queue_inc_ptr(queue->ci); 130 132 131 133 /* Clear CQ entry ready bit */ 132 - cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK; 134 + cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) & 135 + ~CQ_ENTRY_READY_MASK); 133 136 134 137 cq->ci = hl_cq_inc_ptr(cq->ci); 135 138
+2
drivers/misc/habanalabs/memory.c
··· 1629 1629 dev_dbg(hdev->dev, 1630 1630 "page list 0x%p of asid %d is still alive\n", 1631 1631 phys_pg_list, ctx->asid); 1632 + atomic64_sub(phys_pg_list->total_size, 1633 + &hdev->dram_used_mem); 1632 1634 free_phys_pg_pack(hdev, phys_pg_list); 1633 1635 idr_remove(&vm->phys_pg_pack_handles, i); 1634 1636 }
+3 -2
drivers/mtd/spi-nor/spi-nor.c
··· 3780 3780 default: 3781 3781 /* Kept only for backward compatibility purpose. */ 3782 3782 params->quad_enable = spansion_quad_enable; 3783 - if (nor->clear_sr_bp) 3784 - nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp; 3785 3783 break; 3786 3784 } 3787 3785 ··· 4033 4035 int err; 4034 4036 4035 4037 if (nor->clear_sr_bp) { 4038 + if (nor->quad_enable == spansion_quad_enable) 4039 + nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp; 4040 + 4036 4041 err = nor->clear_sr_bp(nor); 4037 4042 if (err) { 4038 4043 dev_err(nor->dev,
+14 -1
drivers/nvme/host/core.c
··· 1286 1286 */ 1287 1287 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1288 1288 mutex_lock(&ctrl->scan_lock); 1289 + mutex_lock(&ctrl->subsys->lock); 1290 + nvme_mpath_start_freeze(ctrl->subsys); 1291 + nvme_mpath_wait_freeze(ctrl->subsys); 1289 1292 nvme_start_freeze(ctrl); 1290 1293 nvme_wait_freeze(ctrl); 1291 1294 } ··· 1319 1316 nvme_update_formats(ctrl); 1320 1317 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1321 1318 nvme_unfreeze(ctrl); 1319 + nvme_mpath_unfreeze(ctrl->subsys); 1320 + mutex_unlock(&ctrl->subsys->lock); 1322 1321 mutex_unlock(&ctrl->scan_lock); 1323 1322 } 1324 1323 if (effects & NVME_CMD_EFFECTS_CCC) ··· 1720 1715 if (ns->head->disk) { 1721 1716 nvme_update_disk_info(ns->head->disk, ns, id); 1722 1717 blk_queue_stack_limits(ns->head->disk->queue, ns->queue); 1718 + revalidate_disk(ns->head->disk); 1723 1719 } 1724 1720 #endif 1725 1721 } ··· 2493 2487 if (ret) { 2494 2488 dev_err(ctrl->device, 2495 2489 "failed to register subsystem device.\n"); 2490 + put_device(&subsys->dev); 2496 2491 goto out_unlock; 2497 2492 } 2498 2493 ida_init(&subsys->ns_ida); ··· 2516 2509 nvme_put_subsystem(subsys); 2517 2510 out_unlock: 2518 2511 mutex_unlock(&nvme_subsystems_lock); 2519 - put_device(&subsys->dev); 2520 2512 return ret; 2521 2513 } 2522 2514 ··· 3576 3570 { 3577 3571 struct nvme_ns *ns, *next; 3578 3572 LIST_HEAD(ns_list); 3573 + 3574 + /* 3575 + * make sure to requeue I/O to all namespaces as these 3576 + * might result from the scan itself and must complete 3577 + * for the scan_work to make progress 3578 + */ 3579 + nvme_mpath_clear_ctrl_paths(ctrl); 3579 3580 3580 3581 /* prevent racing with ns scanning */ 3581 3582 flush_work(&ctrl->scan_work);
+70 -6
drivers/nvme/host/multipath.c
··· 12 12 MODULE_PARM_DESC(multipath, 13 13 "turn on native support for multiple controllers per subsystem"); 14 14 15 + void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) 16 + { 17 + struct nvme_ns_head *h; 18 + 19 + lockdep_assert_held(&subsys->lock); 20 + list_for_each_entry(h, &subsys->nsheads, entry) 21 + if (h->disk) 22 + blk_mq_unfreeze_queue(h->disk->queue); 23 + } 24 + 25 + void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) 26 + { 27 + struct nvme_ns_head *h; 28 + 29 + lockdep_assert_held(&subsys->lock); 30 + list_for_each_entry(h, &subsys->nsheads, entry) 31 + if (h->disk) 32 + blk_mq_freeze_queue_wait(h->disk->queue); 33 + } 34 + 35 + void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) 36 + { 37 + struct nvme_ns_head *h; 38 + 39 + lockdep_assert_held(&subsys->lock); 40 + list_for_each_entry(h, &subsys->nsheads, entry) 41 + if (h->disk) 42 + blk_freeze_queue_start(h->disk->queue); 43 + } 44 + 15 45 /* 16 46 * If multipathing is enabled we need to always use the subsystem instance 17 47 * number for numbering our devices to avoid conflicts between subsystems that ··· 134 104 [NVME_ANA_CHANGE] = "change", 135 105 }; 136 106 137 - void nvme_mpath_clear_current_path(struct nvme_ns *ns) 107 + bool nvme_mpath_clear_current_path(struct nvme_ns *ns) 138 108 { 139 109 struct nvme_ns_head *head = ns->head; 110 + bool changed = false; 140 111 int node; 141 112 142 113 if (!head) 143 - return; 114 + goto out; 144 115 145 116 for_each_node(node) { 146 - if (ns == rcu_access_pointer(head->current_path[node])) 117 + if (ns == rcu_access_pointer(head->current_path[node])) { 147 118 rcu_assign_pointer(head->current_path[node], NULL); 119 + changed = true; 120 + } 148 121 } 122 + out: 123 + return changed; 124 + } 125 + 126 + void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) 127 + { 128 + struct nvme_ns *ns; 129 + 130 + mutex_lock(&ctrl->scan_lock); 131 + list_for_each_entry(ns, &ctrl->namespaces, list) 132 + if 
(nvme_mpath_clear_current_path(ns)) 133 + kblockd_schedule_work(&ns->head->requeue_work); 134 + mutex_unlock(&ctrl->scan_lock); 149 135 } 150 136 151 137 static bool nvme_path_is_disabled(struct nvme_ns *ns) ··· 272 226 return ns; 273 227 } 274 228 229 + static bool nvme_available_path(struct nvme_ns_head *head) 230 + { 231 + struct nvme_ns *ns; 232 + 233 + list_for_each_entry_rcu(ns, &head->list, siblings) { 234 + switch (ns->ctrl->state) { 235 + case NVME_CTRL_LIVE: 236 + case NVME_CTRL_RESETTING: 237 + case NVME_CTRL_CONNECTING: 238 + /* fallthru */ 239 + return true; 240 + default: 241 + break; 242 + } 243 + } 244 + return false; 245 + } 246 + 275 247 static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, 276 248 struct bio *bio) 277 249 { ··· 316 252 disk_devt(ns->head->disk), 317 253 bio->bi_iter.bi_sector); 318 254 ret = direct_make_request(bio); 319 - } else if (!list_empty_careful(&head->list)) { 320 - dev_warn_ratelimited(dev, "no path available - requeuing I/O\n"); 255 + } else if (nvme_available_path(head)) { 256 + dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); 321 257 322 258 spin_lock_irq(&head->requeue_lock); 323 259 bio_list_add(&head->requeue_list, bio); 324 260 spin_unlock_irq(&head->requeue_lock); 325 261 } else { 326 - dev_warn_ratelimited(dev, "no path - failing I/O\n"); 262 + dev_warn_ratelimited(dev, "no available path - failing I/O\n"); 327 263 328 264 bio->bi_status = BLK_STS_IOERR; 329 265 bio_endio(bio);
+19 -2
drivers/nvme/host/nvme.h
··· 490 490 return ctrl->ana_log_buf != NULL; 491 491 } 492 492 493 + void nvme_mpath_unfreeze(struct nvme_subsystem *subsys); 494 + void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); 495 + void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); 493 496 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, 494 497 struct nvme_ctrl *ctrl, int *flags); 495 498 void nvme_failover_req(struct request *req); ··· 503 500 int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); 504 501 void nvme_mpath_uninit(struct nvme_ctrl *ctrl); 505 502 void nvme_mpath_stop(struct nvme_ctrl *ctrl); 506 - void nvme_mpath_clear_current_path(struct nvme_ns *ns); 503 + bool nvme_mpath_clear_current_path(struct nvme_ns *ns); 504 + void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); 507 505 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); 508 506 509 507 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) ··· 552 548 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) 553 549 { 554 550 } 555 - static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) 551 + static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns) 552 + { 553 + return false; 554 + } 555 + static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) 556 556 { 557 557 } 558 558 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) ··· 574 566 { 575 567 } 576 568 static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl) 569 + { 570 + } 571 + static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) 572 + { 573 + } 574 + static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) 575 + { 576 + } 577 + static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) 577 578 { 578 579 } 579 580 #endif /* CONFIG_NVME_MULTIPATH */
+12 -4
drivers/nvme/host/pci.c
··· 2695 2695 { 2696 2696 struct nvme_dev *dev = data; 2697 2697 2698 - nvme_reset_ctrl_sync(&dev->ctrl); 2698 + flush_work(&dev->ctrl.reset_work); 2699 2699 flush_work(&dev->ctrl.scan_work); 2700 2700 nvme_put_ctrl(&dev->ctrl); 2701 2701 } ··· 2761 2761 2762 2762 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2763 2763 2764 + nvme_reset_ctrl(&dev->ctrl); 2764 2765 nvme_get_ctrl(&dev->ctrl); 2765 2766 async_schedule(nvme_async_probe, dev); 2766 2767 ··· 2847 2846 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 2848 2847 struct nvme_ctrl *ctrl = &ndev->ctrl; 2849 2848 2850 - if (pm_resume_via_firmware() || !ctrl->npss || 2849 + if (ndev->last_ps == U32_MAX || 2851 2850 nvme_set_power_state(ctrl, ndev->last_ps) != 0) 2852 2851 nvme_reset_ctrl(ctrl); 2853 2852 return 0; ··· 2860 2859 struct nvme_ctrl *ctrl = &ndev->ctrl; 2861 2860 int ret = -EBUSY; 2862 2861 2862 + ndev->last_ps = U32_MAX; 2863 + 2863 2864 /* 2864 2865 * The platform does not remove power for a kernel managed suspend so 2865 2866 * use host managed nvme power settings for lowest idle power if ··· 2869 2866 * shutdown. But if the firmware is involved after the suspend or the 2870 2867 * device does not support any non-default power states, shut down the 2871 2868 * device fully. 2869 + * 2870 + * If ASPM is not enabled for the device, shut down the device and allow 2871 + * the PCI bus layer to put it into D3 in order to take the PCIe link 2872 + * down, so as to allow the platform to achieve its minimum low-power 2873 + * state (which may not be possible if the link is up). 
2872 2874 */ 2873 - if (pm_suspend_via_firmware() || !ctrl->npss) { 2875 + if (pm_suspend_via_firmware() || !ctrl->npss || 2876 + !pcie_aspm_enabled(pdev)) { 2874 2877 nvme_dev_disable(ndev, true); 2875 2878 return 0; 2876 2879 } ··· 2889 2880 ctrl->state != NVME_CTRL_ADMIN_ONLY) 2890 2881 goto unfreeze; 2891 2882 2892 - ndev->last_ps = 0; 2893 2883 ret = nvme_get_power_state(ctrl, &ndev->last_ps); 2894 2884 if (ret < 0) 2895 2885 goto unfreeze;
+11 -5
drivers/nvme/host/rdma.c
··· 562 562 return ret; 563 563 } 564 564 565 + static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 566 + { 567 + rdma_disconnect(queue->cm_id); 568 + ib_drain_qp(queue->qp); 569 + } 570 + 565 571 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 566 572 { 567 573 if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) 568 574 return; 569 - 570 - rdma_disconnect(queue->cm_id); 571 - ib_drain_qp(queue->qp); 575 + __nvme_rdma_stop_queue(queue); 572 576 } 573 577 574 578 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) ··· 611 607 else 612 608 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 613 609 614 - if (!ret) 610 + if (!ret) { 615 611 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); 616 - else 612 + } else { 613 + __nvme_rdma_stop_queue(queue); 617 614 dev_info(ctrl->ctrl.device, 618 615 "failed to connect queue: %d ret=%d\n", idx, ret); 616 + } 619 617 return ret; 620 618 } 621 619
+1
drivers/nvme/target/configfs.c
··· 675 675 676 676 found: 677 677 list_del(&p->entry); 678 + nvmet_port_del_ctrls(port, subsys); 678 679 nvmet_port_disc_changed(port, subsys); 679 680 680 681 if (list_empty(&port->subsystems))
+15
drivers/nvme/target/core.c
··· 46 46 u16 status; 47 47 48 48 switch (errno) { 49 + case 0: 50 + status = NVME_SC_SUCCESS; 51 + break; 49 52 case -ENOSPC: 50 53 req->error_loc = offsetof(struct nvme_rw_command, length); 51 54 status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; ··· 282 279 up_write(&nvmet_config_sem); 283 280 } 284 281 EXPORT_SYMBOL_GPL(nvmet_unregister_transport); 282 + 283 + void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys) 284 + { 285 + struct nvmet_ctrl *ctrl; 286 + 287 + mutex_lock(&subsys->lock); 288 + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { 289 + if (ctrl->port == port) 290 + ctrl->ops->delete_ctrl(ctrl); 291 + } 292 + mutex_unlock(&subsys->lock); 293 + } 285 294 286 295 int nvmet_enable_port(struct nvmet_port *port) 287 296 {
+8
drivers/nvme/target/loop.c
··· 654 654 mutex_lock(&nvme_loop_ports_mutex); 655 655 list_del_init(&port->entry); 656 656 mutex_unlock(&nvme_loop_ports_mutex); 657 + 658 + /* 659 + * Ensure any ctrls that are in the process of being 660 + * deleted are in fact deleted before we return 661 + * and free the port. This is to prevent active 662 + * ctrls from using a port after it's freed. 663 + */ 664 + flush_workqueue(nvme_delete_wq); 657 665 } 658 666 659 667 static const struct nvmet_fabrics_ops nvme_loop_ops = {
+3
drivers/nvme/target/nvmet.h
··· 418 418 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops); 419 419 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops); 420 420 421 + void nvmet_port_del_ctrls(struct nvmet_port *port, 422 + struct nvmet_subsys *subsys); 423 + 421 424 int nvmet_enable_port(struct nvmet_port *port); 422 425 void nvmet_disable_port(struct nvmet_port *port); 423 426
+1 -1
drivers/of/irq.c
··· 277 277 * of_irq_parse_one - Resolve an interrupt for a device 278 278 * @device: the device whose interrupt is to be resolved 279 279 * @index: index of the interrupt to resolve 280 - * @out_irq: structure of_irq filled by this function 280 + * @out_irq: structure of_phandle_args filled by this function 281 281 * 282 282 * This function resolves an interrupt for a node by walking the interrupt tree, 283 283 * finding which interrupt controller node it is attached to, and returning the
+9 -3
drivers/of/resolver.c
··· 206 206 for_each_child_of_node(local_fixups, child) { 207 207 208 208 for_each_child_of_node(overlay, overlay_child) 209 - if (!node_name_cmp(child, overlay_child)) 209 + if (!node_name_cmp(child, overlay_child)) { 210 + of_node_put(overlay_child); 210 211 break; 212 + } 211 213 212 - if (!overlay_child) 214 + if (!overlay_child) { 215 + of_node_put(child); 213 216 return -EINVAL; 217 + } 214 218 215 219 err = adjust_local_phandle_references(child, overlay_child, 216 220 phandle_delta); 217 - if (err) 221 + if (err) { 222 + of_node_put(child); 218 223 return err; 224 + } 219 225 } 220 226 221 227 return 0;
+20
drivers/pci/pcie/aspm.c
··· 1170 1170 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy, 1171 1171 NULL, 0644); 1172 1172 1173 + /** 1174 + * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device. 1175 + * @pdev: Target device. 1176 + */ 1177 + bool pcie_aspm_enabled(struct pci_dev *pdev) 1178 + { 1179 + struct pci_dev *bridge = pci_upstream_bridge(pdev); 1180 + bool ret; 1181 + 1182 + if (!bridge) 1183 + return false; 1184 + 1185 + mutex_lock(&aspm_lock); 1186 + ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false; 1187 + mutex_unlock(&aspm_lock); 1188 + 1189 + return ret; 1190 + } 1191 + EXPORT_SYMBOL_GPL(pcie_aspm_enabled); 1192 + 1173 1193 #ifdef CONFIG_PCIEASPM_DEBUG 1174 1194 static ssize_t link_state_show(struct device *dev, 1175 1195 struct device_attribute *attr,
+21 -2
drivers/scsi/lpfc/lpfc_init.c
··· 10776 10776 /* This loop sets up all CPUs that are affinitized with a 10777 10777 * irq vector assigned to the driver. All affinitized CPUs 10778 10778 * will get a link to that vectors IRQ and EQ. 10779 + * 10780 + * NULL affinity mask handling: 10781 + * If irq count is greater than one, log an error message. 10782 + * If the null mask is received for the first irq, find the 10783 + * first present cpu, and assign the eq index to ensure at 10784 + * least one EQ is assigned. 10779 10785 */ 10780 10786 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10781 10787 /* Get a CPU mask for all CPUs affinitized to this vector */ 10782 10788 maskp = pci_irq_get_affinity(phba->pcidev, idx); 10783 - if (!maskp) 10784 - continue; 10789 + if (!maskp) { 10790 + if (phba->cfg_irq_chann > 1) 10791 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10792 + "3329 No affinity mask found " 10793 + "for vector %d (%d)\n", 10794 + idx, phba->cfg_irq_chann); 10795 + if (!idx) { 10796 + cpu = cpumask_first(cpu_present_mask); 10797 + cpup = &phba->sli4_hba.cpu_map[cpu]; 10798 + cpup->eq = idx; 10799 + cpup->irq = pci_irq_vector(phba->pcidev, idx); 10800 + cpup->flag |= LPFC_CPU_FIRST_IRQ; 10801 + } 10802 + break; 10803 + } 10785 10804 10786 10805 i = 0; 10787 10806 /* Loop through all CPUs associated with vector idx */
+1 -6
drivers/soundwire/Kconfig
··· 4 4 # 5 5 6 6 menuconfig SOUNDWIRE 7 - bool "SoundWire support" 7 + tristate "SoundWire support" 8 8 help 9 9 SoundWire is a 2-Pin interface with data and clock line ratified 10 10 by the MIPI Alliance. SoundWire is used for transporting data ··· 17 17 18 18 comment "SoundWire Devices" 19 19 20 - config SOUNDWIRE_BUS 21 - tristate 22 - select REGMAP_SOUNDWIRE 23 - 24 20 config SOUNDWIRE_CADENCE 25 21 tristate 26 22 27 23 config SOUNDWIRE_INTEL 28 24 tristate "Intel SoundWire Master driver" 29 25 select SOUNDWIRE_CADENCE 30 - select SOUNDWIRE_BUS 31 26 depends on X86 && ACPI && SND_SOC 32 27 help 33 28 SoundWire Intel Master driver.
+1 -1
drivers/soundwire/Makefile
··· 5 5 6 6 #Bus Objs 7 7 soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o 8 - obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o 8 + obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o 9 9 10 10 #Cadence Objs 11 11 soundwire-cadence-objs := cadence_master.o
+4 -4
drivers/soundwire/cadence_master.c
··· 81 81 82 82 #define CDNS_MCP_INTSET 0x4C 83 83 84 - #define CDNS_SDW_SLAVE_STAT 0x50 85 - #define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0) 84 + #define CDNS_MCP_SLAVE_STAT 0x50 85 + #define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0) 86 86 87 87 #define CDNS_MCP_SLAVE_INTSTAT0 0x54 88 88 #define CDNS_MCP_SLAVE_INTSTAT1 0x58 ··· 96 96 #define CDNS_MCP_SLAVE_INTMASK0 0x5C 97 97 #define CDNS_MCP_SLAVE_INTMASK1 0x60 98 98 99 - #define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0) 100 - #define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0) 99 + #define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0) 100 + #define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0) 101 101 102 102 #define CDNS_MCP_PORT_INTSTAT 0x64 103 103 #define CDNS_MCP_PDI_STAT 0x6C
+4 -4
drivers/staging/comedi/drivers/dt3000.c
··· 342 342 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, 343 343 unsigned int flags) 344 344 { 345 - int divider, base, prescale; 345 + unsigned int divider, base, prescale; 346 346 347 - /* This function needs improvment */ 347 + /* This function needs improvement */ 348 348 /* Don't know if divider==0 works. */ 349 349 350 350 for (prescale = 0; prescale < 16; prescale++) { ··· 358 358 divider = (*nanosec) / base; 359 359 break; 360 360 case CMDF_ROUND_UP: 361 - divider = (*nanosec) / base; 361 + divider = DIV_ROUND_UP(*nanosec, base); 362 362 break; 363 363 } 364 364 if (divider < 65536) { ··· 368 368 } 369 369 370 370 prescale = 15; 371 - base = timer_base * (1 << prescale); 371 + base = timer_base * (prescale + 1); 372 372 divider = 65535; 373 373 *nanosec = divider * base; 374 374 return (prescale << 16) | (divider);
+12 -7
drivers/usb/chipidea/ci_hdrc_imx.c
··· 454 454 imx_disable_unprepare_clks(dev); 455 455 disable_hsic_regulator: 456 456 if (data->hsic_pad_regulator) 457 - ret = regulator_disable(data->hsic_pad_regulator); 457 + /* don't overwrite original ret (cf. EPROBE_DEFER) */ 458 + regulator_disable(data->hsic_pad_regulator); 458 459 if (pdata.flags & CI_HDRC_PMQOS) 459 460 pm_qos_remove_request(&data->pm_qos_req); 461 + data->ci_pdev = NULL; 460 462 return ret; 461 463 } 462 464 ··· 471 469 pm_runtime_disable(&pdev->dev); 472 470 pm_runtime_put_noidle(&pdev->dev); 473 471 } 474 - ci_hdrc_remove_device(data->ci_pdev); 472 + if (data->ci_pdev) 473 + ci_hdrc_remove_device(data->ci_pdev); 475 474 if (data->override_phy_control) 476 475 usb_phy_shutdown(data->phy); 477 - imx_disable_unprepare_clks(&pdev->dev); 478 - if (data->plat_data->flags & CI_HDRC_PMQOS) 479 - pm_qos_remove_request(&data->pm_qos_req); 480 - if (data->hsic_pad_regulator) 481 - regulator_disable(data->hsic_pad_regulator); 476 + if (data->ci_pdev) { 477 + imx_disable_unprepare_clks(&pdev->dev); 478 + if (data->plat_data->flags & CI_HDRC_PMQOS) 479 + pm_qos_remove_request(&data->pm_qos_req); 480 + if (data->hsic_pad_regulator) 481 + regulator_disable(data->hsic_pad_regulator); 482 + } 482 483 483 484 return 0; 484 485 }
+7 -5
drivers/usb/class/cdc-acm.c
··· 1301 1301 tty_port_init(&acm->port); 1302 1302 acm->port.ops = &acm_port_ops; 1303 1303 1304 - minor = acm_alloc_minor(acm); 1305 - if (minor < 0) 1306 - goto alloc_fail1; 1307 - 1308 1304 ctrlsize = usb_endpoint_maxp(epctrl); 1309 1305 readsize = usb_endpoint_maxp(epread) * 1310 1306 (quirks == SINGLE_RX_URB ? 1 : 2); ··· 1308 1312 acm->writesize = usb_endpoint_maxp(epwrite) * 20; 1309 1313 acm->control = control_interface; 1310 1314 acm->data = data_interface; 1315 + 1316 + usb_get_intf(acm->control); /* undone in destruct() */ 1317 + 1318 + minor = acm_alloc_minor(acm); 1319 + if (minor < 0) 1320 + goto alloc_fail1; 1321 + 1311 1322 acm->minor = minor; 1312 1323 acm->dev = usb_dev; 1313 1324 if (h.usb_cdc_acm_descriptor) ··· 1461 1458 usb_driver_claim_interface(&acm_driver, data_interface, acm); 1462 1459 usb_set_intfdata(data_interface, acm); 1463 1460 1464 - usb_get_intf(control_interface); 1465 1461 tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, 1466 1462 &control_interface->dev); 1467 1463 if (IS_ERR(tty_dev)) {
+3 -7
drivers/usb/core/buffer.c
··· 66 66 char name[16]; 67 67 int i, size; 68 68 69 - if (!IS_ENABLED(CONFIG_HAS_DMA) || 70 - (!is_device_dma_capable(hcd->self.sysdev) && 71 - !hcd->localmem_pool)) 69 + if (hcd->localmem_pool || !hcd_uses_dma(hcd)) 72 70 return 0; 73 71 74 72 for (i = 0; i < HCD_BUFFER_POOLS; i++) { ··· 127 129 return gen_pool_dma_alloc(hcd->localmem_pool, size, dma); 128 130 129 131 /* some USB hosts just use PIO */ 130 - if (!IS_ENABLED(CONFIG_HAS_DMA) || 131 - !is_device_dma_capable(bus->sysdev)) { 132 + if (!hcd_uses_dma(hcd)) { 132 133 *dma = ~(dma_addr_t) 0; 133 134 return kmalloc(size, mem_flags); 134 135 } ··· 157 160 return; 158 161 } 159 162 160 - if (!IS_ENABLED(CONFIG_HAS_DMA) || 161 - !is_device_dma_capable(bus->sysdev)) { 163 + if (!hcd_uses_dma(hcd)) { 162 164 kfree(addr); 163 165 return; 164 166 }
+5 -5
drivers/usb/core/file.c
··· 193 193 intf->minor = minor; 194 194 break; 195 195 } 196 - up_write(&minor_rwsem); 197 - if (intf->minor < 0) 196 + if (intf->minor < 0) { 197 + up_write(&minor_rwsem); 198 198 return -EXFULL; 199 + } 199 200 200 201 /* create a usb class device for this usb interface */ 201 202 snprintf(name, sizeof(name), class_driver->name, minor - minor_base); ··· 204 203 MKDEV(USB_MAJOR, minor), class_driver, 205 204 "%s", kbasename(name)); 206 205 if (IS_ERR(intf->usb_dev)) { 207 - down_write(&minor_rwsem); 208 206 usb_minors[minor] = NULL; 209 207 intf->minor = -1; 210 - up_write(&minor_rwsem); 211 208 retval = PTR_ERR(intf->usb_dev); 212 209 } 210 + up_write(&minor_rwsem); 213 211 return retval; 214 212 } 215 213 EXPORT_SYMBOL_GPL(usb_register_dev); ··· 234 234 return; 235 235 236 236 dev_dbg(&intf->dev, "removing %d minor\n", intf->minor); 237 + device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); 237 238 238 239 down_write(&minor_rwsem); 239 240 usb_minors[intf->minor] = NULL; 240 241 up_write(&minor_rwsem); 241 242 242 - device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); 243 243 intf->usb_dev = NULL; 244 244 intf->minor = -1; 245 245 destroy_usb_class();
+2 -2
drivers/usb/core/hcd.c
··· 1412 1412 if (usb_endpoint_xfer_control(&urb->ep->desc)) { 1413 1413 if (hcd->self.uses_pio_for_control) 1414 1414 return ret; 1415 - if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { 1415 + if (hcd_uses_dma(hcd)) { 1416 1416 if (is_vmalloc_addr(urb->setup_packet)) { 1417 1417 WARN_ONCE(1, "setup packet is not dma capable\n"); 1418 1418 return -EAGAIN; ··· 1446 1446 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1447 1447 if (urb->transfer_buffer_length != 0 1448 1448 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { 1449 - if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { 1449 + if (hcd_uses_dma(hcd)) { 1450 1450 if (urb->num_sgs) { 1451 1451 int n; 1452 1452
+2 -2
drivers/usb/core/message.c
··· 2218 2218 (struct usb_cdc_dmm_desc *)buffer; 2219 2219 break; 2220 2220 case USB_CDC_MDLM_TYPE: 2221 - if (elength < sizeof(struct usb_cdc_mdlm_desc *)) 2221 + if (elength < sizeof(struct usb_cdc_mdlm_desc)) 2222 2222 goto next_desc; 2223 2223 if (desc) 2224 2224 return -EINVAL; 2225 2225 desc = (struct usb_cdc_mdlm_desc *)buffer; 2226 2226 break; 2227 2227 case USB_CDC_MDLM_DETAIL_TYPE: 2228 - if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) 2228 + if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) 2229 2229 goto next_desc; 2230 2230 if (detail) 2231 2231 return -EINVAL;
+1 -1
drivers/usb/dwc2/hcd.c
··· 4608 4608 4609 4609 buf = urb->transfer_buffer; 4610 4610 4611 - if (hcd->self.uses_dma) { 4611 + if (hcd_uses_dma(hcd)) { 4612 4612 if (!buf && (urb->transfer_dma & 3)) { 4613 4613 dev_err(hsotg->dev, 4614 4614 "%s: unaligned transfer with no transfer_buffer",
+1
drivers/usb/gadget/composite.c
··· 1976 1976 * disconnect callbacks? 1977 1977 */ 1978 1978 spin_lock_irqsave(&cdev->lock, flags); 1979 + cdev->suspended = 0; 1979 1980 if (cdev->config) 1980 1981 reset_config(cdev); 1981 1982 if (cdev->driver->disconnect)
+18 -10
drivers/usb/gadget/function/f_mass_storage.c
··· 261 261 struct fsg_common { 262 262 struct usb_gadget *gadget; 263 263 struct usb_composite_dev *cdev; 264 - struct fsg_dev *fsg, *new_fsg; 264 + struct fsg_dev *fsg; 265 265 wait_queue_head_t io_wait; 266 266 wait_queue_head_t fsg_wait; 267 267 ··· 290 290 unsigned int bulk_out_maxpacket; 291 291 enum fsg_state state; /* For exception handling */ 292 292 unsigned int exception_req_tag; 293 + void *exception_arg; 293 294 294 295 enum data_direction data_dir; 295 296 u32 data_size; ··· 392 391 393 392 /* These routines may be called in process context or in_irq */ 394 393 395 - static void raise_exception(struct fsg_common *common, enum fsg_state new_state) 394 + static void __raise_exception(struct fsg_common *common, enum fsg_state new_state, 395 + void *arg) 396 396 { 397 397 unsigned long flags; 398 398 ··· 406 404 if (common->state <= new_state) { 407 405 common->exception_req_tag = common->ep0_req_tag; 408 406 common->state = new_state; 407 + common->exception_arg = arg; 409 408 if (common->thread_task) 410 409 send_sig_info(SIGUSR1, SEND_SIG_PRIV, 411 410 common->thread_task); ··· 414 411 spin_unlock_irqrestore(&common->lock, flags); 415 412 } 416 413 414 + static void raise_exception(struct fsg_common *common, enum fsg_state new_state) 415 + { 416 + __raise_exception(common, new_state, NULL); 417 + } 417 418 418 419 /*-------------------------------------------------------------------------*/ 419 420 ··· 2292 2285 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 2293 2286 { 2294 2287 struct fsg_dev *fsg = fsg_from_func(f); 2295 - fsg->common->new_fsg = fsg; 2296 - raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2288 + 2289 + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg); 2297 2290 return USB_GADGET_DELAYED_STATUS; 2298 2291 } 2299 2292 2300 2293 static void fsg_disable(struct usb_function *f) 2301 2294 { 2302 2295 struct fsg_dev *fsg = fsg_from_func(f); 2303 - fsg->common->new_fsg = NULL; 2304 - 
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2296 + 2297 + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL); 2305 2298 } 2306 2299 2307 2300 ··· 2314 2307 enum fsg_state old_state; 2315 2308 struct fsg_lun *curlun; 2316 2309 unsigned int exception_req_tag; 2310 + struct fsg_dev *new_fsg; 2317 2311 2318 2312 /* 2319 2313 * Clear the existing signals. Anything but SIGUSR1 is converted ··· 2368 2360 common->next_buffhd_to_fill = &common->buffhds[0]; 2369 2361 common->next_buffhd_to_drain = &common->buffhds[0]; 2370 2362 exception_req_tag = common->exception_req_tag; 2363 + new_fsg = common->exception_arg; 2371 2364 old_state = common->state; 2372 2365 common->state = FSG_STATE_NORMAL; 2373 2366 ··· 2422 2413 break; 2423 2414 2424 2415 case FSG_STATE_CONFIG_CHANGE: 2425 - do_set_interface(common, common->new_fsg); 2426 - if (common->new_fsg) 2416 + do_set_interface(common, new_fsg); 2417 + if (new_fsg) 2427 2418 usb_composite_setup_continue(common->cdev); 2428 2419 break; 2429 2420 ··· 2998 2989 2999 2990 DBG(fsg, "unbind\n"); 3000 2991 if (fsg->common->fsg == fsg) { 3001 - fsg->common->new_fsg = NULL; 3002 - raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2992 + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL); 3003 2993 /* FIXME: make interruptible or killable somehow? */ 3004 2994 wait_event(common->fsg_wait, common->fsg != fsg); 3005 2995 }
+3 -2
drivers/usb/gadget/udc/renesas_usb3.c
··· 19 19 #include <linux/pm_runtime.h> 20 20 #include <linux/sizes.h> 21 21 #include <linux/slab.h> 22 + #include <linux/string.h> 22 23 #include <linux/sys_soc.h> 23 24 #include <linux/uaccess.h> 24 25 #include <linux/usb/ch9.h> ··· 2451 2450 if (usb3->forced_b_device) 2452 2451 return -EBUSY; 2453 2452 2454 - if (!strncmp(buf, "host", strlen("host"))) 2453 + if (sysfs_streq(buf, "host")) 2455 2454 new_mode_is_host = true; 2456 - else if (!strncmp(buf, "peripheral", strlen("peripheral"))) 2455 + else if (sysfs_streq(buf, "peripheral")) 2457 2456 new_mode_is_host = false; 2458 2457 else 2459 2458 return -EINVAL;
+4
drivers/usb/host/fotg210-hcd.c
··· 1629 1629 /* see what we found out */ 1630 1630 temp = check_reset_complete(fotg210, wIndex, status_reg, 1631 1631 fotg210_readl(fotg210, status_reg)); 1632 + 1633 + /* restart schedule */ 1634 + fotg210->command |= CMD_RUN; 1635 + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); 1632 1636 } 1633 1637 1634 1638 if (!(temp & (PORT_RESUME|PORT_RESET))) {
+10
drivers/usb/serial/option.c
··· 968 968 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) }, 969 969 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) }, 970 970 971 + /* Motorola devices */ 972 + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */ 973 + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */ 974 + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */ 975 + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */ 971 976 972 977 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 973 978 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, ··· 1554 1549 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ 1555 1550 .driver_info = RSVD(2) }, 1556 1551 { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ 1552 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ 1557 1553 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, 1558 1554 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, 1559 1555 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, ··· 1958 1952 .driver_info = RSVD(4) }, 1959 1953 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ 1960 1954 .driver_info = RSVD(4) }, 1955 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */ 1956 + .driver_info = RSVD(4) }, 1961 1957 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1962 1958 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1963 1959 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ 1964 1960 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 
0xff), /* Olicard 600 */ 1961 + .driver_info = RSVD(4) }, 1962 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */ 1965 1963 .driver_info = RSVD(4) }, 1966 1964 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ 1967 1965 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+3 -7
fs/afs/cmservice.c
··· 505 505 struct afs_call *call = container_of(work, struct afs_call, work); 506 506 struct afs_uuid *r = call->request; 507 507 508 - struct { 509 - __be32 match; 510 - } reply; 511 - 512 508 _enter(""); 513 509 514 510 if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0) 515 - reply.match = htonl(0); 511 + afs_send_empty_reply(call); 516 512 else 517 - reply.match = htonl(1); 513 + rxrpc_kernel_abort_call(call->net->socket, call->rxcall, 514 + 1, 1, "K-1"); 518 515 519 - afs_send_simple_reply(call, &reply, sizeof(reply)); 520 516 afs_put_call(call); 521 517 _leave(""); 522 518 }
+73 -16
fs/afs/dir.c
··· 440 440 * iterate through the data blob that lists the contents of an AFS directory 441 441 */ 442 442 static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, 443 - struct key *key) 443 + struct key *key, afs_dataversion_t *_dir_version) 444 444 { 445 445 struct afs_vnode *dvnode = AFS_FS_I(dir); 446 446 struct afs_xdr_dir_page *dbuf; ··· 460 460 req = afs_read_dir(dvnode, key); 461 461 if (IS_ERR(req)) 462 462 return PTR_ERR(req); 463 + *_dir_version = req->data_version; 463 464 464 465 /* round the file position up to the next entry boundary */ 465 466 ctx->pos += sizeof(union afs_xdr_dirent) - 1; ··· 515 514 */ 516 515 static int afs_readdir(struct file *file, struct dir_context *ctx) 517 516 { 518 - return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file)); 517 + afs_dataversion_t dir_version; 518 + 519 + return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file), 520 + &dir_version); 519 521 } 520 522 521 523 /* ··· 559 555 * - just returns the FID the dentry name maps to if found 560 556 */ 561 557 static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, 562 - struct afs_fid *fid, struct key *key) 558 + struct afs_fid *fid, struct key *key, 559 + afs_dataversion_t *_dir_version) 563 560 { 564 561 struct afs_super_info *as = dir->i_sb->s_fs_info; 565 562 struct afs_lookup_one_cookie cookie = { ··· 573 568 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); 574 569 575 570 /* search the directory */ 576 - ret = afs_dir_iterate(dir, &cookie.ctx, key); 571 + ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version); 577 572 if (ret < 0) { 578 573 _leave(" = %d [iter]", ret); 579 574 return ret; ··· 647 642 struct afs_server *server; 648 643 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; 649 644 struct inode *inode = NULL, *ti; 645 + afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version); 650 646 int ret, i; 651 647 652 648 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); ··· 675 669 
cookie->fids[i].vid = as->volume->vid; 676 670 677 671 /* search the directory */ 678 - ret = afs_dir_iterate(dir, &cookie->ctx, key); 672 + ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version); 679 673 if (ret < 0) { 680 674 inode = ERR_PTR(ret); 681 675 goto out; 682 676 } 677 + 678 + dentry->d_fsdata = (void *)(unsigned long)data_version; 683 679 684 680 inode = ERR_PTR(-ENOENT); 685 681 if (!cookie->found) ··· 976 968 struct dentry *parent; 977 969 struct inode *inode; 978 970 struct key *key; 979 - long dir_version, de_version; 971 + afs_dataversion_t dir_version; 972 + long de_version; 980 973 int ret; 981 974 982 975 if (flags & LOOKUP_RCU) ··· 1023 1014 * on a 32-bit system, we only have 32 bits in the dentry to store the 1024 1015 * version. 1025 1016 */ 1026 - dir_version = (long)dir->status.data_version; 1017 + dir_version = dir->status.data_version; 1027 1018 de_version = (long)dentry->d_fsdata; 1028 - if (de_version == dir_version) 1029 - goto out_valid; 1019 + if (de_version == (long)dir_version) 1020 + goto out_valid_noupdate; 1030 1021 1031 - dir_version = (long)dir->invalid_before; 1032 - if (de_version - dir_version >= 0) 1022 + dir_version = dir->invalid_before; 1023 + if (de_version - (long)dir_version >= 0) 1033 1024 goto out_valid; 1034 1025 1035 1026 _debug("dir modified"); 1036 1027 afs_stat_v(dir, n_reval); 1037 1028 1038 1029 /* search the directory for this vnode */ 1039 - ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key); 1030 + ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version); 1040 1031 switch (ret) { 1041 1032 case 0: 1042 1033 /* the filename maps to something */ ··· 1089 1080 } 1090 1081 1091 1082 out_valid: 1092 - dentry->d_fsdata = (void *)dir_version; 1083 + dentry->d_fsdata = (void *)(unsigned long)dir_version; 1084 + out_valid_noupdate: 1093 1085 dput(parent); 1094 1086 key_put(key); 1095 1087 _leave(" = 1 [valid]"); ··· 1196 1186 } 1197 1187 1198 1188 /* 1189 + * Note that a dentry got 
changed. We need to set d_fsdata to the data version 1190 + * number derived from the result of the operation. It doesn't matter if 1191 + * d_fsdata goes backwards as we'll just revalidate. 1192 + */ 1193 + static void afs_update_dentry_version(struct afs_fs_cursor *fc, 1194 + struct dentry *dentry, 1195 + struct afs_status_cb *scb) 1196 + { 1197 + if (fc->ac.error == 0) 1198 + dentry->d_fsdata = 1199 + (void *)(unsigned long)scb->status.data_version; 1200 + } 1201 + 1202 + /* 1199 1203 * create a directory on an AFS filesystem 1200 1204 */ 1201 1205 static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) ··· 1251 1227 afs_check_for_remote_deletion(&fc, dvnode); 1252 1228 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1253 1229 &data_version, &scb[0]); 1230 + afs_update_dentry_version(&fc, dentry, &scb[0]); 1254 1231 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1255 1232 ret = afs_end_vnode_operation(&fc); 1256 1233 if (ret < 0) ··· 1344 1319 1345 1320 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1346 1321 &data_version, scb); 1322 + afs_update_dentry_version(&fc, dentry, scb); 1347 1323 ret = afs_end_vnode_operation(&fc); 1348 1324 if (ret == 0) { 1349 1325 afs_dir_remove_subdir(dentry); ··· 1484 1458 &data_version, &scb[0]); 1485 1459 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, 1486 1460 &data_version_2, &scb[1]); 1461 + afs_update_dentry_version(&fc, dentry, &scb[0]); 1487 1462 ret = afs_end_vnode_operation(&fc); 1488 1463 if (ret == 0 && !(scb[1].have_status || scb[1].have_error)) 1489 1464 ret = afs_dir_remove_link(dvnode, dentry, key); ··· 1553 1526 afs_check_for_remote_deletion(&fc, dvnode); 1554 1527 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1555 1528 &data_version, &scb[0]); 1529 + afs_update_dentry_version(&fc, dentry, &scb[0]); 1556 1530 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1557 1531 ret = afs_end_vnode_operation(&fc); 1558 1532 if (ret < 0) ··· 1635 1607 
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, 1636 1608 NULL, &scb[1]); 1637 1609 ihold(&vnode->vfs_inode); 1610 + afs_update_dentry_version(&fc, dentry, &scb[0]); 1638 1611 d_instantiate(dentry, &vnode->vfs_inode); 1639 1612 1640 1613 mutex_unlock(&vnode->io_lock); ··· 1715 1686 afs_check_for_remote_deletion(&fc, dvnode); 1716 1687 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1717 1688 &data_version, &scb[0]); 1689 + afs_update_dentry_version(&fc, dentry, &scb[0]); 1718 1690 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1719 1691 ret = afs_end_vnode_operation(&fc); 1720 1692 if (ret < 0) ··· 1821 1791 } 1822 1792 } 1823 1793 1794 + /* This bit is potentially nasty as there's a potential race with 1795 + * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry 1796 + * to reflect it's new parent's new data_version after the op, but 1797 + * d_revalidate may see old_dentry between the op having taken place 1798 + * and the version being updated. 1799 + * 1800 + * So drop the old_dentry for now to make other threads go through 1801 + * lookup instead - which we hold a lock against. 
1802 + */ 1803 + d_drop(old_dentry); 1804 + 1824 1805 ret = -ERESTARTSYS; 1825 1806 if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) { 1826 1807 afs_dataversion_t orig_data_version; ··· 1843 1802 if (orig_dvnode != new_dvnode) { 1844 1803 if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) { 1845 1804 afs_end_vnode_operation(&fc); 1846 - goto error_rehash; 1805 + goto error_rehash_old; 1847 1806 } 1848 - new_data_version = new_dvnode->status.data_version; 1807 + new_data_version = new_dvnode->status.data_version + 1; 1849 1808 } else { 1850 1809 new_data_version = orig_data_version; 1851 1810 new_scb = &scb[0]; ··· 1868 1827 } 1869 1828 ret = afs_end_vnode_operation(&fc); 1870 1829 if (ret < 0) 1871 - goto error_rehash; 1830 + goto error_rehash_old; 1872 1831 } 1873 1832 1874 1833 if (ret == 0) { ··· 1894 1853 drop_nlink(new_inode); 1895 1854 spin_unlock(&new_inode->i_lock); 1896 1855 } 1856 + 1857 + /* Now we can update d_fsdata on the dentries to reflect their 1858 + * new parent's data_version. 1859 + * 1860 + * Note that if we ever implement RENAME_EXCHANGE, we'll have 1861 + * to update both dentries with opposing dir versions. 1862 + */ 1863 + if (new_dvnode != orig_dvnode) { 1864 + afs_update_dentry_version(&fc, old_dentry, &scb[1]); 1865 + afs_update_dentry_version(&fc, new_dentry, &scb[1]); 1866 + } else { 1867 + afs_update_dentry_version(&fc, old_dentry, &scb[0]); 1868 + afs_update_dentry_version(&fc, new_dentry, &scb[0]); 1869 + } 1897 1870 d_move(old_dentry, new_dentry); 1898 1871 goto error_tmp; 1899 1872 } 1900 1873 1874 + error_rehash_old: 1875 + d_rehash(new_dentry); 1901 1876 error_rehash: 1902 1877 if (rehash) 1903 1878 d_rehash(rehash);
+7 -5
fs/afs/file.c
··· 191 191 int i; 192 192 193 193 if (refcount_dec_and_test(&req->usage)) { 194 - for (i = 0; i < req->nr_pages; i++) 195 - if (req->pages[i]) 196 - put_page(req->pages[i]); 197 - if (req->pages != req->array) 198 - kfree(req->pages); 194 + if (req->pages) { 195 + for (i = 0; i < req->nr_pages; i++) 196 + if (req->pages[i]) 197 + put_page(req->pages[i]); 198 + if (req->pages != req->array) 199 + kfree(req->pages); 200 + } 199 201 kfree(req); 200 202 } 201 203 }
+6 -5
fs/afs/vlclient.c
··· 56 56 struct afs_uuid__xdr *xdr; 57 57 struct afs_uuid *uuid; 58 58 int j; 59 + int n = entry->nr_servers; 59 60 60 61 tmp = ntohl(uvldb->serverFlags[i]); 61 62 if (tmp & AFS_VLSF_DONTUSE || 62 63 (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) 63 64 continue; 64 65 if (tmp & AFS_VLSF_RWVOL) { 65 - entry->fs_mask[i] |= AFS_VOL_VTM_RW; 66 + entry->fs_mask[n] |= AFS_VOL_VTM_RW; 66 67 if (vlflags & AFS_VLF_BACKEXISTS) 67 - entry->fs_mask[i] |= AFS_VOL_VTM_BAK; 68 + entry->fs_mask[n] |= AFS_VOL_VTM_BAK; 68 69 } 69 70 if (tmp & AFS_VLSF_ROVOL) 70 - entry->fs_mask[i] |= AFS_VOL_VTM_RO; 71 - if (!entry->fs_mask[i]) 71 + entry->fs_mask[n] |= AFS_VOL_VTM_RO; 72 + if (!entry->fs_mask[n]) 72 73 continue; 73 74 74 75 xdr = &uvldb->serverNumber[i]; 75 - uuid = (struct afs_uuid *)&entry->fs_server[i]; 76 + uuid = (struct afs_uuid *)&entry->fs_server[n]; 76 77 uuid->time_low = xdr->time_low; 77 78 uuid->time_mid = htons(ntohl(xdr->time_mid)); 78 79 uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
+5 -44
fs/block_dev.c
··· 345 345 struct bio *bio; 346 346 bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; 347 347 bool is_read = (iov_iter_rw(iter) == READ), is_sync; 348 - bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0; 349 348 loff_t pos = iocb->ki_pos; 350 349 blk_qc_t qc = BLK_QC_T_NONE; 351 - gfp_t gfp; 352 - int ret; 350 + int ret = 0; 353 351 354 352 if ((pos | iov_iter_alignment(iter)) & 355 353 (bdev_logical_block_size(bdev) - 1)) 356 354 return -EINVAL; 357 355 358 - if (nowait) 359 - gfp = GFP_NOWAIT; 360 - else 361 - gfp = GFP_KERNEL; 362 - 363 - bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool); 364 - if (!bio) 365 - return -EAGAIN; 356 + bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool); 366 357 367 358 dio = container_of(bio, struct blkdev_dio, bio); 368 359 dio->is_sync = is_sync = is_sync_kiocb(iocb); ··· 375 384 if (!is_poll) 376 385 blk_start_plug(&plug); 377 386 378 - ret = 0; 379 387 for (;;) { 380 388 bio_set_dev(bio, bdev); 381 389 bio->bi_iter.bi_sector = pos >> 9; ··· 399 409 task_io_account_write(bio->bi_iter.bi_size); 400 410 } 401 411 402 - /* 403 - * Tell underlying layer to not block for resource shortage. 404 - * And if we would have blocked, return error inline instead 405 - * of through the bio->bi_end_io() callback. 
406 - */ 407 - if (nowait) 408 - bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE); 409 - 412 + dio->size += bio->bi_iter.bi_size; 410 413 pos += bio->bi_iter.bi_size; 411 414 412 415 nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES); ··· 411 428 polled = true; 412 429 } 413 430 414 - dio->size += bio->bi_iter.bi_size; 415 431 qc = submit_bio(bio); 416 - if (qc == BLK_QC_T_EAGAIN) { 417 - dio->size -= bio->bi_iter.bi_size; 418 - ret = -EAGAIN; 419 - goto error; 420 - } 421 432 422 433 if (polled) 423 434 WRITE_ONCE(iocb->ki_cookie, qc); ··· 432 455 atomic_inc(&dio->ref); 433 456 } 434 457 435 - dio->size += bio->bi_iter.bi_size; 436 - qc = submit_bio(bio); 437 - if (qc == BLK_QC_T_EAGAIN) { 438 - dio->size -= bio->bi_iter.bi_size; 439 - ret = -EAGAIN; 440 - goto error; 441 - } 442 - 443 - bio = bio_alloc(gfp, nr_pages); 444 - if (!bio) { 445 - ret = -EAGAIN; 446 - goto error; 447 - } 458 + submit_bio(bio); 459 + bio = bio_alloc(GFP_KERNEL, nr_pages); 448 460 } 449 461 450 462 if (!is_poll) ··· 453 487 } 454 488 __set_current_state(TASK_RUNNING); 455 489 456 - out: 457 490 if (!ret) 458 491 ret = blk_status_to_errno(dio->bio.bi_status); 459 492 if (likely(!ret)) ··· 460 495 461 496 bio_put(&dio->bio); 462 497 return ret; 463 - error: 464 - if (!is_poll) 465 - blk_finish_plug(&plug); 466 - goto out; 467 498 } 468 499 469 500 static ssize_t
-4
fs/btrfs/ctree.h
··· 401 401 struct raid_kobject { 402 402 u64 flags; 403 403 struct kobject kobj; 404 - struct list_head list; 405 404 }; 406 405 407 406 /* ··· 914 915 u32 thread_pool_size; 915 916 916 917 struct kobject *space_info_kobj; 917 - struct list_head pending_raid_kobjs; 918 - spinlock_t pending_raid_kobjs_lock; /* uncontended */ 919 918 920 919 u64 total_pinned; 921 920 ··· 2695 2698 int btrfs_make_block_group(struct btrfs_trans_handle *trans, 2696 2699 u64 bytes_used, u64 type, u64 chunk_offset, 2697 2700 u64 size); 2698 - void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info); 2699 2701 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 2700 2702 struct btrfs_fs_info *fs_info, 2701 2703 const u64 chunk_offset);
-2
fs/btrfs/disk-io.c
··· 2683 2683 INIT_LIST_HEAD(&fs_info->delayed_iputs); 2684 2684 INIT_LIST_HEAD(&fs_info->delalloc_roots); 2685 2685 INIT_LIST_HEAD(&fs_info->caching_block_groups); 2686 - INIT_LIST_HEAD(&fs_info->pending_raid_kobjs); 2687 - spin_lock_init(&fs_info->pending_raid_kobjs_lock); 2688 2686 spin_lock_init(&fs_info->delalloc_root_lock); 2689 2687 spin_lock_init(&fs_info->trans_lock); 2690 2688 spin_lock_init(&fs_info->fs_roots_radix_lock);
+35 -36
fs/btrfs/extent-tree.c
··· 4 4 */ 5 5 6 6 #include <linux/sched.h> 7 + #include <linux/sched/mm.h> 7 8 #include <linux/sched/signal.h> 8 9 #include <linux/pagemap.h> 9 10 #include <linux/writeback.h> ··· 7889 7888 return 0; 7890 7889 } 7891 7890 7892 - /* link_block_group will queue up kobjects to add when we're reclaim-safe */ 7893 - void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info) 7894 - { 7895 - struct btrfs_space_info *space_info; 7896 - struct raid_kobject *rkobj; 7897 - LIST_HEAD(list); 7898 - int ret = 0; 7899 - 7900 - spin_lock(&fs_info->pending_raid_kobjs_lock); 7901 - list_splice_init(&fs_info->pending_raid_kobjs, &list); 7902 - spin_unlock(&fs_info->pending_raid_kobjs_lock); 7903 - 7904 - list_for_each_entry(rkobj, &list, list) { 7905 - space_info = btrfs_find_space_info(fs_info, rkobj->flags); 7906 - 7907 - ret = kobject_add(&rkobj->kobj, &space_info->kobj, 7908 - "%s", btrfs_bg_type_to_raid_name(rkobj->flags)); 7909 - if (ret) { 7910 - kobject_put(&rkobj->kobj); 7911 - break; 7912 - } 7913 - } 7914 - if (ret) 7915 - btrfs_warn(fs_info, 7916 - "failed to add kobject for block cache, ignoring"); 7917 - } 7918 - 7919 7891 static void link_block_group(struct btrfs_block_group_cache *cache) 7920 7892 { 7921 7893 struct btrfs_space_info *space_info = cache->space_info; ··· 7903 7929 up_write(&space_info->groups_sem); 7904 7930 7905 7931 if (first) { 7906 - struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS); 7932 + struct raid_kobject *rkobj; 7933 + unsigned int nofs_flag; 7934 + int ret; 7935 + 7936 + /* 7937 + * Setup a NOFS context because kobject_add(), deep in its call 7938 + * chain, does GFP_KERNEL allocations, and we are often called 7939 + * in a context where if reclaim is triggered we can deadlock 7940 + * (we are either holding a transaction handle or some lock 7941 + * required for a transaction commit). 
7942 + */ 7943 + nofs_flag = memalloc_nofs_save(); 7944 + rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL); 7907 7945 if (!rkobj) { 7946 + memalloc_nofs_restore(nofs_flag); 7908 7947 btrfs_warn(cache->fs_info, 7909 7948 "couldn't alloc memory for raid level kobject"); 7910 7949 return; 7911 7950 } 7912 7951 rkobj->flags = cache->flags; 7913 7952 kobject_init(&rkobj->kobj, &btrfs_raid_ktype); 7914 - 7915 - spin_lock(&fs_info->pending_raid_kobjs_lock); 7916 - list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs); 7917 - spin_unlock(&fs_info->pending_raid_kobjs_lock); 7953 + ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s", 7954 + btrfs_bg_type_to_raid_name(rkobj->flags)); 7955 + memalloc_nofs_restore(nofs_flag); 7956 + if (ret) { 7957 + kobject_put(&rkobj->kobj); 7958 + btrfs_warn(fs_info, 7959 + "failed to add kobject for block cache, ignoring"); 7960 + return; 7961 + } 7918 7962 space_info->block_group_kobjs[index] = &rkobj->kobj; 7919 7963 } 7920 7964 } ··· 8198 8206 inc_block_group_ro(cache, 1); 8199 8207 } 8200 8208 8201 - btrfs_add_raid_kobjects(info); 8202 8209 btrfs_init_global_block_rsv(info); 8203 8210 ret = check_chunk_block_group_mappings(info); 8204 8211 error: ··· 8966 8975 struct btrfs_device *device; 8967 8976 struct list_head *devices; 8968 8977 u64 group_trimmed; 8978 + u64 range_end = U64_MAX; 8969 8979 u64 start; 8970 8980 u64 end; 8971 8981 u64 trimmed = 0; ··· 8976 8984 int dev_ret = 0; 8977 8985 int ret = 0; 8978 8986 8987 + /* 8988 + * Check range overflow if range->len is set. 8989 + * The default range->len is U64_MAX. 
8990 + */ 8991 + if (range->len != U64_MAX && 8992 + check_add_overflow(range->start, range->len, &range_end)) 8993 + return -EINVAL; 8994 + 8979 8995 cache = btrfs_lookup_first_block_group(fs_info, range->start); 8980 8996 for (; cache; cache = next_block_group(cache)) { 8981 - if (cache->key.objectid >= (range->start + range->len)) { 8997 + if (cache->key.objectid >= range_end) { 8982 8998 btrfs_put_block_group(cache); 8983 8999 break; 8984 9000 } 8985 9001 8986 9002 start = max(range->start, cache->key.objectid); 8987 - end = min(range->start + range->len, 8988 - cache->key.objectid + cache->key.offset); 9003 + end = min(range_end, cache->key.objectid + cache->key.offset); 8989 9004 8990 9005 if (end - start >= range->minlen) { 8991 9006 if (!block_group_cache_done(cache)) {
-13
fs/btrfs/volumes.c
··· 3087 3087 if (ret) 3088 3088 return ret; 3089 3089 3090 - /* 3091 - * We add the kobjects here (and after forcing data chunk creation) 3092 - * since relocation is the only place we'll create chunks of a new 3093 - * type at runtime. The only place where we'll remove the last 3094 - * chunk of a type is the call immediately below this one. Even 3095 - * so, we're protected against races with the cleaner thread since 3096 - * we're covered by the delete_unused_bgs_mutex. 3097 - */ 3098 - btrfs_add_raid_kobjects(fs_info); 3099 - 3100 3090 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3101 3091 chunk_offset); 3102 3092 if (IS_ERR(trans)) { ··· 3213 3223 btrfs_end_transaction(trans); 3214 3224 if (ret < 0) 3215 3225 return ret; 3216 - 3217 - btrfs_add_raid_kobjects(fs_info); 3218 - 3219 3226 return 1; 3220 3227 } 3221 3228 }
+10 -10
fs/io_uring.c
··· 1097 1097 1098 1098 iter->bvec = bvec + seg_skip; 1099 1099 iter->nr_segs -= seg_skip; 1100 - iter->count -= (seg_skip << PAGE_SHIFT); 1100 + iter->count -= bvec->bv_len + offset; 1101 1101 iter->iov_offset = offset & ~PAGE_MASK; 1102 - if (iter->iov_offset) 1103 - iter->count -= iter->iov_offset; 1104 1102 } 1105 1103 } 1106 1104 ··· 2023 2025 { 2024 2026 int ret; 2025 2027 2028 + ret = io_req_defer(ctx, req, s->sqe); 2029 + if (ret) { 2030 + if (ret != -EIOCBQUEUED) { 2031 + io_free_req(req); 2032 + io_cqring_add_event(ctx, s->sqe->user_data, ret); 2033 + } 2034 + return 0; 2035 + } 2036 + 2026 2037 ret = __io_submit_sqe(ctx, req, s, true); 2027 2038 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { 2028 2039 struct io_uring_sqe *sqe_copy; ··· 2101 2094 io_free_req(req); 2102 2095 err: 2103 2096 io_cqring_add_event(ctx, s->sqe->user_data, ret); 2104 - return; 2105 - } 2106 - 2107 - ret = io_req_defer(ctx, req, s->sqe); 2108 - if (ret) { 2109 - if (ret != -EIOCBQUEUED) 2110 - goto err_req; 2111 2097 return; 2112 2098 } 2113 2099
+1 -1
fs/seq_file.c
··· 119 119 } 120 120 if (seq_has_overflowed(m)) 121 121 goto Eoverflow; 122 + p = m->op->next(m, p, &m->index); 122 123 if (pos + m->count > offset) { 123 124 m->from = offset - pos; 124 125 m->count -= m->from; ··· 127 126 } 128 127 pos += m->count; 129 128 m->count = 0; 130 - p = m->op->next(m, p, &m->index); 131 129 if (pos == offset) 132 130 break; 133 131 }
+21 -8
fs/xfs/libxfs/xfs_bmap.c
··· 3835 3835 XFS_STATS_INC(mp, xs_blk_mapr); 3836 3836 3837 3837 ifp = XFS_IFORK_PTR(ip, whichfork); 3838 + if (!ifp) { 3839 + /* No CoW fork? Return a hole. */ 3840 + if (whichfork == XFS_COW_FORK) { 3841 + mval->br_startoff = bno; 3842 + mval->br_startblock = HOLESTARTBLOCK; 3843 + mval->br_blockcount = len; 3844 + mval->br_state = XFS_EXT_NORM; 3845 + *nmap = 1; 3846 + return 0; 3847 + } 3838 3848 3839 - /* No CoW fork? Return a hole. */ 3840 - if (whichfork == XFS_COW_FORK && !ifp) { 3841 - mval->br_startoff = bno; 3842 - mval->br_startblock = HOLESTARTBLOCK; 3843 - mval->br_blockcount = len; 3844 - mval->br_state = XFS_EXT_NORM; 3845 - *nmap = 1; 3846 - return 0; 3849 + /* 3850 + * A missing attr ifork implies that the inode says we're in 3851 + * extents or btree format but failed to pass the inode fork 3852 + * verifier while trying to load it. Treat that as a file 3853 + * corruption too. 3854 + */ 3855 + #ifdef DEBUG 3856 + xfs_alert(mp, "%s: inode %llu missing fork %d", 3857 + __func__, ip->i_ino, whichfork); 3858 + #endif /* DEBUG */ 3859 + return -EFSCORRUPTED; 3847 3860 } 3848 3861 3849 3862 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+12 -7
fs/xfs/libxfs/xfs_da_btree.c
··· 487 487 ASSERT(state->path.active == 0); 488 488 oldblk = &state->path.blk[0]; 489 489 error = xfs_da3_root_split(state, oldblk, addblk); 490 - if (error) { 491 - addblk->bp = NULL; 492 - return error; /* GROT: dir is inconsistent */ 493 - } 490 + if (error) 491 + goto out; 494 492 495 493 /* 496 494 * Update pointers to the node which used to be block 0 and just got ··· 503 505 */ 504 506 node = oldblk->bp->b_addr; 505 507 if (node->hdr.info.forw) { 506 - ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno); 508 + if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) { 509 + error = -EFSCORRUPTED; 510 + goto out; 511 + } 507 512 node = addblk->bp->b_addr; 508 513 node->hdr.info.back = cpu_to_be32(oldblk->blkno); 509 514 xfs_trans_log_buf(state->args->trans, addblk->bp, ··· 515 514 } 516 515 node = oldblk->bp->b_addr; 517 516 if (node->hdr.info.back) { 518 - ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno); 517 + if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) { 518 + error = -EFSCORRUPTED; 519 + goto out; 520 + } 519 521 node = addblk->bp->b_addr; 520 522 node->hdr.info.forw = cpu_to_be32(oldblk->blkno); 521 523 xfs_trans_log_buf(state->args->trans, addblk->bp, 522 524 XFS_DA_LOGRANGE(node, &node->hdr.info, 523 525 sizeof(node->hdr.info))); 524 526 } 527 + out: 525 528 addblk->bp = NULL; 526 - return 0; 529 + return error; 527 530 } 528 531 529 532 /*
+2 -1
fs/xfs/libxfs/xfs_dir2_node.c
··· 741 741 ents = dp->d_ops->leaf_ents_p(leaf); 742 742 743 743 xfs_dir3_leaf_check(dp, bp); 744 - ASSERT(leafhdr.count > 0); 744 + if (leafhdr.count <= 0) 745 + return -EFSCORRUPTED; 745 746 746 747 /* 747 748 * Look up the hash value in the leaf entries.
+1 -4
fs/xfs/xfs_log.c
··· 429 429 430 430 ASSERT(*ticp == NULL); 431 431 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 432 - KM_SLEEP | KM_MAYFAIL); 433 - if (!tic) 434 - return -ENOMEM; 435 - 432 + KM_SLEEP); 436 433 *ticp = tic; 437 434 438 435 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
+18 -3
include/asm-generic/5level-fixup.h
··· 19 19 20 20 #define p4d_alloc(mm, pgd, address) (pgd) 21 21 #define p4d_offset(pgd, start) (pgd) 22 - #define p4d_none(p4d) 0 23 - #define p4d_bad(p4d) 0 24 - #define p4d_present(p4d) 1 22 + 23 + #ifndef __ASSEMBLY__ 24 + static inline int p4d_none(p4d_t p4d) 25 + { 26 + return 0; 27 + } 28 + 29 + static inline int p4d_bad(p4d_t p4d) 30 + { 31 + return 0; 32 + } 33 + 34 + static inline int p4d_present(p4d_t p4d) 35 + { 36 + return 1; 37 + } 38 + #endif 39 + 25 40 #define p4d_ERROR(p4d) do { } while (0) 26 41 #define p4d_clear(p4d) pgd_clear(p4d) 27 42 #define p4d_val(p4d) pgd_val(p4d)
+1 -4
include/linux/blk_types.h
··· 311 311 __REQ_RAHEAD, /* read ahead, can fail anytime */ 312 312 __REQ_BACKGROUND, /* background IO */ 313 313 __REQ_NOWAIT, /* Don't wait if request will block */ 314 - __REQ_NOWAIT_INLINE, /* Return would-block error inline */ 315 314 /* 316 315 * When a shared kthread needs to issue a bio for a cgroup, doing 317 316 * so synchronously can lead to priority inversions as the kthread ··· 345 346 #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 346 347 #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) 347 348 #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) 348 - #define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) 349 349 #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) 350 350 351 351 #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) ··· 418 420 419 421 typedef unsigned int blk_qc_t; 420 422 #define BLK_QC_T_NONE -1U 421 - #define BLK_QC_T_EAGAIN -2U 422 423 #define BLK_QC_T_SHIFT 16 423 424 #define BLK_QC_T_INTERNAL (1U << 31) 424 425 425 426 static inline bool blk_qc_t_valid(blk_qc_t cookie) 426 427 { 427 - return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; 428 + return cookie != BLK_QC_T_NONE; 428 429 } 429 430 430 431 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
+9 -4
include/linux/dma-noncoherent.h
··· 42 42 dma_addr_t dma_addr, unsigned long attrs); 43 43 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, 44 44 dma_addr_t dma_addr); 45 - 46 - #ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT 47 45 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 48 46 unsigned long attrs); 47 + 48 + #ifdef CONFIG_MMU 49 + pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); 49 50 #else 50 - # define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot) 51 - #endif 51 + static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, 52 + unsigned long attrs) 53 + { 54 + return prot; /* no protection bits supported without page tables */ 55 + } 56 + #endif /* CONFIG_MMU */ 52 57 53 58 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC 54 59 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+4 -8
include/linux/gfp.h
··· 510 510 } 511 511 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, 512 512 struct vm_area_struct *vma, unsigned long addr, 513 - int node, bool hugepage); 514 - #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ 515 - alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) 513 + int node); 516 514 #else 517 515 #define alloc_pages(gfp_mask, order) \ 518 516 alloc_pages_node(numa_node_id(), gfp_mask, order) 519 - #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ 520 - alloc_pages(gfp_mask, order) 521 - #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ 517 + #define alloc_pages_vma(gfp_mask, order, vma, addr, node)\ 522 518 alloc_pages(gfp_mask, order) 523 519 #endif 524 520 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 525 521 #define alloc_page_vma(gfp_mask, vma, addr) \ 526 - alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) 522 + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) 527 523 #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ 528 - alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) 524 + alloc_pages_vma(gfp_mask, 0, vma, addr, node) 529 525 530 526 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 531 527 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+19
include/linux/memcontrol.h
··· 668 668 669 669 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 670 670 int val); 671 + void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); 671 672 672 673 static inline void mod_lruvec_state(struct lruvec *lruvec, 673 674 enum node_stat_item idx, int val) ··· 1073 1072 mod_node_page_state(page_pgdat(page), idx, val); 1074 1073 } 1075 1074 1075 + static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, 1076 + int val) 1077 + { 1078 + struct page *page = virt_to_head_page(p); 1079 + 1080 + __mod_node_page_state(page_pgdat(page), idx, val); 1081 + } 1082 + 1076 1083 static inline 1077 1084 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 1078 1085 gfp_t gfp_mask, ··· 1166 1157 enum node_stat_item idx) 1167 1158 { 1168 1159 __mod_lruvec_page_state(page, idx, -1); 1160 + } 1161 + 1162 + static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx) 1163 + { 1164 + __mod_lruvec_slab_state(p, idx, 1); 1165 + } 1166 + 1167 + static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx) 1168 + { 1169 + __mod_lruvec_slab_state(p, idx, -1); 1169 1170 } 1170 1171 1171 1172 /* idx can be of type enum memcg_stat_item or node_stat_item */
+2
include/linux/mempolicy.h
··· 139 139 struct mempolicy *get_task_policy(struct task_struct *p); 140 140 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 141 141 unsigned long addr); 142 + struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 143 + unsigned long addr); 142 144 bool vma_policy_mof(struct vm_area_struct *vma); 143 145 144 146 extern void numa_default_policy(void);
+10 -1
include/linux/mm_types.h
··· 159 159 /** @pgmap: Points to the hosting device page map. */ 160 160 struct dev_pagemap *pgmap; 161 161 void *zone_device_data; 162 - unsigned long _zd_pad_1; /* uses mapping */ 162 + /* 163 + * ZONE_DEVICE private pages are counted as being 164 + * mapped so the next 3 words hold the mapping, index, 165 + * and private fields from the source anonymous or 166 + * page cache page while the page is migrated to device 167 + * private memory. 168 + * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also 169 + * use the mapping, index, and private fields when 170 + * pmem backed DAX files are mapped. 171 + */ 163 172 }; 164 173 165 174 /** @rcu_head: You can use this to free a page by RCU. */
+2
include/linux/pci.h
··· 1567 1567 1568 1568 #ifdef CONFIG_PCIEASPM 1569 1569 bool pcie_aspm_support_enabled(void); 1570 + bool pcie_aspm_enabled(struct pci_dev *pdev); 1570 1571 #else 1571 1572 static inline bool pcie_aspm_support_enabled(void) { return false; } 1573 + static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } 1572 1574 #endif 1573 1575 1574 1576 #ifdef CONFIG_PCIEAER
+1 -1
include/linux/usb.h
··· 1457 1457 * field rather than determining a dma address themselves. 1458 1458 * 1459 1459 * Note that transfer_buffer must still be set if the controller 1460 - * does not support DMA (as indicated by bus.uses_dma) and when talking 1460 + * does not support DMA (as indicated by hcd_uses_dma()) and when talking 1461 1461 * to root hub. If you have to trasfer between highmem zone and the device 1462 1462 * on such controller, create a bounce buffer or bail out with an error. 1463 1463 * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
+3
include/linux/usb/hcd.h
··· 422 422 return hcd->high_prio_bh.completing_ep == ep; 423 423 } 424 424 425 + #define hcd_uses_dma(hcd) \ 426 + (IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma) 427 + 425 428 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); 426 429 extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, 427 430 int status);
+5
include/misc/charlcd.h drivers/auxdisplay/charlcd.h
··· 6 6 * Copyright (C) 2016-2017 Glider bvba 7 7 */ 8 8 9 + #ifndef _CHARLCD_H 10 + #define _CHARLCD_H 11 + 9 12 struct charlcd { 10 13 const struct charlcd_ops *ops; 11 14 const unsigned char *char_conv; /* Optional */ ··· 40 37 int charlcd_unregister(struct charlcd *lcd); 41 38 42 39 void charlcd_poke(struct charlcd *lcd); 40 + 41 + #endif /* CHARLCD_H */
+2 -1
include/uapi/rdma/siw-abi.h
··· 180 180 * to control CQ arming. 181 181 */ 182 182 struct siw_cq_ctrl { 183 - __aligned_u64 notify; 183 + __u32 flags; 184 + __u32 pad; 184 185 }; 185 186 #endif
+1 -15
kernel/configs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 1 2 /* 2 3 * kernel/configs.c 3 4 * Echo the kernel .config file used to build the kernel ··· 7 6 * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net> 8 7 * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com> 9 8 * Copyright (C) 2002 Hewlett-Packard Company 10 - * 11 - * This program is free software; you can redistribute it and/or modify 12 - * it under the terms of the GNU General Public License as published by 13 - * the Free Software Foundation; either version 2 of the License, or (at 14 - * your option) any later version. 15 - * 16 - * This program is distributed in the hope that it will be useful, but 17 - * WITHOUT ANY WARRANTY; without even the implied warranty of 18 - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 19 - * NON INFRINGEMENT. See the GNU General Public License for more 20 - * details. 21 - * 22 - * You should have received a copy of the GNU General Public License 23 - * along with this program; if not, write to the Free Software 24 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 9 */ 26 10 27 11 #include <linux/kernel.h>
+5 -5
kernel/dma/direct.c
··· 47 47 { 48 48 u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); 49 49 50 - if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma) 51 - max_dma = dev->bus_dma_mask; 52 - 53 50 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; 54 51 } 55 52 ··· 127 130 if (!page) 128 131 return NULL; 129 132 130 - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { 133 + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && 134 + !force_dma_unencrypted(dev)) { 131 135 /* remove any dirty cache lines on the kernel alias */ 132 136 if (!PageHighMem(page)) 133 137 arch_dma_prep_coherent(page, size); 138 + *dma_handle = phys_to_dma(dev, page_to_phys(page)); 134 139 /* return the page pointer as the opaque cookie */ 135 140 return page; 136 141 } ··· 177 178 { 178 179 unsigned int page_order = get_order(size); 179 180 180 - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { 181 + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && 182 + !force_dma_unencrypted(dev)) { 181 183 /* cpu_addr is a struct page cookie, not a kernel address */ 182 184 __dma_direct_free_pages(dev, size, cpu_addr); 183 185 return;
+18 -1
kernel/dma/mapping.c
··· 150 150 } 151 151 EXPORT_SYMBOL(dma_get_sgtable_attrs); 152 152 153 + #ifdef CONFIG_MMU 154 + /* 155 + * Return the page attributes used for mapping dma_alloc_* memory, either in 156 + * kernel space if remapping is needed, or to userspace through dma_mmap_*. 157 + */ 158 + pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) 159 + { 160 + if (dev_is_dma_coherent(dev) || 161 + (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) && 162 + (attrs & DMA_ATTR_NON_CONSISTENT))) 163 + return prot; 164 + if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT)) 165 + return arch_dma_mmap_pgprot(dev, prot, attrs); 166 + return pgprot_noncached(prot); 167 + } 168 + #endif /* CONFIG_MMU */ 169 + 153 170 /* 154 171 * Create userspace mapping for the DMA-coherent memory. 155 172 */ ··· 181 164 unsigned long pfn; 182 165 int ret = -ENXIO; 183 166 184 - vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 167 + vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); 185 168 186 169 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 187 170 return ret;
+1 -1
kernel/dma/remap.c
··· 218 218 219 219 /* create a coherent mapping */ 220 220 ret = dma_common_contiguous_remap(page, size, VM_USERMAP, 221 - arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs), 221 + dma_pgprot(dev, PAGE_KERNEL, attrs), 222 222 __builtin_return_address(0)); 223 223 if (!ret) { 224 224 __dma_direct_free_pages(dev, size, page);
+10 -4
kernel/sched/cpufreq_schedutil.c
··· 40 40 struct task_struct *thread; 41 41 bool work_in_progress; 42 42 43 + bool limits_changed; 43 44 bool need_freq_update; 44 45 }; 45 46 ··· 90 89 !cpufreq_this_cpu_can_update(sg_policy->policy)) 91 90 return false; 92 91 93 - if (unlikely(sg_policy->need_freq_update)) 92 + if (unlikely(sg_policy->limits_changed)) { 93 + sg_policy->limits_changed = false; 94 + sg_policy->need_freq_update = true; 94 95 return true; 96 + } 95 97 96 98 delta_ns = time - sg_policy->last_freq_update_time; 97 99 ··· 441 437 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) 442 438 { 443 439 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) 444 - sg_policy->need_freq_update = true; 440 + sg_policy->limits_changed = true; 445 441 } 446 442 447 443 static void sugov_update_single(struct update_util_data *hook, u64 time, ··· 461 457 if (!sugov_should_update_freq(sg_policy, time)) 462 458 return; 463 459 464 - busy = sugov_cpu_is_busy(sg_cpu); 460 + /* Limits may have changed, don't skip frequency update */ 461 + busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu); 465 462 466 463 util = sugov_get_util(sg_cpu); 467 464 max = sg_cpu->max; ··· 836 831 sg_policy->last_freq_update_time = 0; 837 832 sg_policy->next_freq = 0; 838 833 sg_policy->work_in_progress = false; 834 + sg_policy->limits_changed = false; 839 835 sg_policy->need_freq_update = false; 840 836 sg_policy->cached_raw_freq = 0; 841 837 ··· 885 879 mutex_unlock(&sg_policy->work_lock); 886 880 } 887 881 888 - sg_policy->need_freq_update = true; 882 + sg_policy->limits_changed = true; 889 883 } 890 884 891 885 struct cpufreq_governor schedutil_gov = {
+31 -20
mm/huge_memory.c
··· 644 644 * available 645 645 * never: never stall for any thp allocation 646 646 */ 647 - static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) 647 + static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr) 648 648 { 649 649 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 650 + gfp_t this_node = 0; 650 651 651 - /* Always do synchronous compaction */ 652 + #ifdef CONFIG_NUMA 653 + struct mempolicy *pol; 654 + /* 655 + * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not 656 + * specified, to express a general desire to stay on the current 657 + * node for optimistic allocation attempts. If the defrag mode 658 + * and/or madvise hint requires the direct reclaim then we prefer 659 + * to fallback to other node rather than node reclaim because that 660 + * can lead to excessive reclaim even though there is free memory 661 + * on other nodes. We expect that NUMA preferences are specified 662 + * by memory policies. 663 + */ 664 + pol = get_vma_policy(vma, addr); 665 + if (pol->mode != MPOL_BIND) 666 + this_node = __GFP_THISNODE; 667 + mpol_cond_put(pol); 668 + #endif 669 + 652 670 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 653 671 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 654 - 655 - /* Kick kcompactd and fail quickly */ 656 672 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 657 - return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 658 - 659 - /* Synchronous compaction if madvised, otherwise kick kcompactd */ 673 + return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node; 660 674 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 661 - return GFP_TRANSHUGE_LIGHT | 662 - (vma_madvised ? 
__GFP_DIRECT_RECLAIM : 663 - __GFP_KSWAPD_RECLAIM); 664 - 665 - /* Only do synchronous compaction if madvised */ 675 + return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 676 + __GFP_KSWAPD_RECLAIM | this_node); 666 677 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 667 - return GFP_TRANSHUGE_LIGHT | 668 - (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); 669 - 670 - return GFP_TRANSHUGE_LIGHT; 678 + return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 679 + this_node); 680 + return GFP_TRANSHUGE_LIGHT | this_node; 671 681 } 672 682 673 683 /* Caller must hold page table lock. */ ··· 749 739 pte_free(vma->vm_mm, pgtable); 750 740 return ret; 751 741 } 752 - gfp = alloc_hugepage_direct_gfpmask(vma); 753 - page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); 742 + gfp = alloc_hugepage_direct_gfpmask(vma, haddr); 743 + page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id()); 754 744 if (unlikely(!page)) { 755 745 count_vm_event(THP_FAULT_FALLBACK); 756 746 return VM_FAULT_FALLBACK; ··· 1357 1347 alloc: 1358 1348 if (__transparent_hugepage_enabled(vma) && 1359 1349 !transparent_hugepage_debug_cow()) { 1360 - huge_gfp = alloc_hugepage_direct_gfpmask(vma); 1361 - new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); 1350 + huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr); 1351 + new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma, 1352 + haddr, numa_node_id()); 1362 1353 } else 1363 1354 new_page = NULL; 1364 1355
+19
mm/hugetlb.c
··· 3856 3856 3857 3857 page = alloc_huge_page(vma, haddr, 0); 3858 3858 if (IS_ERR(page)) { 3859 + /* 3860 + * Returning error will result in faulting task being 3861 + * sent SIGBUS. The hugetlb fault mutex prevents two 3862 + * tasks from racing to fault in the same page which 3863 + * could result in false unable to allocate errors. 3864 + * Page migration does not take the fault mutex, but 3865 + * does a clear then write of pte's under page table 3866 + * lock. Page fault code could race with migration, 3867 + * notice the clear pte and try to allocate a page 3868 + * here. Before returning error, get ptl and make 3869 + * sure there really is no pte entry. 3870 + */ 3871 + ptl = huge_pte_lock(h, mm, ptep); 3872 + if (!huge_pte_none(huge_ptep_get(ptep))) { 3873 + ret = 0; 3874 + spin_unlock(ptl); 3875 + goto out; 3876 + } 3877 + spin_unlock(ptl); 3859 3878 ret = vmf_error(PTR_ERR(page)); 3860 3879 goto out; 3861 3880 }
+1 -1
mm/kmemleak.c
··· 1966 1966 1967 1967 /* stop any memory operation tracing */ 1968 1968 kmemleak_enabled = 0; 1969 + kmemleak_early_log = 0; 1969 1970 1970 1971 /* check whether it is too early for a kernel thread */ 1971 1972 if (kmemleak_initialized) ··· 2010 2009 2011 2010 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF 2012 2011 if (!kmemleak_skip_disable) { 2013 - kmemleak_early_log = 0; 2014 2012 kmemleak_disable(); 2015 2013 return; 2016 2014 }
+49 -10
mm/memcontrol.c
··· 768 768 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 769 769 } 770 770 771 + void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) 772 + { 773 + struct page *page = virt_to_head_page(p); 774 + pg_data_t *pgdat = page_pgdat(page); 775 + struct mem_cgroup *memcg; 776 + struct lruvec *lruvec; 777 + 778 + rcu_read_lock(); 779 + memcg = memcg_from_slab_page(page); 780 + 781 + /* Untracked pages have no memcg, no lruvec. Update only the node */ 782 + if (!memcg || memcg == root_mem_cgroup) { 783 + __mod_node_page_state(pgdat, idx, val); 784 + } else { 785 + lruvec = mem_cgroup_lruvec(pgdat, memcg); 786 + __mod_lruvec_state(lruvec, idx, val); 787 + } 788 + rcu_read_unlock(); 789 + } 790 + 771 791 /** 772 792 * __count_memcg_events - account VM events in a cgroup 773 793 * @memcg: the memory cgroup ··· 1150 1130 css_put(&prev->css); 1151 1131 } 1152 1132 1153 - static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1133 + static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1134 + struct mem_cgroup *dead_memcg) 1154 1135 { 1155 - struct mem_cgroup *memcg = dead_memcg; 1156 1136 struct mem_cgroup_reclaim_iter *iter; 1157 1137 struct mem_cgroup_per_node *mz; 1158 1138 int nid; 1159 1139 int i; 1160 1140 1161 - for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1162 - for_each_node(nid) { 1163 - mz = mem_cgroup_nodeinfo(memcg, nid); 1164 - for (i = 0; i <= DEF_PRIORITY; i++) { 1165 - iter = &mz->iter[i]; 1166 - cmpxchg(&iter->position, 1167 - dead_memcg, NULL); 1168 - } 1141 + for_each_node(nid) { 1142 + mz = mem_cgroup_nodeinfo(from, nid); 1143 + for (i = 0; i <= DEF_PRIORITY; i++) { 1144 + iter = &mz->iter[i]; 1145 + cmpxchg(&iter->position, 1146 + dead_memcg, NULL); 1169 1147 } 1170 1148 } 1149 + } 1150 + 1151 + static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1152 + { 1153 + struct mem_cgroup *memcg = dead_memcg; 1154 + struct mem_cgroup *last; 1155 + 1156 + do { 1157 + __invalidate_reclaim_iterators(memcg, dead_memcg); 1158 + last = memcg; 1159 + } while ((memcg = parent_mem_cgroup(memcg))); 1160 + 1161 + /* 1162 + * When cgroup1 non-hierarchy mode is used, 1163 + * parent_mem_cgroup() does not walk all the way up to the 1164 + * cgroup root (root_mem_cgroup). So we have to handle 1165 + * dead_memcg from cgroup root separately. 1166 + */ 1167 + if (last != root_mem_cgroup) 1168 + __invalidate_reclaim_iterators(root_mem_cgroup, 1169 + dead_memcg); 1171 1170 } 1172 1171 1173 1172 /**
+77 -57
mm/mempolicy.c
··· 403 403 }, 404 404 }; 405 405 406 - static void migrate_page_add(struct page *page, struct list_head *pagelist, 406 + static int migrate_page_add(struct page *page, struct list_head *pagelist, 407 407 unsigned long flags); 408 408 409 409 struct queue_pages { ··· 429 429 } 430 430 431 431 /* 432 - * queue_pages_pmd() has three possible return values: 433 - * 1 - pages are placed on the right node or queued successfully. 434 - * 0 - THP was split. 435 - * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing 436 - * page was already on a node that does not follow the policy. 432 + * queue_pages_pmd() has four possible return values: 433 + * 0 - pages are placed on the right node or queued successfully. 434 + * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 435 + * specified. 436 + * 2 - THP was split. 437 + * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 438 + * existing page was already on a node that does not follow the 439 + * policy. 
437 440 */ 438 441 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 439 442 unsigned long end, struct mm_walk *walk) ··· 454 451 if (is_huge_zero_page(page)) { 455 452 spin_unlock(ptl); 456 453 __split_huge_pmd(walk->vma, pmd, addr, false, NULL); 454 + ret = 2; 457 455 goto out; 458 456 } 459 - if (!queue_pages_required(page, qp)) { 460 - ret = 1; 457 + if (!queue_pages_required(page, qp)) 461 458 goto unlock; 462 - } 463 459 464 - ret = 1; 465 460 flags = qp->flags; 466 461 /* go to thp migration */ 467 462 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 468 - if (!vma_migratable(walk->vma)) { 469 - ret = -EIO; 463 + if (!vma_migratable(walk->vma) || 464 + migrate_page_add(page, qp->pagelist, flags)) { 465 + ret = 1; 470 466 goto unlock; 471 467 } 472 - 473 - migrate_page_add(page, qp->pagelist, flags); 474 468 } else 475 469 ret = -EIO; 476 470 unlock: ··· 479 479 /* 480 480 * Scan through pages checking if pages follow certain conditions, 481 481 * and move them to the pagelist if they do. 482 + * 483 + * queue_pages_pte_range() has three possible return values: 484 + * 0 - pages are placed on the right node or queued successfully. 485 + * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 486 + * specified. 487 + * -EIO - only MPOL_MF_STRICT was specified and an existing page was already 488 + * on a node that does not follow the policy. 
482 489 */ 483 490 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 484 491 unsigned long end, struct mm_walk *walk) ··· 495 488 struct queue_pages *qp = walk->private; 496 489 unsigned long flags = qp->flags; 497 490 int ret; 491 + bool has_unmovable = false; 498 492 pte_t *pte; 499 493 spinlock_t *ptl; 500 494 501 495 ptl = pmd_trans_huge_lock(pmd, vma); 502 496 if (ptl) { 503 497 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 504 - if (ret > 0) 505 - return 0; 506 - else if (ret < 0) 498 + if (ret != 2) 507 499 return ret; 508 500 } 501 + /* THP was split, fall through to pte walk */ 509 502 510 503 if (pmd_trans_unstable(pmd)) 511 504 return 0; ··· 526 519 if (!queue_pages_required(page, qp)) 527 520 continue; 528 521 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 529 - if (!vma_migratable(vma)) 522 + /* MPOL_MF_STRICT must be specified if we get here */ 523 + if (!vma_migratable(vma)) { 524 + has_unmovable = true; 530 525 break; 531 - migrate_page_add(page, qp->pagelist, flags); 526 + } 527 + 528 + /* 529 + * Do not abort immediately since there may be 530 + * temporary off LRU pages in the range. Still 531 + * need migrate other LRU pages. 532 + */ 533 + if (migrate_page_add(page, qp->pagelist, flags)) 534 + has_unmovable = true; 532 535 } else 533 536 break; 534 537 } 535 538 pte_unmap_unlock(pte - 1, ptl); 536 539 cond_resched(); 540 + 541 + if (has_unmovable) 542 + return 1; 543 + 537 544 return addr != end ? -EIO : 0; 538 545 } 539 546 ··· 660 639 * 661 640 * If pages found in a given range are on a set of nodes (determined by 662 641 * @nodes and @flags,) it's isolated and queued to the pagelist which is 663 - * passed via @private.) 642 + * passed via @private. 643 + * 644 + * queue_pages_range() has three possible return values: 645 + * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were 646 + * specified. 647 + * 0 - queue pages successfully or no misplaced page. 
648 + * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified. 664 649 */ 665 650 static int 666 651 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, ··· 967 940 /* 968 941 * page migration, thp tail pages can be passed. 969 942 */ 970 - static void migrate_page_add(struct page *page, struct list_head *pagelist, 943 + static int migrate_page_add(struct page *page, struct list_head *pagelist, 971 944 unsigned long flags) 972 945 { 973 946 struct page *head = compound_head(page); ··· 980 953 mod_node_page_state(page_pgdat(head), 981 954 NR_ISOLATED_ANON + page_is_file_cache(head), 982 955 hpage_nr_pages(head)); 956 + } else if (flags & MPOL_MF_STRICT) { 957 + /* 958 + * Non-movable page may reach here. And, there may be 959 + * temporary off LRU pages or non-LRU movable pages. 960 + * Treat them as unmovable pages since they can't be 961 + * isolated, so they can't be moved at the moment. It 962 + * should return -EIO for this case too. 963 + */ 964 + return -EIO; 983 965 } 984 966 } 967 + 968 + return 0; 985 969 } 986 970 987 971 /* page allocation callback for NUMA node migration */ ··· 1180 1142 } else if (PageTransHuge(page)) { 1181 1143 struct page *thp; 1182 1144 1183 - thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 1184 - HPAGE_PMD_ORDER); 1145 + thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, 1146 + address, numa_node_id()); 1185 1147 if (!thp) 1186 1148 return NULL; 1187 1149 prep_transhuge_page(thp); ··· 1195 1157 } 1196 1158 #else 1197 1159 1198 - static void migrate_page_add(struct page *page, struct list_head *pagelist, 1160 + static int migrate_page_add(struct page *page, struct list_head *pagelist, 1199 1161 unsigned long flags) 1200 1162 { 1163 + return -EIO; 1201 1164 } 1202 1165 1203 1166 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, ··· 1221 1182 struct mempolicy *new; 1222 1183 unsigned long end; 1223 1184 int err; 1185 + int ret; 1224 1186 LIST_HEAD(pagelist); 
1225 1187 1226 1188 if (flags & ~(unsigned long)MPOL_MF_VALID) ··· 1283 1243 if (err) 1284 1244 goto mpol_out; 1285 1245 1286 - err = queue_pages_range(mm, start, end, nmask, 1246 + ret = queue_pages_range(mm, start, end, nmask, 1287 1247 flags | MPOL_MF_INVERT, &pagelist); 1288 - if (!err) 1289 - err = mbind_range(mm, start, end, new); 1248 + 1249 + if (ret < 0) { 1250 + err = -EIO; 1251 + goto up_out; 1252 + } 1253 + 1254 + err = mbind_range(mm, start, end, new); 1290 1255 1291 1256 if (!err) { 1292 1257 int nr_failed = 0; ··· 1304 1259 putback_movable_pages(&pagelist); 1305 1260 } 1306 1261 1307 - if (nr_failed && (flags & MPOL_MF_STRICT)) 1262 + if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 1308 1263 err = -EIO; 1309 1264 } else 1310 1265 putback_movable_pages(&pagelist); 1311 1266 1267 + up_out: 1312 1268 up_write(&mm->mmap_sem); 1313 - mpol_out: 1269 + mpol_out: 1314 1270 mpol_put(new); 1315 1271 return err; 1316 1272 } ··· 1734 1688 * freeing by another task. It is the caller's responsibility to free the 1735 1689 * extra reference for shared policies. 1736 1690 */ 1737 - static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1691 + struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1738 1692 unsigned long addr) 1739 1693 { 1740 1694 struct mempolicy *pol = __get_vma_policy(vma, addr); ··· 2083 2037 * @vma: Pointer to VMA or NULL if not available. 2084 2038 * @addr: Virtual Address of the allocation. Must be inside the VMA. 2085 2039 * @node: Which node to prefer for allocation (modulo policy). 2086 - * @hugepage: for hugepages try only the preferred node if possible 2087 2040 * 2088 2041 * This function allocates a page from the kernel page pool and applies 2089 2042 * a NUMA policy associated with the VMA or the current process. 
··· 2093 2048 */ 2094 2049 struct page * 2095 2050 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 2096 - unsigned long addr, int node, bool hugepage) 2051 + unsigned long addr, int node) 2097 2052 { 2098 2053 struct mempolicy *pol; 2099 2054 struct page *page; ··· 2109 2064 mpol_cond_put(pol); 2110 2065 page = alloc_page_interleave(gfp, order, nid); 2111 2066 goto out; 2112 - } 2113 - 2114 - if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 2115 - int hpage_node = node; 2116 - 2117 - /* 2118 - * For hugepage allocation and non-interleave policy which 2119 - * allows the current node (or other explicitly preferred 2120 - * node) we only try to allocate from the current/preferred 2121 - * node and don't fall back to other nodes, as the cost of 2122 - * remote accesses would likely offset THP benefits. 2123 - * 2124 - * If the policy is interleave, or does not allow the current 2125 - * node in its nodemask, we allocate the standard way. 2126 - */ 2127 - if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) 2128 - hpage_node = pol->v.preferred_node; 2129 - 2130 - nmask = policy_nodemask(gfp, pol); 2131 - if (!nmask || node_isset(hpage_node, *nmask)) { 2132 - mpol_cond_put(pol); 2133 - page = __alloc_pages_node(hpage_node, 2134 - gfp | __GFP_THISNODE, order); 2135 - goto out; 2136 - } 2137 2067 } 2138 2068 2139 2069 nmask = policy_nodemask(gfp, pol);
+24
mm/memremap.c
··· 403 403 404 404 mem_cgroup_uncharge(page); 405 405 406 + /* 407 + * When a device_private page is freed, the page->mapping field 408 + * may still contain a (stale) mapping value. For example, the 409 + * lower bits of page->mapping may still identify the page as 410 + * an anonymous page. Ultimately, this entire field is just 411 + * stale and wrong, and it will cause errors if not cleared. 412 + * One example is: 413 + * 414 + * migrate_vma_pages() 415 + * migrate_vma_insert_page() 416 + * page_add_new_anon_rmap() 417 + * __page_set_anon_rmap() 418 + * ...checks page->mapping, via PageAnon(page) call, 419 + * and incorrectly concludes that the page is an 420 + * anonymous page. Therefore, it incorrectly, 421 + * silently fails to set up the new anon rmap. 422 + * 423 + * For other types of ZONE_DEVICE pages, migration is either 424 + * handled differently or not done at all, so there is no need 425 + * to clear page->mapping. 426 + */ 427 + if (is_device_private_page(page)) 428 + page->mapping = NULL; 429 + 406 430 page->pgmap->ops->page_free(page); 407 431 } else if (!count) 408 432 __put_page(page);
+8
mm/rmap.c
··· 1475 1475 /* 1476 1476 * No need to invalidate here it will synchronize on 1477 1477 * against the special swap migration pte. 1478 + * 1479 + * The assignment to subpage above was computed from a 1480 + * swap PTE which results in an invalid pointer. 1481 + * Since only PAGE_SIZE pages can currently be 1482 + * migrated, just set it to page. This will need to be 1483 + * changed when hugepage migrations to device private 1484 + * memory are supported. 1478 1485 */ 1486 + subpage = page; 1479 1487 goto discard; 1480 1488 } 1481 1489
+1 -1
mm/shmem.c
··· 1466 1466 1467 1467 shmem_pseudo_vma_init(&pvma, info, hindex); 1468 1468 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1469 - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1469 + HPAGE_PMD_ORDER, &pvma, 0, numa_node_id()); 1470 1470 shmem_pseudo_vma_destroy(&pvma); 1471 1471 if (page) 1472 1472 prep_transhuge_page(page);
+1 -1
mm/usercopy.c
··· 147 147 bool to_user) 148 148 { 149 149 /* Reject if object wraps past end of memory. */ 150 - if (ptr + n < ptr) 150 + if (ptr + (n - 1) < ptr) 151 151 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); 152 152 153 153 /* Reject if NULL or ZERO-allocation. */
+11 -1
mm/vmalloc.c
··· 3279 3279 goto overflow; 3280 3280 3281 3281 /* 3282 + * If required width exeeds current VA block, move 3283 + * base downwards and then recheck. 3284 + */ 3285 + if (base + end > va->va_end) { 3286 + base = pvm_determine_end_from_reverse(&va, align) - end; 3287 + term_area = area; 3288 + continue; 3289 + } 3290 + 3291 + /* 3282 3292 * If this VA does not fit, move base downwards and recheck. 3283 3293 */ 3284 - if (base + start < va->va_start || base + end > va->va_end) { 3294 + if (base + start < va->va_start) { 3285 3295 va = node_to_va(rb_prev(&va->rb_node)); 3286 3296 base = pvm_determine_end_from_reverse(&va, align) - end; 3287 3297 term_area = area;
+2 -11
mm/vmscan.c
··· 88 88 /* Can pages be swapped as part of reclaim? */ 89 89 unsigned int may_swap:1; 90 90 91 - /* e.g. boosted watermark reclaim leaves slabs alone */ 92 - unsigned int may_shrinkslab:1; 93 - 94 91 /* 95 92 * Cgroups are not reclaimed below their configured memory.low, 96 93 * unless we threaten to OOM. If any cgroups are skipped due to ··· 2711 2714 shrink_node_memcg(pgdat, memcg, sc, &lru_pages); 2712 2715 node_lru_pages += lru_pages; 2713 2716 2714 - if (sc->may_shrinkslab) { 2715 - shrink_slab(sc->gfp_mask, pgdat->node_id, 2716 - memcg, sc->priority); 2717 - } 2717 + shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, 2718 + sc->priority); 2718 2719 2719 2720 /* Record the group's reclaim efficiency */ 2720 2721 vmpressure(sc->gfp_mask, memcg, false, ··· 3189 3194 .may_writepage = !laptop_mode, 3190 3195 .may_unmap = 1, 3191 3196 .may_swap = 1, 3192 - .may_shrinkslab = 1, 3193 3197 }; 3194 3198 3195 3199 /* ··· 3232 3238 .may_unmap = 1, 3233 3239 .reclaim_idx = MAX_NR_ZONES - 1, 3234 3240 .may_swap = !noswap, 3235 - .may_shrinkslab = 1, 3236 3241 }; 3237 3242 unsigned long lru_pages; 3238 3243 ··· 3279 3286 .may_writepage = !laptop_mode, 3280 3287 .may_unmap = 1, 3281 3288 .may_swap = may_swap, 3282 - .may_shrinkslab = 1, 3283 3289 }; 3284 3290 3285 3291 set_task_reclaim_state(current, &sc.reclaim_state); ··· 3590 3598 */ 3591 3599 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 3592 3600 sc.may_swap = !nr_boost_reclaim; 3593 - sc.may_shrinkslab = !nr_boost_reclaim; 3594 3601 3595 3602 /* 3596 3603 * Do some background aging of the anon list, to give
+4 -6
mm/workingset.c
··· 380 380 if (node->count && node->count == node->nr_values) { 381 381 if (list_empty(&node->private_list)) { 382 382 list_lru_add(&shadow_nodes, &node->private_list); 383 - __inc_lruvec_page_state(virt_to_page(node), 384 - WORKINGSET_NODES); 383 + __inc_lruvec_slab_state(node, WORKINGSET_NODES); 385 384 } 386 385 } else { 387 386 if (!list_empty(&node->private_list)) { 388 387 list_lru_del(&shadow_nodes, &node->private_list); 389 - __dec_lruvec_page_state(virt_to_page(node), 390 - WORKINGSET_NODES); 388 + __dec_lruvec_slab_state(node, WORKINGSET_NODES); 391 389 } 392 390 } 393 391 } ··· 478 480 } 479 481 480 482 list_lru_isolate(lru, item); 481 - __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES); 483 + __dec_lruvec_slab_state(node, WORKINGSET_NODES); 482 484 483 485 spin_unlock(lru_lock); 484 486 ··· 501 503 * shadow entries we were tracking ... 502 504 */ 503 505 xas_store(&xas, NULL); 504 - __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM); 506 + __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM); 505 507 506 508 out_invalid: 507 509 xa_unlock_irq(&mapping->i_pages);
+12 -2
mm/z3fold.c
··· 817 817 static void z3fold_destroy_pool(struct z3fold_pool *pool) 818 818 { 819 819 kmem_cache_destroy(pool->c_handle); 820 - z3fold_unregister_migration(pool); 821 - destroy_workqueue(pool->release_wq); 820 + 821 + /* 822 + * We need to destroy pool->compact_wq before pool->release_wq, 823 + * as any pending work on pool->compact_wq will call 824 + * queue_work(pool->release_wq, &pool->work). 825 + * 826 + * There are still outstanding pages until both workqueues are drained, 827 + * so we cannot unregister migration until then. 828 + */ 829 + 822 830 destroy_workqueue(pool->compact_wq); 831 + destroy_workqueue(pool->release_wq); 832 + z3fold_unregister_migration(pool); 823 833 kfree(pool); 824 834 } 825 835
+1 -1
samples/auxdisplay/cfag12864b-example.c
··· 245 245 246 246 if (argc != 2) { 247 247 printf( 248 - "Sintax: %s fbdev\n" 248 + "Syntax: %s fbdev\n" 249 249 "Usually: /dev/fb0, /dev/fb1...\n", argv[0]); 250 250 return -1; 251 251 }
+1
scripts/coccinelle/api/atomic_as_refcounter.cocci
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 1 2 // Check if refcount_t type and API should be used 2 3 // instead of atomic_t type when dealing with refcounters 3 4 //
-13
security/keys/trusted.c
··· 1228 1228 1229 1229 static int __init init_digests(void) 1230 1230 { 1231 - u8 digest[TPM_MAX_DIGEST_SIZE]; 1232 - int ret; 1233 - int i; 1234 - 1235 - ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE); 1236 - if (ret < 0) 1237 - return ret; 1238 - if (ret < TPM_MAX_DIGEST_SIZE) 1239 - return -EFAULT; 1240 - 1241 1231 digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests), 1242 1232 GFP_KERNEL); 1243 1233 if (!digests) 1244 1234 return -ENOMEM; 1245 - 1246 - for (i = 0; i < chip->nr_allocated_banks; i++) 1247 - memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE); 1248 1235 1249 1236 return 0; 1250 1237 }
+20 -1
sound/pci/hda/hda_generic.c
··· 6051 6051 } 6052 6052 EXPORT_SYMBOL_GPL(snd_hda_gen_free); 6053 6053 6054 + /** 6055 + * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting 6056 + * @codec: the HDA codec 6057 + * 6058 + * This can be put as patch_ops reboot_notify function. 6059 + */ 6060 + void snd_hda_gen_reboot_notify(struct hda_codec *codec) 6061 + { 6062 + /* Make the codec enter D3 to avoid spurious noises from the internal 6063 + * speaker during (and after) reboot 6064 + */ 6065 + snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); 6066 + snd_hda_codec_write(codec, codec->core.afg, 0, 6067 + AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 6068 + msleep(10); 6069 + } 6070 + EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify); 6071 + 6054 6072 #ifdef CONFIG_PM 6055 6073 /** 6056 6074 * snd_hda_gen_check_power_status - check the loopback power save state ··· 6096 6078 .init = snd_hda_gen_init, 6097 6079 .free = snd_hda_gen_free, 6098 6080 .unsol_event = snd_hda_jack_unsol_event, 6081 + .reboot_notify = snd_hda_gen_reboot_notify, 6099 6082 #ifdef CONFIG_PM 6100 6083 .check_power_status = snd_hda_gen_check_power_status, 6101 6084 #endif ··· 6119 6100 6120 6101 err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0); 6121 6102 if (err < 0) 6122 - return err; 6103 + goto error; 6123 6104 6124 6105 err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg); 6125 6106 if (err < 0)
+1
sound/pci/hda/hda_generic.h
··· 332 332 struct auto_pin_cfg *cfg); 333 333 int snd_hda_gen_build_controls(struct hda_codec *codec); 334 334 int snd_hda_gen_build_pcms(struct hda_codec *codec); 335 + void snd_hda_gen_reboot_notify(struct hda_codec *codec); 335 336 336 337 /* standard jack event callbacks */ 337 338 void snd_hda_gen_hp_automute(struct hda_codec *codec,
+3
sound/pci/hda/hda_intel.c
··· 2508 2508 /* AMD, X370 & co */ 2509 2509 { PCI_DEVICE(0x1022, 0x1457), 2510 2510 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB }, 2511 + /* AMD, X570 & co */ 2512 + { PCI_DEVICE(0x1022, 0x1487), 2513 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB }, 2511 2514 /* AMD Stoney */ 2512 2515 { PCI_DEVICE(0x1022, 0x157a), 2513 2516 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+1 -14
sound/pci/hda/patch_conexant.c
··· 163 163 { 164 164 struct conexant_spec *spec = codec->spec; 165 165 166 - switch (codec->core.vendor_id) { 167 - case 0x14f12008: /* CX8200 */ 168 - case 0x14f150f2: /* CX20722 */ 169 - case 0x14f150f4: /* CX20724 */ 170 - break; 171 - default: 172 - return; 173 - } 174 - 175 166 /* Turn the problematic codec into D3 to avoid spurious noises 176 167 from the internal speaker during (and after) reboot */ 177 168 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); 178 - 179 - snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); 180 - snd_hda_codec_write(codec, codec->core.afg, 0, 181 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 182 - msleep(10); 169 + snd_hda_gen_reboot_notify(codec); 183 170 } 184 171 185 172 static void cx_auto_free(struct hda_codec *codec)
+2 -10
sound/pci/hda/patch_realtek.c
··· 869 869 alc_shutup(codec); 870 870 } 871 871 872 - /* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */ 873 - static void alc_d3_at_reboot(struct hda_codec *codec) 874 - { 875 - snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); 876 - snd_hda_codec_write(codec, codec->core.afg, 0, 877 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 878 - msleep(10); 879 - } 880 - 881 872 #define alc_free snd_hda_gen_free 882 873 883 874 #ifdef CONFIG_PM ··· 5143 5152 struct alc_spec *spec = codec->spec; 5144 5153 5145 5154 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 5146 - spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ 5155 + spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */ 5147 5156 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 5148 5157 codec->power_save_node = 0; /* avoid click noises */ 5149 5158 snd_hda_apply_pincfgs(codec, pincfgs); ··· 6978 6987 SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6979 6988 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6980 6989 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 6990 + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 6981 6991 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 6982 6992 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 6983 6993 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+29 -8
sound/usb/mixer.c
··· 68 68 unsigned char *buffer; 69 69 unsigned int buflen; 70 70 DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS); 71 + DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS); 71 72 struct usb_audio_term oterm; 72 73 const struct usbmix_name_map *map; 73 74 const struct usbmix_selector_map *selector_map; ··· 745 744 return -EINVAL; 746 745 if (!desc->bNrInPins) 747 746 return -EINVAL; 747 + if (desc->bLength < sizeof(*desc) + desc->bNrInPins) 748 + return -EINVAL; 748 749 749 750 switch (state->mixer->protocol) { 750 751 case UAC_VERSION_1: ··· 776 773 * parse the source unit recursively until it reaches to a terminal 777 774 * or a branched unit. 778 775 */ 779 - static int check_input_term(struct mixer_build *state, int id, 776 + static int __check_input_term(struct mixer_build *state, int id, 780 777 struct usb_audio_term *term) 781 778 { 782 779 int protocol = state->mixer->protocol; 783 780 int err; 784 781 void *p1; 782 + unsigned char *hdr; 785 783 786 784 memset(term, 0, sizeof(*term)); 787 - while ((p1 = find_audio_control_unit(state, id)) != NULL) { 788 - unsigned char *hdr = p1; 785 + for (;;) { 786 + /* a loop in the terminal chain? 
*/ 787 + if (test_and_set_bit(id, state->termbitmap)) 788 + return -EINVAL; 789 + 790 + p1 = find_audio_control_unit(state, id); 791 + if (!p1) 792 + break; 793 + 794 + hdr = p1; 789 795 term->id = id; 790 796 791 797 if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) { ··· 812 800 813 801 /* call recursively to verify that the 814 802 * referenced clock entity is valid */ 815 - err = check_input_term(state, d->bCSourceID, term); 803 + err = __check_input_term(state, d->bCSourceID, term); 816 804 if (err < 0) 817 805 return err; 818 806 ··· 846 834 case UAC2_CLOCK_SELECTOR: { 847 835 struct uac_selector_unit_descriptor *d = p1; 848 836 /* call recursively to retrieve the channel info */ 849 - err = check_input_term(state, d->baSourceID[0], term); 837 + err = __check_input_term(state, d->baSourceID[0], term); 850 838 if (err < 0) 851 839 return err; 852 840 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ ··· 909 897 910 898 /* call recursively to verify that the 911 899 * referenced clock entity is valid */ 912 - err = check_input_term(state, d->bCSourceID, term); 900 + err = __check_input_term(state, d->bCSourceID, term); 913 901 if (err < 0) 914 902 return err; 915 903 ··· 960 948 case UAC3_CLOCK_SELECTOR: { 961 949 struct uac_selector_unit_descriptor *d = p1; 962 950 /* call recursively to retrieve the channel info */ 963 - err = check_input_term(state, d->baSourceID[0], term); 951 + err = __check_input_term(state, d->baSourceID[0], term); 964 952 if (err < 0) 965 953 return err; 966 954 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ ··· 976 964 return -EINVAL; 977 965 978 966 /* call recursively to retrieve the channel info */ 979 - err = check_input_term(state, d->baSourceID[0], term); 967 + err = __check_input_term(state, d->baSourceID[0], term); 980 968 if (err < 0) 981 969 return err; 982 970 ··· 992 980 } 993 981 } 994 982 return -ENODEV; 983 + } 984 + 985 + 986 + static int check_input_term(struct mixer_build *state, int id, 
987 + struct usb_audio_term *term) 988 + { 989 + memset(term, 0, sizeof(*term)); 990 + memset(state->termbitmap, 0, sizeof(state->termbitmap)); 991 + return __check_input_term(state, id, term); 995 992 } 996 993 997 994 /*
+1 -1
tools/hv/hv_get_dhcp_info.sh
··· 13 13 # the script prints the string "Disabled" to stdout. 14 14 # 15 15 # Each Distro is expected to implement this script in a distro specific 16 - # fashion. For instance on Distros that ship with Network Manager enabled, 16 + # fashion. For instance, on Distros that ship with Network Manager enabled, 17 17 # this script can be based on the Network Manager APIs for retrieving DHCP 18 18 # information. 19 19
+5 -3
tools/hv/hv_kvp_daemon.c
··· 700 700 701 701 702 702 /* 703 - * Gather the DNS state. 703 + * Gather the DNS state. 704 704 * Since there is no standard way to get this information 705 705 * across various distributions of interest; we just invoke 706 706 * an external script that needs to be ported across distros ··· 1051 1051 char *start; 1052 1052 1053 1053 /* 1054 - * in_buf has sequence of characters that are seperated by 1054 + * in_buf has sequence of characters that are separated by 1055 1055 * the character ';'. The last sequence does not have the 1056 1056 * terminating ";" character. 1057 1057 */ ··· 1386 1386 daemonize = 0; 1387 1387 break; 1388 1388 case 'h': 1389 + print_usage(argv); 1390 + exit(0); 1389 1391 default: 1390 1392 print_usage(argv); 1391 1393 exit(EXIT_FAILURE); ··· 1492 1490 case KVP_OP_GET_IP_INFO: 1493 1491 kvp_ip_val = &hv_msg->body.kvp_ip_val; 1494 1492 1495 - error = kvp_mac_to_ip(kvp_ip_val); 1493 + error = kvp_mac_to_ip(kvp_ip_val); 1496 1494 1497 1495 if (error) 1498 1496 hv_msg->error = error;
+1 -1
tools/hv/hv_set_ifconfig.sh
··· 12 12 # be used to configure the interface. 13 13 # 14 14 # Each Distro is expected to implement this script in a distro specific 15 - # fashion. For instance on Distros that ship with Network Manager enabled, 15 + # fashion. For instance, on Distros that ship with Network Manager enabled, 16 16 # this script can be based on the Network Manager APIs for configuring the 17 17 # interface. 18 18 #
+3 -1
tools/hv/hv_vss_daemon.c
··· 42 42 * If a partition is mounted more than once, only the first 43 43 * FREEZE/THAW can succeed and the later ones will get 44 44 * EBUSY/EINVAL respectively: there could be 2 cases: 45 - * 1) a user may mount the same partition to differnt directories 45 + * 1) a user may mount the same partition to different directories 46 46 * by mistake or on purpose; 47 47 * 2) The subvolume of btrfs appears to have the same partition 48 48 * mounted more than once. ··· 218 218 daemonize = 0; 219 219 break; 220 220 case 'h': 221 + print_usage(argv); 222 + exit(0); 221 223 default: 222 224 print_usage(argv); 223 225 exit(EXIT_FAILURE);
+42 -33
tools/hv/lsvmbus
··· 4 4 import os 5 5 from optparse import OptionParser 6 6 7 + help_msg = "print verbose messages. Try -vv, -vvv for more verbose messages" 7 8 parser = OptionParser() 8 - parser.add_option("-v", "--verbose", dest="verbose", 9 - help="print verbose messages. Try -vv, -vvv for \ 10 - more verbose messages", action="count") 9 + parser.add_option( 10 + "-v", "--verbose", dest="verbose", help=help_msg, action="count") 11 11 12 12 (options, args) = parser.parse_args() 13 13 ··· 21 21 exit(-1) 22 22 23 23 vmbus_dev_dict = { 24 - '{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]', 25 - '{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]', 26 - '{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]', 27 - '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]', 28 - '{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]', 29 - '{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]', 30 - '{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]', 31 - '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse', 32 - '{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard', 33 - '{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter', 34 - '{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter', 35 - '{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller', 36 - '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller', 37 - '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter', 38 - '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter', 39 - '{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through', 40 - '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]', 41 - '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]', 42 - '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]', 24 + '{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system 
shutdown]', 25 + '{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]', 26 + '{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]', 27 + '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]', 28 + '{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]', 29 + '{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]', 30 + '{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]', 31 + '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse', 32 + '{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard', 33 + '{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter', 34 + '{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter', 35 + '{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller', 36 + '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller', 37 + '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter', 38 + '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter', 39 + '{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through', 40 + '{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]', 41 + '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]', 42 + '{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]', 43 43 } 44 + 44 45 45 46 def get_vmbus_dev_attr(dev_name, attr): 46 47 try: ··· 52 51 lines = [] 53 52 54 53 return lines 54 + 55 55 56 56 class VMBus_Dev: 57 57 pass ··· 68 66 69 67 chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping') 70 68 chn_vp_mapping = [c.strip() for c in chn_vp_mapping] 71 - chn_vp_mapping = sorted(chn_vp_mapping, 72 - key = lambda c : int(c.split(':')[0])) 69 + chn_vp_mapping = sorted( 70 + chn_vp_mapping, key=lambda c: int(c.split(':')[0])) 73 71 74 - chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' % 75 - (c.split(':')[0], c.split(':')[1]) 76 - for c in chn_vp_mapping] 72 + chn_vp_mapping = [ 73 + '\tRel_ID=%s, target_cpu=%s' % 74 + 
(c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping 75 + ] 77 76 d = VMBus_Dev() 78 77 d.sysfs_path = '%s/%s' % (vmbus_sys_path, f) 79 78 d.vmbus_id = vmbus_id ··· 88 85 vmbus_dev_list.append(d) 89 86 90 87 91 - vmbus_dev_list = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id)) 88 + vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id)) 92 89 93 90 format0 = '%2s: %s' 94 91 format1 = '%2s: Class_ID = %s - %s\n%s' ··· 98 95 if verbose == 0: 99 96 print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc)) 100 97 elif verbose == 1: 101 - print (('VMBUS ID ' + format1) % \ 102 - (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)) 98 + print( 99 + ('VMBUS ID ' + format1) % 100 + (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping) 101 + ) 103 102 else: 104 - print (('VMBUS ID ' + format2) % \ 105 - (d.vmbus_id, d.class_id, d.dev_desc, \ 106 - d.device_id, d.sysfs_path, d.chn_vp_mapping)) 103 + print( 104 + ('VMBUS ID ' + format2) % 105 + ( 106 + d.vmbus_id, d.class_id, d.dev_desc, 107 + d.device_id, d.sysfs_path, d.chn_vp_mapping 108 + ) 109 + )