···35233523 ramdisk_size= [RAM] Sizes of RAM disks in kilobytes35243524 See Documentation/blockdev/ramdisk.txt.3525352535263526+ random.trust_cpu={on,off}35273527+ [KNL] Enable or disable trusting the use of the35283528+ CPU's random number generator (if available) to35293529+ fully seed the kernel's CRNG. Default is controlled35303530+ by CONFIG_RANDOM_TRUST_CPU.35313531+35263532 ras=option[,option,...] [KNL] RAS-specific options3527353335283534 cec_disable [X86]
···33Required properties:44- compatible :55 - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc66- - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc76- reg : address and length of the lpi2c master registers87- interrupts : lpi2c interrupt98- clocks : lpi2c clock specifier···1011Examples:11121213lpi2c7: lpi2c7@40a50000 {1313- compatible = "fsl,imx8dv-lpi2c";1414+ compatible = "fsl,imx7ulp-lpi2c";1415 reg = <0x40A50000 0x10000>;1516 interrupt-parent = <&intc>;1617 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+1-1
Documentation/process/changes.rst
···86868787The build system, as of 4.18, requires pkg-config to check for installed8888kconfig tools and to determine flags settings for use in8989-'make {menu,n,g,x}config'. Previously pkg-config was being used but not8989+'make {g,x}config'. Previously pkg-config was being used but not9090verified or documented.91919292Flex
+5
Documentation/scsi/scsi-parameters.txt
···9797 allowing boot to proceed. none ignores them, expecting9898 user space to do the scan.9999100100+ scsi_mod.use_blk_mq=101101+ [SCSI] use blk-mq I/O path by default102102+ See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.103103+ Format: <y/n>104104+100105 sim710= [SCSI,HW]101106 See header of drivers/scsi/sim710.c.102107
···9494 };95959696 /*9797+ * Mark DMA peripherals connected via IOC port as dma-coherent. We do9898+ * it via overlay because peripherals defined in axs10x_mb.dtsi are9999+ * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so100100+ * only AXS103 board has HW-coherent DMA peripherals)101101+ * We don't need to mark pgu@17000 as dma-coherent because it uses102102+ * external DMA buffer located outside of IOC aperture.103103+ */104104+ axs10x_mb {105105+ ethernet@0x18000 {106106+ dma-coherent;107107+ };108108+109109+ ehci@0x40000 {110110+ dma-coherent;111111+ };112112+113113+ ohci@0x60000 {114114+ dma-coherent;115115+ };116116+117117+ mmc@0x15000 {118118+ dma-coherent;119119+ };120120+ };121121+122122+ /*97123 * The DW APB ICTL intc on MB is connected to CPU intc via a98124 * DT "invisible" DW APB GPIO block, configured to simply pass thru99125 * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
+26
arch/arc/boot/dts/axc003_idu.dtsi
···101101 };102102103103 /*104104+ * Mark DMA peripherals connected via IOC port as dma-coherent. We do105105+ * it via overlay because peripherals defined in axs10x_mb.dtsi are106106+ * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so107107+ * only AXS103 board has HW-coherent DMA peripherals)108108+ * We don't need to mark pgu@17000 as dma-coherent because it uses109109+ * external DMA buffer located outside of IOC aperture.110110+ */111111+ axs10x_mb {112112+ ethernet@0x18000 {113113+ dma-coherent;114114+ };115115+116116+ ehci@0x40000 {117117+ dma-coherent;118118+ };119119+120120+ ohci@0x60000 {121121+ dma-coherent;122122+ };123123+124124+ mmc@0x15000 {125125+ dma-coherent;126126+ };127127+ };128128+129129+ /*104130 * This INTC is actually connected to DW APB GPIO105131 * which acts as a wire between MB INTC and CPU INTC.106132 * GPIO INTC is configured in platform init code
···11-CONFIG_DEFAULT_HOSTNAME="ARCLinux"22-# CONFIG_SWAP is not set31CONFIG_SYSVIPC=y42CONFIG_POSIX_MQUEUE=y53# CONFIG_CROSS_MEMORY_ATTACH is not set···6163CONFIG_MOUSE_SERIAL=y6264CONFIG_MOUSE_SYNAPTICS_USB=y6365# CONFIG_LEGACY_PTYS is not set6464-# CONFIG_DEVKMEM is not set6566CONFIG_SERIAL_8250=y6667CONFIG_SERIAL_8250_CONSOLE=y6768CONFIG_SERIAL_8250_DW=y
-3
arch/arc/configs/axs103_defconfig
···11-CONFIG_DEFAULT_HOSTNAME="ARCLinux"22-# CONFIG_SWAP is not set31CONFIG_SYSVIPC=y42CONFIG_POSIX_MQUEUE=y53# CONFIG_CROSS_MEMORY_ATTACH is not set···6264CONFIG_MOUSE_SERIAL=y6365CONFIG_MOUSE_SYNAPTICS_USB=y6466# CONFIG_LEGACY_PTYS is not set6565-# CONFIG_DEVKMEM is not set6667CONFIG_SERIAL_8250=y6768CONFIG_SERIAL_8250_CONSOLE=y6869CONFIG_SERIAL_8250_DW=y
-3
arch/arc/configs/axs103_smp_defconfig
···11-CONFIG_DEFAULT_HOSTNAME="ARCLinux"22-# CONFIG_SWAP is not set31CONFIG_SYSVIPC=y42CONFIG_POSIX_MQUEUE=y53# CONFIG_CROSS_MEMORY_ATTACH is not set···6365CONFIG_MOUSE_SERIAL=y6466CONFIG_MOUSE_SYNAPTICS_USB=y6567# CONFIG_LEGACY_PTYS is not set6666-# CONFIG_DEVKMEM is not set6768CONFIG_SERIAL_8250=y6869CONFIG_SERIAL_8250_CONSOLE=y6970CONFIG_SERIAL_8250_DW=y
-2
arch/arc/configs/haps_hs_defconfig
···11-CONFIG_DEFAULT_HOSTNAME="ARCLinux"21# CONFIG_SWAP is not set32CONFIG_SYSVIPC=y43CONFIG_POSIX_MQUEUE=y···5657# CONFIG_SERIO_SERPORT is not set5758CONFIG_SERIO_ARC_PS2=y5859# CONFIG_LEGACY_PTYS is not set5959-# CONFIG_DEVKMEM is not set6060CONFIG_SERIAL_8250=y6161CONFIG_SERIAL_8250_CONSOLE=y6262CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/haps_hs_smp_defconfig
···11-CONFIG_DEFAULT_HOSTNAME="ARCLinux"21# CONFIG_SWAP is not set32CONFIG_SYSVIPC=y43CONFIG_POSIX_MQUEUE=y···5960# CONFIG_SERIO_SERPORT is not set6061CONFIG_SERIO_ARC_PS2=y6162# CONFIG_LEGACY_PTYS is not set6262-# CONFIG_DEVKMEM is not set6363CONFIG_SERIAL_8250=y6464CONFIG_SERIAL_8250_CONSOLE=y6565CONFIG_SERIAL_8250_NR_UARTS=1
-1
arch/arc/configs/hsdk_defconfig
···11-CONFIG_DEFAULT_HOSTNAME="ARCLinux"21CONFIG_SYSVIPC=y32# CONFIG_CROSS_MEMORY_ATTACH is not set43CONFIG_NO_HZ_IDLE=y
-1
arch/arc/configs/nps_defconfig
···5959# CONFIG_INPUT_MOUSE is not set6060# CONFIG_SERIO is not set6161# CONFIG_LEGACY_PTYS is not set6262-# CONFIG_DEVKMEM is not set6362CONFIG_SERIAL_8250=y6463CONFIG_SERIAL_8250_CONSOLE=y6564CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/nsim_700_defconfig
···11# CONFIG_LOCALVERSION_AUTO is not set22-CONFIG_DEFAULT_HOSTNAME="ARCLinux"32# CONFIG_SWAP is not set43CONFIG_SYSVIPC=y54CONFIG_POSIX_MQUEUE=y···4344# CONFIG_INPUT_MOUSE is not set4445# CONFIG_SERIO is not set4546# CONFIG_LEGACY_PTYS is not set4646-# CONFIG_DEVKMEM is not set4747CONFIG_SERIAL_ARC=y4848CONFIG_SERIAL_ARC_CONSOLE=y4949# CONFIG_HW_RANDOM is not set
-2
arch/arc/configs/nsim_hs_defconfig
···11# CONFIG_LOCALVERSION_AUTO is not set22-CONFIG_DEFAULT_HOSTNAME="ARCLinux"32# CONFIG_SWAP is not set43CONFIG_SYSVIPC=y54CONFIG_POSIX_MQUEUE=y···4445# CONFIG_INPUT_MOUSE is not set4546# CONFIG_SERIO is not set4647# CONFIG_LEGACY_PTYS is not set4747-# CONFIG_DEVKMEM is not set4848CONFIG_SERIAL_ARC=y4949CONFIG_SERIAL_ARC_CONSOLE=y5050# CONFIG_HW_RANDOM is not set
-2
arch/arc/configs/nsim_hs_smp_defconfig
···11# CONFIG_LOCALVERSION_AUTO is not set22-CONFIG_DEFAULT_HOSTNAME="ARCLinux"32# CONFIG_SWAP is not set43# CONFIG_CROSS_MEMORY_ATTACH is not set54CONFIG_HIGH_RES_TIMERS=y···4344# CONFIG_INPUT_MOUSE is not set4445# CONFIG_SERIO is not set4546# CONFIG_LEGACY_PTYS is not set4646-# CONFIG_DEVKMEM is not set4747CONFIG_SERIAL_ARC=y4848CONFIG_SERIAL_ARC_CONSOLE=y4949# CONFIG_HW_RANDOM is not set
-2
arch/arc/configs/nsimosci_defconfig
···11# CONFIG_LOCALVERSION_AUTO is not set22-CONFIG_DEFAULT_HOSTNAME="ARCLinux"32# CONFIG_SWAP is not set43CONFIG_SYSVIPC=y54# CONFIG_CROSS_MEMORY_ATTACH is not set···4748# CONFIG_SERIO_SERPORT is not set4849CONFIG_SERIO_ARC_PS2=y4950# CONFIG_LEGACY_PTYS is not set5050-# CONFIG_DEVKMEM is not set5151CONFIG_SERIAL_8250=y5252CONFIG_SERIAL_8250_CONSOLE=y5353CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/nsimosci_hs_defconfig
···11# CONFIG_LOCALVERSION_AUTO is not set22-CONFIG_DEFAULT_HOSTNAME="ARCLinux"32# CONFIG_SWAP is not set43CONFIG_SYSVIPC=y54# CONFIG_CROSS_MEMORY_ATTACH is not set···4647# CONFIG_SERIO_SERPORT is not set4748CONFIG_SERIO_ARC_PS2=y4849# CONFIG_LEGACY_PTYS is not set4949-# CONFIG_DEVKMEM is not set5050CONFIG_SERIAL_8250=y5151CONFIG_SERIAL_8250_CONSOLE=y5252CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/nsimosci_hs_smp_defconfig
···11-CONFIG_DEFAULT_HOSTNAME="ARCLinux"21# CONFIG_SWAP is not set32CONFIG_SYSVIPC=y43# CONFIG_CROSS_MEMORY_ATTACH is not set···5758# CONFIG_SERIO_SERPORT is not set5859CONFIG_SERIO_ARC_PS2=y5960# CONFIG_LEGACY_PTYS is not set6060-# CONFIG_DEVKMEM is not set6161CONFIG_SERIAL_8250=y6262CONFIG_SERIAL_8250_CONSOLE=y6363CONFIG_SERIAL_8250_NR_UARTS=1
-1
arch/arc/configs/tb10x_defconfig
···5757# CONFIG_SERIO is not set5858# CONFIG_VT is not set5959# CONFIG_LEGACY_PTYS is not set6060-# CONFIG_DEVKMEM is not set6160CONFIG_SERIAL_8250=y6261CONFIG_SERIAL_8250_CONSOLE=y6362CONFIG_SERIAL_8250_NR_UARTS=1
-2
arch/arc/configs/vdk_hs38_defconfig
···11# CONFIG_LOCALVERSION_AUTO is not set22-CONFIG_DEFAULT_HOSTNAME="ARCLinux"32# CONFIG_CROSS_MEMORY_ATTACH is not set43CONFIG_HIGH_RES_TIMERS=y54CONFIG_IKCONFIG=y···5253CONFIG_MOUSE_PS2_TOUCHKIT=y5354CONFIG_SERIO_ARC_PS2=y5455# CONFIG_LEGACY_PTYS is not set5555-# CONFIG_DEVKMEM is not set5656CONFIG_SERIAL_8250=y5757CONFIG_SERIAL_8250_CONSOLE=y5858CONFIG_SERIAL_8250_DW=y
-1
arch/arc/configs/vdk_hs38_smp_defconfig
···11# CONFIG_LOCALVERSION_AUTO is not set22-CONFIG_DEFAULT_HOSTNAME="ARCLinux"32# CONFIG_CROSS_MEMORY_ATTACH is not set43CONFIG_HIGH_RES_TIMERS=y54CONFIG_IKCONFIG=y
···8383static void show_faulting_vma(unsigned long address, char *buf)8484{8585 struct vm_area_struct *vma;8686- struct inode *inode;8787- unsigned long ino = 0;8888- dev_t dev = 0;8986 char *nm = buf;9087 struct mm_struct *active_mm = current->active_mm;9188···9699 * if the container VMA is not found97100 */98101 if (vma && (vma->vm_start <= address)) {9999- struct file *file = vma->vm_file;100100- if (file) {101101- nm = file_path(file, buf, PAGE_SIZE - 1);102102- inode = file_inode(vma->vm_file);103103- dev = inode->i_sb->s_dev;104104- ino = inode->i_ino;102102+ if (vma->vm_file) {103103+ nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);104104+ if (IS_ERR(nm))105105+ nm = "?";105106 }106107 pr_info(" @off 0x%lx in [%s]\n"107108 " VMA: 0x%08lx to 0x%08lx\n",
+21-15
arch/arc/mm/cache.c
···65656666 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",6767 perip_base,6868- IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));6868+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));69697070 return buf;7171}···897897}898898899899/*900900- * DMA ops for systems with IOC901901- * IOC hardware snoops all DMA traffic keeping the caches consistent with902902- * memory - eliding need for any explicit cache maintenance of DMA buffers903903- */904904-static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}905905-static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}906906-static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}907907-908908-/*909900 * Exported DMA API910901 */911902void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)···11441153{11451154 unsigned int ioc_base, mem_sz;1146115511561156+ /*11571157+ * As for today we don't support both IOC and ZONE_HIGHMEM enabled11581158+ * simultaneously. 
This happens because as of today IOC aperture covers11591159+ * only ZONE_NORMAL (low mem) and any dma transactions outside this11601160+ * region won't be HW coherent.11611161+ * If we want to use both IOC and ZONE_HIGHMEM we can use11621162+ * bounce_buffer to handle dma transactions to HIGHMEM.11631163+ * Also it is possible to modify dma_direct cache ops or increase IOC11641164+ * aperture size if we are planning to use HIGHMEM without PAE.11651165+ */11661166+ if (IS_ENABLED(CONFIG_HIGHMEM))11671167+ panic("IOC and HIGHMEM can't be used simultaneously");11681168+11471169 /* Flush + invalidate + disable L1 dcache */11481170 __dc_disable();11491171···12681264 if (is_isa_arcv2() && ioc_enable)12691265 arc_ioc_setup();1270126612711271- if (is_isa_arcv2() && ioc_enable) {12721272- __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;12731273- __dma_cache_inv = __dma_cache_inv_ioc;12741274- __dma_cache_wback = __dma_cache_wback_ioc;12751275- } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {12671267+ if (is_isa_arcv2() && l2_line_sz && slc_enable) {12761268 __dma_cache_wback_inv = __dma_cache_wback_inv_slc;12771269 __dma_cache_inv = __dma_cache_inv_slc;12781270 __dma_cache_wback = __dma_cache_wback_slc;···12771277 __dma_cache_inv = __dma_cache_inv_l1;12781278 __dma_cache_wback = __dma_cache_wback_l1;12791279 }12801280+ /*12811281+ * In case of IOC (say IOC+SLC case), pointers above could still be set12821282+ * but end up not being relevant as the first function in chain is not12831283+ * called at all for @dma_direct_ops12841284+ * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()12851285+ */12801286}1281128712821288void __ref arc_cache_init(void)
+41-41
arch/arc/mm/dma.c
···66 * published by the Free Software Foundation.77 */8899-/*1010- * DMA Coherent API Notes1111- *1212- * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is1313- * implemented by accessing it using a kernel virtual address, with1414- * Cache bit off in the TLB entry.1515- *1616- * The default DMA address == Phy address which is 0x8000_0000 based.1717- */1818-199#include <linux/dma-noncoherent.h>2010#include <asm/cache.h>2111#include <asm/cacheflush.h>22121313+/*1414+ * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)1515+ * - hardware IOC not available (or "dma-coherent" not set for device in DT)1616+ * - But still handle both coherent and non-coherent requests from caller1717+ *1818+ * For DMA coherent hardware (IOC) generic code suffices1919+ */2320void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,2421 gfp_t gfp, unsigned long attrs)2522{···2427 struct page *page;2528 phys_addr_t paddr;2629 void *kvaddr;2727- int need_coh = 1, need_kvaddr = 0;3030+ bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);3131+3232+ /*3333+ * __GFP_HIGHMEM flag is cleared by upper layer functions3434+ * (in include/linux/dma-mapping.h) so we should never get a3535+ * __GFP_HIGHMEM here.3636+ */3737+ BUG_ON(gfp & __GFP_HIGHMEM);28382939 page = alloc_pages(gfp, order);3040 if (!page)3141 return NULL;3232-3333- /*3434- * IOC relies on all data (even coherent DMA data) being in cache3535- * Thus allocate normal cached memory3636- *3737- * The gains with IOC are two pronged:3838- * -For streaming data, elides need for cache maintenance, saving3939- * cycles in flush code, and bus bandwidth as all the lines of a4040- * buffer need to be flushed out to memory4141- * -For coherent data, Read/Write to buffers terminate early in cache4242- * (vs. 
always going to memory - thus are faster)4343- */4444- if ((is_isa_arcv2() && ioc_enable) ||4545- (attrs & DMA_ATTR_NON_CONSISTENT))4646- need_coh = 0;4747-4848- /*4949- * - A coherent buffer needs MMU mapping to enforce non-cachability5050- * - A highmem page needs a virtual handle (hence MMU mapping)5151- * independent of cachability5252- */5353- if (PageHighMem(page) || need_coh)5454- need_kvaddr = 1;55425643 /* This is linear addr (0x8000_0000 based) */5744 paddr = page_to_phys(page);58455946 *dma_handle = paddr;60476161- /* This is kernel Virtual address (0x7000_0000 based) */6262- if (need_kvaddr) {4848+ /*4949+ * A coherent buffer needs MMU mapping to enforce non-cachability.5050+ * kvaddr is kernel Virtual address (0x7000_0000 based).5151+ */5252+ if (need_coh) {6353 kvaddr = ioremap_nocache(paddr, size);6454 if (kvaddr == NULL) {6555 __free_pages(page, order);···7793{7894 phys_addr_t paddr = dma_handle;7995 struct page *page = virt_to_page(paddr);8080- int is_non_coh = 1;81968282- is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||8383- (is_isa_arcv2() && ioc_enable);8484-8585- if (PageHighMem(page) || !is_non_coh)9797+ if (!(attrs & DMA_ATTR_NON_CONSISTENT))8698 iounmap((void __force __iomem *)vaddr);879988100 __free_pages(page, get_order(size));···163183164184 default:165185 break;186186+ }187187+}188188+189189+/*190190+ * Plug in coherent or noncoherent dma ops191191+ */192192+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,193193+ const struct iommu_ops *iommu, bool coherent)194194+{195195+ /*196196+ * IOC hardware snoops all DMA traffic keeping the caches consistent197197+ * with memory - eliding need for any explicit cache maintenance of198198+ * DMA buffers - so we can use dma_direct cache ops.199199+ */200200+ if (is_isa_arcv2() && ioc_enable && coherent) {201201+ set_dma_ops(dev, &dma_direct_ops);202202+ dev_info(dev, "use dma_direct_ops cache ops\n");203203+ } else {204204+ set_dma_ops(dev, &dma_noncoherent_ops);205205+ 
dev_info(dev, "use dma_noncoherent_ops cache ops\n");166206 }167207}
-1
arch/arm/include/asm/kvm_host.h
···223223 struct kvm_vcpu_events *events);224224225225#define KVM_ARCH_WANT_MMU_NOTIFIER226226-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);227226int kvm_unmap_hva_range(struct kvm *kvm,228227 unsigned long start, unsigned long end);229228void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
···6161 u64 vmid_gen;6262 u32 vmid;63636464- /* 1-level 2nd stage table and lock */6565- spinlock_t pgd_lock;6464+ /* 1-level 2nd stage table, protected by kvm->mmu_lock */6665 pgd_t *pgd;67666867 /* VTTBR value associated with above pgd and vmid */···356357 struct kvm_vcpu_events *events);357358358359#define KVM_ARCH_WANT_MMU_NOTIFIER359359-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);360360int kvm_unmap_hva_range(struct kvm *kvm,361361 unsigned long start, unsigned long end);362362void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+6-3
arch/arm64/kvm/hyp/switch.c
···9898 val = read_sysreg(cpacr_el1);9999 val |= CPACR_EL1_TTA;100100 val &= ~CPACR_EL1_ZEN;101101- if (!update_fp_enabled(vcpu))101101+ if (!update_fp_enabled(vcpu)) {102102 val &= ~CPACR_EL1_FPEN;103103+ __activate_traps_fpsimd32(vcpu);104104+ }103105104106 write_sysreg(val, cpacr_el1);105107···116114117115 val = CPTR_EL2_DEFAULT;118116 val |= CPTR_EL2_TTA | CPTR_EL2_TZ;119119- if (!update_fp_enabled(vcpu))117117+ if (!update_fp_enabled(vcpu)) {120118 val |= CPTR_EL2_TFP;119119+ __activate_traps_fpsimd32(vcpu);120120+ }121121122122 write_sysreg(val, cptr_el2);123123}···133129 if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))134130 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);135131136136- __activate_traps_fpsimd32(vcpu);137132 if (has_vhe())138133 activate_traps_vhe(vcpu);139134 else
+6-4
arch/arm64/mm/mmu.c
···985985986986 pmd = READ_ONCE(*pmdp);987987988988- /* No-op for empty entry and WARN_ON for valid entry */989989- if (!pmd_present(pmd) || !pmd_table(pmd)) {988988+ if (!pmd_present(pmd))989989+ return 1;990990+ if (!pmd_table(pmd)) {990991 VM_WARN_ON(!pmd_table(pmd));991992 return 1;992993 }···1008100710091008 pud = READ_ONCE(*pudp);1010100910111011- /* No-op for empty entry and WARN_ON for valid entry */10121012- if (!pud_present(pud) || !pud_table(pud)) {10101010+ if (!pud_present(pud))10111011+ return 1;10121012+ if (!pud_table(pud)) {10131013 VM_WARN_ON(!pud_table(pud));10141014 return 1;10151015 }
···931931 bool write);932932933933#define KVM_ARCH_WANT_MMU_NOTIFIER934934-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);935934int kvm_unmap_hva_range(struct kvm *kvm,936935 unsigned long start, unsigned long end);937936void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+1
arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
···4040 int desc; /* the current descriptor */4141 struct ltq_dma_desc *desc_base; /* the descriptor base */4242 int phys; /* physical addr */4343+ struct device *dev;4344};44454546enum {
+20
arch/mips/kernel/vdso.c
···1313#include <linux/err.h>1414#include <linux/init.h>1515#include <linux/ioport.h>1616+#include <linux/kernel.h>1617#include <linux/mm.h>1718#include <linux/sched.h>1819#include <linux/slab.h>···21202221#include <asm/abi.h>2322#include <asm/mips-cps.h>2323+#include <asm/page.h>2424#include <asm/vdso.h>25252626/* Kernel-provided data used by the VDSO. */···130128 vvar_size = gic_size + PAGE_SIZE;131129 size = vvar_size + image->size;132130131131+ /*132132+ * Find a region that's large enough for us to perform the133133+ * colour-matching alignment below.134134+ */135135+ if (cpu_has_dc_aliases)136136+ size += shm_align_mask + 1;137137+133138 base = get_unmapped_area(NULL, 0, size, 0, 0);134139 if (IS_ERR_VALUE(base)) {135140 ret = base;136141 goto out;142142+ }143143+144144+ /*145145+ * If we suffer from dcache aliasing, ensure that the VDSO data page146146+ * mapping is coloured the same as the kernel's mapping of that memory.147147+ * This ensures that when the kernel updates the VDSO data userland148148+ * will observe it without requiring cache invalidations.149149+ */150150+ if (cpu_has_dc_aliases) {151151+ base = __ALIGN_MASK(base, shm_align_mask);152152+ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;137153 }138154139155 data_addr = base + gic_size;
-10
arch/mips/kvm/mmu.c
···512512 return 1;513513}514514515515-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)516516-{517517- unsigned long end = hva + PAGE_SIZE;518518-519519- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);520520-521521- kvm_mips_callbacks->flush_shadow_all(kvm);522522- return 0;523523-}524524-525515int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)526516{527517 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
···358358 unsigned long pp, key;359359 unsigned long v, orig_v, gr;360360 __be64 *hptep;361361- int index;361361+ long int index;362362 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);363363364364 if (kvm_is_radix(vcpu->kvm))
···8585#ifdef CONFIG_BLK_DEV_INITRD8686static void __init setup_initrd(void)8787{8888- extern char __initramfs_start[];8989- extern unsigned long __initramfs_size;9088 unsigned long size;9191-9292- if (__initramfs_size > 0) {9393- initrd_start = (unsigned long)(&__initramfs_start);9494- initrd_end = initrd_start + __initramfs_size;9595- }96899790 if (initrd_start >= initrd_end) {9891 printk(KERN_INFO "initrd not found or empty");
+7-1
arch/s390/include/asm/mmu.h
···1616 unsigned long asce;1717 unsigned long asce_limit;1818 unsigned long vdso_base;1919- /* The mmu context allocates 4K page tables. */1919+ /*2020+ * The following bitfields need a down_write on the mm2121+ * semaphore when they are written to. As they are only2222+ * written once, they can be read without a lock.2323+ *2424+ * The mmu context allocates 4K page tables.2525+ */2026 unsigned int alloc_pgste:1;2127 /* The mmu context uses extended page tables. */2228 unsigned int has_pgste:1;
+2
arch/s390/kvm/kvm-s390.c
···695695 r = -EINVAL;696696 else {697697 r = 0;698698+ down_write(&kvm->mm->mmap_sem);698699 kvm->mm->context.allow_gmap_hpage_1m = 1;700700+ up_write(&kvm->mm->mmap_sem);699701 /*700702 * We might have to create fake 4k page701703 * tables. To avoid that the hardware works on
+18-12
arch/s390/kvm/priv.c
···280280 goto retry;281281 }282282 }283283- if (rc)284284- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);285283 up_read(¤t->mm->mmap_sem);284284+ if (rc == -EFAULT)285285+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);286286+ if (rc < 0)287287+ return rc;286288 vcpu->run->s.regs.gprs[reg1] &= ~0xff;287289 vcpu->run->s.regs.gprs[reg1] |= key;288290 return 0;···326324 goto retry;327325 }328326 }329329- if (rc < 0)330330- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);331327 up_read(¤t->mm->mmap_sem);328328+ if (rc == -EFAULT)329329+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);330330+ if (rc < 0)331331+ return rc;332332 kvm_s390_set_psw_cc(vcpu, rc);333333 return 0;334334}···394390 FAULT_FLAG_WRITE, &unlocked);395391 rc = !rc ? -EAGAIN : rc;396392 }393393+ up_read(¤t->mm->mmap_sem);397394 if (rc == -EFAULT)398395 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);399399-400400- up_read(¤t->mm->mmap_sem);401401- if (rc >= 0)402402- start += PAGE_SIZE;396396+ if (rc < 0)397397+ return rc;398398+ start += PAGE_SIZE;403399 }404400405401 if (m3 & (SSKE_MC | SSKE_MR)) {···10061002 FAULT_FLAG_WRITE, &unlocked);10071003 rc = !rc ? -EAGAIN : rc;10081004 }10051005+ up_read(¤t->mm->mmap_sem);10091006 if (rc == -EFAULT)10101007 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);10111011-10121012- up_read(¤t->mm->mmap_sem);10131013- if (rc >= 0)10141014- start += PAGE_SIZE;10081008+ if (rc == -EAGAIN)10091009+ continue;10101010+ if (rc < 0)10111011+ return rc;10151012 }10131013+ start += PAGE_SIZE;10161014 }10171015 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {10181016 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
+2-1
arch/s390/kvm/vsie.c
···173173 return set_validity_icpt(scb_s, 0x0039U);174174175175 /* copy only the wrapping keys */176176- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))176176+ if (read_guest_real(vcpu, crycb_addr + 72,177177+ vsie_page->crycb.dea_wrapping_key_mask, 56))177178 return set_validity_icpt(scb_s, 0x0035U);178179179180 scb_s->ecb3 |= ecb3_flags;
+6-6
arch/x86/include/asm/atomic.h
···8080 * true if the result is zero, or false for all8181 * other cases.8282 */8383-#define arch_atomic_sub_and_test arch_atomic_sub_and_test8483static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)8584{8685 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);8786}8787+#define arch_atomic_sub_and_test arch_atomic_sub_and_test88888989/**9090 * arch_atomic_inc - increment atomic variable···9292 *9393 * Atomically increments @v by 1.9494 */9595-#define arch_atomic_inc arch_atomic_inc9695static __always_inline void arch_atomic_inc(atomic_t *v)9796{9897 asm volatile(LOCK_PREFIX "incl %0"9998 : "+m" (v->counter));10099}100100+#define arch_atomic_inc arch_atomic_inc101101102102/**103103 * arch_atomic_dec - decrement atomic variable···105105 *106106 * Atomically decrements @v by 1.107107 */108108-#define arch_atomic_dec arch_atomic_dec109108static __always_inline void arch_atomic_dec(atomic_t *v)110109{111110 asm volatile(LOCK_PREFIX "decl %0"112111 : "+m" (v->counter));113112}113113+#define arch_atomic_dec arch_atomic_dec114114115115/**116116 * arch_atomic_dec_and_test - decrement and test···120120 * returns true if the result is 0, or false for all other121121 * cases.122122 */123123-#define arch_atomic_dec_and_test arch_atomic_dec_and_test124123static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)125124{126125 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);127126}127127+#define arch_atomic_dec_and_test arch_atomic_dec_and_test128128129129/**130130 * arch_atomic_inc_and_test - increment and test···134134 * and returns true if the result is zero, or false for all135135 * other cases.136136 */137137-#define arch_atomic_inc_and_test arch_atomic_inc_and_test138137static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)139138{140139 GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);141140}141141+#define arch_atomic_inc_and_test arch_atomic_inc_and_test142142143143/**144144 * arch_atomic_add_negative - 
add and test if negative···149149 * if the result is negative, or false when150150 * result is greater than or equal to zero.151151 */152152-#define arch_atomic_add_negative arch_atomic_add_negative153152static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)154153{155154 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);156155}156156+#define arch_atomic_add_negative arch_atomic_add_negative157157158158/**159159 * arch_atomic_add_return - add integer and return
+4-4
arch/x86/include/asm/atomic64_32.h
···205205 *206206 * Atomically increments @v by 1.207207 */208208-#define arch_atomic64_inc arch_atomic64_inc209208static inline void arch_atomic64_inc(atomic64_t *v)210209{211210 __alternative_atomic64(inc, inc_return, /* no output */,212211 "S" (v) : "memory", "eax", "ecx", "edx");213212}213213+#define arch_atomic64_inc arch_atomic64_inc214214215215/**216216 * arch_atomic64_dec - decrement atomic64 variable···218218 *219219 * Atomically decrements @v by 1.220220 */221221-#define arch_atomic64_dec arch_atomic64_dec222221static inline void arch_atomic64_dec(atomic64_t *v)223222{224223 __alternative_atomic64(dec, dec_return, /* no output */,225224 "S" (v) : "memory", "eax", "ecx", "edx");226225}226226+#define arch_atomic64_dec arch_atomic64_dec227227228228/**229229 * arch_atomic64_add_unless - add unless the number is a given value···245245 return (int)a;246246}247247248248-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero249248static inline int arch_atomic64_inc_not_zero(atomic64_t *v)250249{251250 int r;···252253 "S" (v) : "ecx", "edx", "memory");253254 return r;254255}256256+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero255257256256-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive257258static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)258259{259260 long long r;···261262 "S" (v) : "ecx", "memory");262263 return r;263264}265265+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive264266265267#undef alternative_atomic64266268#undef __alternative_atomic64
+6-6
arch/x86/include/asm/atomic64_64.h
···7171 * true if the result is zero, or false for all7272 * other cases.7373 */7474-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test7574static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)7675{7776 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);7877}7878+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test79798080/**8181 * arch_atomic64_inc - increment atomic64 variable···8383 *8484 * Atomically increments @v by 1.8585 */8686-#define arch_atomic64_inc arch_atomic64_inc8786static __always_inline void arch_atomic64_inc(atomic64_t *v)8887{8988 asm volatile(LOCK_PREFIX "incq %0"9089 : "=m" (v->counter)9190 : "m" (v->counter));9291}9292+#define arch_atomic64_inc arch_atomic64_inc93939494/**9595 * arch_atomic64_dec - decrement atomic64 variable···9797 *9898 * Atomically decrements @v by 1.9999 */100100-#define arch_atomic64_dec arch_atomic64_dec101100static __always_inline void arch_atomic64_dec(atomic64_t *v)102101{103102 asm volatile(LOCK_PREFIX "decq %0"104103 : "=m" (v->counter)105104 : "m" (v->counter));106105}106106+#define arch_atomic64_dec arch_atomic64_dec107107108108/**109109 * arch_atomic64_dec_and_test - decrement and test···113113 * returns true if the result is 0, or false for all other114114 * cases.115115 */116116-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test117116static inline bool arch_atomic64_dec_and_test(atomic64_t *v)118117{119118 GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);120119}120120+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test121121122122/**123123 * arch_atomic64_inc_and_test - increment and test···127127 * and returns true if the result is zero, or false for all128128 * other cases.129129 */130130-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test131130static inline bool arch_atomic64_inc_and_test(atomic64_t *v)132131{133132 GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);134133}134134+#define arch_atomic64_inc_and_test 
arch_atomic64_inc_and_test135135136136/**137137 * arch_atomic64_add_negative - add and test if negative···142142 * if the result is negative, or false when143143 * result is greater than or equal to zero.144144 */145145-#define arch_atomic64_add_negative arch_atomic64_add_negative146145static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)147146{148147 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);149148}149149+#define arch_atomic64_add_negative arch_atomic64_add_negative150150151151/**152152 * arch_atomic64_add_return - add and return
+11-1
arch/x86/include/asm/kdebug.h
···2222 DIE_NMIUNKNOWN,2323};24242525+enum show_regs_mode {2626+ SHOW_REGS_SHORT,2727+ /*2828+ * For when userspace crashed, but we don't think it's our fault, and2929+ * therefore don't print kernel registers.3030+ */3131+ SHOW_REGS_USER,3232+ SHOW_REGS_ALL3333+};3434+2535extern void die(const char *, struct pt_regs *,long);2636extern int __must_check __die(const char *, struct pt_regs *, long);2737extern void show_stack_regs(struct pt_regs *regs);2828-extern void __show_regs(struct pt_regs *regs, int all);3838+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);2939extern void show_iret_regs(struct pt_regs *regs);3040extern unsigned long oops_begin(void);3141extern void oops_end(unsigned long, struct pt_regs *, int signr);
+7-15
arch/x86/include/asm/kvm_host.h
···12371237#define EMULTYPE_NO_DECODE (1 << 0)12381238#define EMULTYPE_TRAP_UD (1 << 1)12391239#define EMULTYPE_SKIP (1 << 2)12401240-#define EMULTYPE_RETRY (1 << 3)12411241-#define EMULTYPE_NO_REEXECUTE (1 << 4)12421242-#define EMULTYPE_NO_UD_ON_FAIL (1 << 5)12431243-#define EMULTYPE_VMWARE (1 << 6)12441244-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,12451245- int emulation_type, void *insn, int insn_len);12461246-12471247-static inline int emulate_instruction(struct kvm_vcpu *vcpu,12481248- int emulation_type)12491249-{12501250- return x86_emulate_instruction(vcpu, 0,12511251- emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);12521252-}12401240+#define EMULTYPE_ALLOW_RETRY (1 << 3)12411241+#define EMULTYPE_NO_UD_ON_FAIL (1 << 4)12421242+#define EMULTYPE_VMWARE (1 << 5)12431243+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);12441244+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,12451245+ void *insn, int insn_len);1253124612541247void kvm_enable_efer_bits(u64);12551248bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);···14431450 ____kvm_handle_fault_on_reboot(insn, "")1444145114451452#define KVM_ARCH_WANT_MMU_NOTIFIER14461446-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);14471453int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);14481454int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);14491455int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);···14551463void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);1456146414571465int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,14581458- unsigned long ipi_bitmap_high, int min,14661466+ unsigned long ipi_bitmap_high, u32 min,14591467 unsigned long icr, int op_64_bit);1460146814611469u64 kvm_get_arch_capabilities(void);
···413413 if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {414414 /* Something in the core code broke! Survive gracefully */415415 pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);416416- return EINVAL;416416+ return -EINVAL;417417 }418418419419 ret = assign_managed_vector(irqd, vector_searchmask);
+16-8
arch/x86/kernel/cpu/microcode/amd.c
···504504 struct microcode_amd *mc_amd;505505 struct ucode_cpu_info *uci;506506 struct ucode_patch *p;507507+ enum ucode_state ret;507508 u32 rev, dummy;508509509510 BUG_ON(raw_smp_processor_id() != cpu);···522521523522 /* need to apply patch? */524523 if (rev >= mc_amd->hdr.patch_id) {525525- c->microcode = rev;526526- uci->cpu_sig.rev = rev;527527- return UCODE_OK;524524+ ret = UCODE_OK;525525+ goto out;528526 }529527530528 if (__apply_microcode_amd(mc_amd)) {···531531 cpu, mc_amd->hdr.patch_id);532532 return UCODE_ERROR;533533 }534534- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,535535- mc_amd->hdr.patch_id);536534537537- uci->cpu_sig.rev = mc_amd->hdr.patch_id;538538- c->microcode = mc_amd->hdr.patch_id;535535+ rev = mc_amd->hdr.patch_id;536536+ ret = UCODE_UPDATED;539537540540- return UCODE_UPDATED;538538+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);539539+540540+out:541541+ uci->cpu_sig.rev = rev;542542+ c->microcode = rev;543543+544544+ /* Update boot_cpu_data's revision too, if we're on the BSP: */545545+ if (c->cpu_index == boot_cpu_data.cpu_index)546546+ boot_cpu_data.microcode = rev;547547+548548+ return ret;541549}542550543551static int install_equiv_cpu_table(const u8 *buf)
···146146 * they can be printed in the right context.147147 */148148 if (!partial && on_stack(info, regs, sizeof(*regs))) {149149- __show_regs(regs, 0);149149+ __show_regs(regs, SHOW_REGS_SHORT);150150151151 } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,152152 IRET_FRAME_SIZE)) {···344344 oops_exit();345345346346 /* Executive summary in case the oops scrolled away */347347- __show_regs(&exec_summary_regs, true);347347+ __show_regs(&exec_summary_regs, SHOW_REGS_ALL);348348349349 if (!signr)350350 return;···407407408408void show_regs(struct pt_regs *regs)409409{410410- bool all = true;411411-412410 show_regs_print_info(KERN_DEFAULT);413411414414- if (IS_ENABLED(CONFIG_X86_32))415415- all = !user_mode(regs);416416-417417- __show_regs(regs, all);412412+ __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);418413419414 /*420415 * When in-kernel, we also print out the stack at the time of the fault..
···548548}549549550550int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,551551- unsigned long ipi_bitmap_high, int min,551551+ unsigned long ipi_bitmap_high, u32 min,552552 unsigned long icr, int op_64_bit)553553{554554 int i;···571571 rcu_read_lock();572572 map = rcu_dereference(kvm->arch.apic_map);573573574574+ if (min > map->max_apic_id)575575+ goto out;574576 /* Bits above cluster_size are masked in the caller. */575575- for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {576576- vcpu = map->phys_map[min + i]->vcpu;577577- count += kvm_apic_set_irq(vcpu, &irq, NULL);577577+ for_each_set_bit(i, &ipi_bitmap_low,578578+ min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {579579+ if (map->phys_map[min + i]) {580580+ vcpu = map->phys_map[min + i]->vcpu;581581+ count += kvm_apic_set_irq(vcpu, &irq, NULL);582582+ }578583 }579584580585 min += cluster_size;581581- for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {582582- vcpu = map->phys_map[min + i]->vcpu;583583- count += kvm_apic_set_irq(vcpu, &irq, NULL);586586+587587+ if (min > map->max_apic_id)588588+ goto out;589589+590590+ for_each_set_bit(i, &ipi_bitmap_high,591591+ min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {592592+ if (map->phys_map[min + i]) {593593+ vcpu = map->phys_map[min + i]->vcpu;594594+ count += kvm_apic_set_irq(vcpu, &irq, NULL);595595+ }584596 }585597598598+out:586599 rcu_read_unlock();587600 return count;588601}
+15-11
arch/x86/kvm/mmu.c
···18531853 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);18541854}1855185518561856-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)18571857-{18581858- return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);18591859-}18601860-18611856int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)18621857{18631858 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);···52125217int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,52135218 void *insn, int insn_len)52145219{52155215- int r, emulation_type = EMULTYPE_RETRY;52205220+ int r, emulation_type = 0;52165221 enum emulation_result er;52175222 bool direct = vcpu->arch.mmu.direct_map;52185223···52255230 r = RET_PF_INVALID;52265231 if (unlikely(error_code & PFERR_RSVD_MASK)) {52275232 r = handle_mmio_page_fault(vcpu, cr2, direct);52285228- if (r == RET_PF_EMULATE) {52295229- emulation_type = 0;52335233+ if (r == RET_PF_EMULATE)52305234 goto emulate;52315231- }52325235 }5233523652345237 if (r == RET_PF_INVALID) {···52535260 return 1;52545261 }5255526252565256- if (mmio_info_in_cache(vcpu, cr2, direct))52575257- emulation_type = 0;52635263+ /*52645264+ * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still52655265+ * optimistically try to just unprotect the page and let the processor52665266+ * re-execute the instruction that caused the page fault. Do not allow52675267+ * retrying MMIO emulation, as it's not only pointless but could also52685268+ * cause us to enter an infinite loop because the processor will keep52695269+ * faulting on the non-existent MMIO address. Retrying an instruction52705270+ * from a nested guest is also pointless and dangerous as we are only52715271+ * explicitly shadowing L1's page tables, i.e. 
unprotecting something52725272+ * for L1 isn't going to magically fix whatever issue cause L2 to fail.52735273+ */52745274+ if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))52755275+ emulation_type = EMULTYPE_ALLOW_RETRY;52585276emulate:52595277 /*52605278 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
+9-10
arch/x86/kvm/svm.c
···776776 }777777778778 if (!svm->next_rip) {779779- if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=779779+ if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=780780 EMULATE_DONE)781781 printk(KERN_DEBUG "%s: NOP\n", __func__);782782 return;···2715271527162716 WARN_ON_ONCE(!enable_vmware_backdoor);2717271727182718- er = emulate_instruction(vcpu,27182718+ er = kvm_emulate_instruction(vcpu,27192719 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);27202720 if (er == EMULATE_USER_EXIT)27212721 return 0;···28192819 string = (io_info & SVM_IOIO_STR_MASK) != 0;28202820 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;28212821 if (string)28222822- return emulate_instruction(vcpu, 0) == EMULATE_DONE;28222822+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;2823282328242824 port = io_info >> 16;28252825 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;···38613861static int invlpg_interception(struct vcpu_svm *svm)38623862{38633863 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))38643864- return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;38643864+ return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;3865386538663866 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);38673867 return kvm_skip_emulated_instruction(&svm->vcpu);···3869386938703870static int emulate_on_interception(struct vcpu_svm *svm)38713871{38723872- return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;38723872+ return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;38733873}3874387438753875static int rsm_interception(struct vcpu_svm *svm)38763876{38773877- return x86_emulate_instruction(&svm->vcpu, 0, 0,38783878- rsm_ins_bytes, 2) == EMULATE_DONE;38773877+ return kvm_emulate_instruction_from_buffer(&svm->vcpu,38783878+ rsm_ins_bytes, 2) == EMULATE_DONE;38793879}3880388038813881static int rdpmc_interception(struct vcpu_svm *svm)···47004700 ret = avic_unaccel_trap_write(svm);47014701 } else {47024702 /* Handling Fault */47034703- ret = (emulate_instruction(&svm->vcpu, 0) 
== EMULATE_DONE);47034703+ ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);47044704 }4705470547064706 return ret;···67476747static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)67486748{67496749 unsigned long vaddr, vaddr_end, next_vaddr;67506750- unsigned long dst_vaddr, dst_vaddr_end;67506750+ unsigned long dst_vaddr;67516751 struct page **src_p, **dst_p;67526752 struct kvm_sev_dbg debug;67536753 unsigned long n;···67636763 size = debug.len;67646764 vaddr_end = vaddr + size;67656765 dst_vaddr = debug.dst_uaddr;67666766- dst_vaddr_end = dst_vaddr + size;6767676667686767 for (; vaddr < vaddr_end; vaddr = next_vaddr) {67696768 int len, s_off, d_off;
+31-12
arch/x86/kvm/vmx.c
···69836983 * Cause the #SS fault with 0 error code in VM86 mode.69846984 */69856985 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {69866986- if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {69866986+ if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {69876987 if (vcpu->arch.halt_request) {69886988 vcpu->arch.halt_request = 0;69896989 return kvm_vcpu_halt(vcpu);···7054705470557055 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {70567056 WARN_ON_ONCE(!enable_vmware_backdoor);70577057- er = emulate_instruction(vcpu,70577057+ er = kvm_emulate_instruction(vcpu,70587058 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);70597059 if (er == EMULATE_USER_EXIT)70607060 return 0;···71577157 ++vcpu->stat.io_exits;7158715871597159 if (string)71607160- return emulate_instruction(vcpu, 0) == EMULATE_DONE;71607160+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;7161716171627162 port = exit_qualification >> 16;71637163 size = (exit_qualification & 7) + 1;···72317231static int handle_desc(struct kvm_vcpu *vcpu)72327232{72337233 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));72347234- return emulate_instruction(vcpu, 0) == EMULATE_DONE;72347234+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;72357235}7236723672377237static int handle_cr(struct kvm_vcpu *vcpu)···7480748074817481static int handle_invd(struct kvm_vcpu *vcpu)74827482{74837483- return emulate_instruction(vcpu, 0) == EMULATE_DONE;74837483+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;74847484}7485748574867486static int handle_invlpg(struct kvm_vcpu *vcpu)···75477547 return kvm_skip_emulated_instruction(vcpu);75487548 }75497549 }75507550- return emulate_instruction(vcpu, 0) == EMULATE_DONE;75507550+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;75517551}7552755275537553static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)···77047704 if (!static_cpu_has(X86_FEATURE_HYPERVISOR))77057705 return kvm_skip_emulated_instruction(vcpu);77067706 else77077707- return 
x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,77087708- NULL, 0) == EMULATE_DONE;77077707+ return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==77087708+ EMULATE_DONE;77097709 }7710771077117711 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);···77487748 if (kvm_test_request(KVM_REQ_EVENT, vcpu))77497749 return 1;7750775077517751- err = emulate_instruction(vcpu, 0);77517751+ err = kvm_emulate_instruction(vcpu, 0);7752775277537753 if (err == EMULATE_USER_EXIT) {77547754 ++vcpu->stat.mmio_exits;···1253712537 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);1253812538 bool from_vmentry = !!exit_qual;1253912539 u32 dummy_exit_qual;1254012540+ u32 vmcs01_cpu_exec_ctrl;1254012541 int r = 0;1254212542+1254312543+ vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);12541125441254212545 enter_guest_mode(vcpu);1254312546···1257512572 * have already been set at vmentry time and should not be reset.1257612573 */1257712574 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);1257512575+ }1257612576+1257712577+ /*1257812578+ * If L1 had a pending IRQ/NMI until it executed1257912579+ * VMLAUNCH/VMRESUME which wasn't delivered because it was1258012580+ * disallowed (e.g. interrupts disabled), L0 needs to1258112581+ * evaluate if this pending event should cause an exit from L21258212582+ * to L1 or delivered directly to L2 (e.g. In case L1 don't1258312583+ * intercept EXTERNAL_INTERRUPT).1258412584+ *1258512585+ * Usually this would be handled by L0 requesting a1258612586+ * IRQ/NMI window by setting VMCS accordingly. However,1258712587+ * this setting was done on VMCS01 and now VMCS02 is active1258812588+ * instead. 
Thus, we force L0 to perform pending event1258912589+ * evaluation by requesting a KVM_REQ_EVENT.1259012590+ */1259112591+ if (vmcs01_cpu_exec_ctrl &1259212592+ (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {1259312593+ kvm_make_request(KVM_REQ_EVENT, vcpu);1257812594 }12579125951258012596 /*···1400913987 if (check_vmentry_prereqs(vcpu, vmcs12) ||1401013988 check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))1401113989 return -EINVAL;1401214012-1401314013- if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)1401414014- vmx->nested.nested_run_pending = 1;14015139901401613991 vmx->nested.dirty_vmcs12 = true;1401713992 ret = enter_vmx_non_root_mode(vcpu, NULL);
+23-5
arch/x86/kvm/x86.c
···49874987 emul_type = 0;49884988 }4989498949904990- er = emulate_instruction(vcpu, emul_type);49904990+ er = kvm_emulate_instruction(vcpu, emul_type);49914991 if (er == EMULATE_USER_EXIT)49924992 return 0;49934993 if (er != EMULATE_DONE)···58705870 gpa_t gpa = cr2;58715871 kvm_pfn_t pfn;5872587258735873- if (emulation_type & EMULTYPE_NO_REEXECUTE)58735873+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))58745874+ return false;58755875+58765876+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))58745877 return false;5875587858765879 if (!vcpu->arch.mmu.direct_map) {···59615958 */59625959 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;5963596059645964- if (!(emulation_type & EMULTYPE_RETRY))59615961+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))59625962+ return false;59635963+59645964+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))59655965 return false;5966596659675967 if (x86_page_table_writing_insn(ctxt))···6282627662836277 return r;62846278}62856285-EXPORT_SYMBOL_GPL(x86_emulate_instruction);62796279+62806280+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)62816281+{62826282+ return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);62836283+}62846284+EXPORT_SYMBOL_GPL(kvm_emulate_instruction);62856285+62866286+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,62876287+ void *insn, int insn_len)62886288+{62896289+ return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);62906290+}62916291+EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);6286629262876293static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,62886294 unsigned short port)···77527734{77537735 int r;77547736 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);77557755- r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);77377737+ r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);77567738 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);77577739 if (r != EMULATE_DONE)77587740 return 0;
+2
arch/x86/kvm/x86.h
···274274bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,275275 int page_num);276276bool kvm_vector_hashing_enabled(void);277277+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,278278+ int emulation_type, void *insn, int insn_len);277279278280#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \279281 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
+4-4
arch/x86/mm/pgtable.c
···269269 if (pgd_val(pgd) != 0) {270270 pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);271271272272- *pgdp = native_make_pgd(0);272272+ pgd_clear(pgdp);273273274274 paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);275275 pmd_free(mm, pmd);···494494 int changed = !pte_same(*ptep, entry);495495496496 if (changed && dirty)497497- *ptep = entry;497497+ set_pte(ptep, entry);498498499499 return changed;500500}···509509 VM_BUG_ON(address & ~HPAGE_PMD_MASK);510510511511 if (changed && dirty) {512512- *pmdp = entry;512512+ set_pmd(pmdp, entry);513513 /*514514 * We had a write-protection fault here and changed the pmd515515 * to to more permissive. No need to flush the TLB for that,···529529 VM_BUG_ON(address & ~HPAGE_PUD_MASK);530530531531 if (changed && dirty) {532532- *pudp = entry;532532+ set_pud(pudp, entry);533533 /*534534 * We had a write-protection fault here and changed the pud535535 * to to more permissive. No need to flush the TLB for that,
···310310 }311311}312312313313-static void blkg_pd_offline(struct blkcg_gq *blkg)314314-{315315- int i;316316-317317- lockdep_assert_held(blkg->q->queue_lock);318318- lockdep_assert_held(&blkg->blkcg->lock);319319-320320- for (i = 0; i < BLKCG_MAX_POLS; i++) {321321- struct blkcg_policy *pol = blkcg_policy[i];322322-323323- if (blkg->pd[i] && !blkg->pd[i]->offline &&324324- pol->pd_offline_fn) {325325- pol->pd_offline_fn(blkg->pd[i]);326326- blkg->pd[i]->offline = true;327327- }328328- }329329-}330330-331313static void blkg_destroy(struct blkcg_gq *blkg)332314{333315 struct blkcg *blkcg = blkg->blkcg;334316 struct blkcg_gq *parent = blkg->parent;317317+ int i;335318336319 lockdep_assert_held(blkg->q->queue_lock);337320 lockdep_assert_held(&blkcg->lock);···322339 /* Something wrong if we are trying to remove same group twice */323340 WARN_ON_ONCE(list_empty(&blkg->q_node));324341 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));342342+343343+ for (i = 0; i < BLKCG_MAX_POLS; i++) {344344+ struct blkcg_policy *pol = blkcg_policy[i];345345+346346+ if (blkg->pd[i] && pol->pd_offline_fn)347347+ pol->pd_offline_fn(blkg->pd[i]);348348+ }325349326350 if (parent) {327351 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);···372382 struct blkcg *blkcg = blkg->blkcg;373383374384 spin_lock(&blkcg->lock);375375- blkg_pd_offline(blkg);376385 blkg_destroy(blkg);377386 spin_unlock(&blkcg->lock);378387 }···10421053 { } /* terminate */10431054};1044105510561056+/*10571057+ * blkcg destruction is a three-stage process.10581058+ *10591059+ * 1. Destruction starts. The blkcg_css_offline() callback is invoked10601060+ * which offlines writeback. Here we tie the next stage of blkg destruction10611061+ * to the completion of writeback associated with the blkcg. This lets us10621062+ * avoid punting potentially large amounts of outstanding writeback to root10631063+ * while maintaining any ongoing policies. 
The next stage is triggered when10641064+ * the nr_cgwbs count goes to zero.10651065+ *10661066+ * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called10671067+ * and handles the destruction of blkgs. Here the css reference held by10681068+ * the blkg is put back eventually allowing blkcg_css_free() to be called.10691069+ * This work may occur in cgwb_release_workfn() on the cgwb_release10701070+ * workqueue. Any submitted ios that fail to get the blkg ref will be10711071+ * punted to the root_blkg.10721072+ *10731073+ * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.10741074+ * This finally frees the blkcg.10751075+ */10761076+10451077/**10461078 * blkcg_css_offline - cgroup css_offline callback10471079 * @css: css of interest10481080 *10491049- * This function is called when @css is about to go away and responsible10501050- * for offlining all blkgs pd and killing all wbs associated with @css.10511051- * blkgs pd offline should be done while holding both q and blkcg locks.10521052- * As blkcg lock is nested inside q lock, this function performs reverse10531053- * double lock dancing.10541054- *10551055- * This is the blkcg counterpart of ioc_release_fn().10811081+ * This function is called when @css is about to go away. 
Here the cgwbs are10821082+ * offlined first and only once writeback associated with the blkcg has10831083+ * finished do we start step 2 (see above).10561084 */10571085static void blkcg_css_offline(struct cgroup_subsys_state *css)10581086{10591087 struct blkcg *blkcg = css_to_blkcg(css);10601060- struct blkcg_gq *blkg;1061108810621062- spin_lock_irq(&blkcg->lock);10631063-10641064- hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {10651065- struct request_queue *q = blkg->q;10661066-10671067- if (spin_trylock(q->queue_lock)) {10681068- blkg_pd_offline(blkg);10691069- spin_unlock(q->queue_lock);10701070- } else {10711071- spin_unlock_irq(&blkcg->lock);10721072- cpu_relax();10731073- spin_lock_irq(&blkcg->lock);10741074- }10751075- }10761076-10771077- spin_unlock_irq(&blkcg->lock);10781078-10891089+ /* this prevents anyone from attaching or migrating to this blkcg */10791090 wb_blkcg_offline(blkcg);10911091+10921092+ /* put the base cgwb reference allowing step 2 to be triggered */10931093+ blkcg_cgwb_put(blkcg);10801094}1081109510821096/**10831083- * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg10971097+ * blkcg_destroy_blkgs - responsible for shooting down blkgs10841098 * @blkcg: blkcg of interest10851099 *10861086- * This function is called when blkcg css is about to free and responsible for10871087- * destroying all blkgs associated with @blkcg.10881088- * blkgs should be removed while holding both q and blkcg locks. As blkcg lock11001100+ * blkgs should be removed while holding both q and blkcg locks. 
As blkcg lock10891101 * is nested inside q lock, this function performs reverse double lock dancing.11021102+ * Destroying the blkgs releases the reference held on the blkcg's css allowing11031103+ * blkcg_css_free to eventually be called.11041104+ *11051105+ * This is the blkcg counterpart of ioc_release_fn().10901106 */10911091-static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)11071107+void blkcg_destroy_blkgs(struct blkcg *blkcg)10921108{10931109 spin_lock_irq(&blkcg->lock);11101110+10941111 while (!hlist_empty(&blkcg->blkg_list)) {10951112 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,10961096- struct blkcg_gq,10971097- blkcg_node);11131113+ struct blkcg_gq, blkcg_node);10981114 struct request_queue *q = blkg->q;1099111511001116 if (spin_trylock(q->queue_lock)) {···11111117 spin_lock_irq(&blkcg->lock);11121118 }11131119 }11201120+11141121 spin_unlock_irq(&blkcg->lock);11151122}11161123···11191124{11201125 struct blkcg *blkcg = css_to_blkcg(css);11211126 int i;11221122-11231123- blkcg_destroy_all_blkgs(blkcg);1124112711251128 mutex_lock(&blkcg_pol_mutex);11261129···11821189 INIT_HLIST_HEAD(&blkcg->blkg_list);11831190#ifdef CONFIG_CGROUP_WRITEBACK11841191 INIT_LIST_HEAD(&blkcg->cgwb_list);11921192+ refcount_set(&blkcg->cgwb_refcnt, 1);11851193#endif11861194 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);11871195···1474148014751481 list_for_each_entry(blkg, &q->blkg_list, q_node) {14761482 if (blkg->pd[pol->plid]) {14771477- if (!blkg->pd[pol->plid]->offline &&14781478- pol->pd_offline_fn) {14831483+ if (pol->pd_offline_fn)14791484 pol->pd_offline_fn(blkg->pd[pol->plid]);14801480- blkg->pd[pol->plid]->offline = true;14811481- }14821485 pol->pd_free_fn(blkg->pd[pol->plid]);14831486 blkg->pd[pol->plid] = NULL;14841487 }
+4-1
block/blk-core.c
···21632163{21642164 const int op = bio_op(bio);2165216521662166- if (part->policy && (op_is_write(op) && !op_is_flush(op))) {21662166+ if (part->policy && op_is_write(op)) {21672167 char b[BDEVNAME_SIZE];21682168+21692169+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))21702170+ return false;2168217121692172 WARN_ONCE(1,21702173 "generic_make_request: Trying to write "
+3-2
block/blk-throttle.c
···21292129static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)21302130{21312131#ifdef CONFIG_BLK_DEV_THROTTLING_LOW21322132- if (bio->bi_css)21332133- bio_associate_blkg(bio, tg_to_blkg(tg));21322132+ /* fallback to root_blkg if we fail to get a blkg ref */21332133+ if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))21342134+ bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);21342135 bio_issue_init(&bio->bi_issue, bio_sectors(bio));21352136#endif21362137}
···3535#include <linux/delay.h>3636#ifdef CONFIG_X863737#include <asm/mpspec.h>3838+#include <linux/dmi.h>3839#endif3940#include <linux/acpi_iort.h>4041#include <linux/pci.h>4142#include <acpi/apei.h>4242-#include <linux/dmi.h>4343#include <linux/suspend.h>44444545#include "internal.h"···8080 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),8181 },8282 },8383- {}8484-};8585-#else8686-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {8783 {}8884};8985#endif···1029103310301034 acpi_permanent_mmap = true;1031103510361036+#ifdef CONFIG_X8610321037 /*10331038 * If the machine falls into the DMI check table,10341034- * DSDT will be copied to memory10391039+ * DSDT will be copied to memory.10401040+ * Note that calling dmi_check_system() here on other architectures10411041+ * would not be OK because only x86 initializes dmi early enough.10421042+ * Thankfully only x86 systems need such quirks for now.10351043 */10361044 dmi_check_system(dsdt_dmi_table);10451045+#endif1037104610381047 status = acpi_reallocate_root_table();10391048 if (ACPI_FAILURE(status)) {
drivers/ata/libata-core.c
+9-11
drivers/base/memory.c
···417417 int nid;418418419419 /*420420- * The block contains more than one zone can not be offlined.421421- * This can happen e.g. for ZONE_DMA and ZONE_DMA32422422- */423423- if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))424424- return sprintf(buf, "none\n");425425-426426- start_pfn = valid_start_pfn;427427- nr_pages = valid_end_pfn - start_pfn;428428-429429- /*430420 * Check the existing zone. Make sure that we do that only on the431421 * online nodes otherwise the page_zone is not reliable432422 */433423 if (mem->state == MEM_ONLINE) {424424+ /*425425+ * The block contains more than one zone can not be offlined.426426+ * This can happen e.g. for ZONE_DMA and ZONE_DMA32427427+ */428428+ if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,429429+ &valid_start_pfn, &valid_end_pfn))430430+ return sprintf(buf, "none\n");431431+ start_pfn = valid_start_pfn;434432 strcat(buf, page_zone(pfn_to_page(start_pfn))->name);435433 goto out;436434 }437435438438- nid = pfn_to_nid(start_pfn);436436+ nid = mem->nid;439437 default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);440438 strcat(buf, default_zone->name);441439
+3
drivers/block/nbd.c
···12391239 case NBD_SET_SOCK:12401240 return nbd_add_socket(nbd, arg, false);12411241 case NBD_SET_BLKSIZE:12421242+ if (!arg || !is_power_of_2(arg) || arg < 512 ||12431243+ arg > PAGE_SIZE)12441244+ return -EINVAL;12421245 nbd_size_set(nbd, arg,12431246 div_s64(config->bytesize, arg));12441247 return 0;
+179-58
drivers/block/rbd.c
···4207420742084208 count += sprintf(&buf[count], "%s"42094209 "pool_id %llu\npool_name %s\n"42104210+ "pool_ns %s\n"42104211 "image_id %s\nimage_name %s\n"42114212 "snap_id %llu\nsnap_name %s\n"42124213 "overlap %llu\n",42134214 !count ? "" : "\n", /* first? */42144215 spec->pool_id, spec->pool_name,42164216+ spec->pool_ns ?: "",42154217 spec->image_id, spec->image_name ?: "(unknown)",42164218 spec->snap_id, spec->snap_name,42174219 rbd_dev->parent_overlap);···45864584 &rbd_dev->header.features);45874585}4588458645874587+struct parent_image_info {45884588+ u64 pool_id;45894589+ const char *pool_ns;45904590+ const char *image_id;45914591+ u64 snap_id;45924592+45934593+ bool has_overlap;45944594+ u64 overlap;45954595+};45964596+45974597+/*45984598+ * The caller is responsible for @pii.45994599+ */46004600+static int decode_parent_image_spec(void **p, void *end,46014601+ struct parent_image_info *pii)46024602+{46034603+ u8 struct_v;46044604+ u32 struct_len;46054605+ int ret;46064606+46074607+ ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",46084608+ &struct_v, &struct_len);46094609+ if (ret)46104610+ return ret;46114611+46124612+ ceph_decode_64_safe(p, end, pii->pool_id, e_inval);46134613+ pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);46144614+ if (IS_ERR(pii->pool_ns)) {46154615+ ret = PTR_ERR(pii->pool_ns);46164616+ pii->pool_ns = NULL;46174617+ return ret;46184618+ }46194619+ pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);46204620+ if (IS_ERR(pii->image_id)) {46214621+ ret = PTR_ERR(pii->image_id);46224622+ pii->image_id = NULL;46234623+ return ret;46244624+ }46254625+ ceph_decode_64_safe(p, end, pii->snap_id, e_inval);46264626+ return 0;46274627+46284628+e_inval:46294629+ return -EINVAL;46304630+}46314631+46324632+static int __get_parent_info(struct rbd_device *rbd_dev,46334633+ struct page *req_page,46344634+ struct page *reply_page,46354635+ struct parent_image_info *pii)46364636+{46374637+ struct 
ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;46384638+ size_t reply_len = PAGE_SIZE;46394639+ void *p, *end;46404640+ int ret;46414641+46424642+ ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,46434643+ "rbd", "parent_get", CEPH_OSD_FLAG_READ,46444644+ req_page, sizeof(u64), reply_page, &reply_len);46454645+ if (ret)46464646+ return ret == -EOPNOTSUPP ? 1 : ret;46474647+46484648+ p = page_address(reply_page);46494649+ end = p + reply_len;46504650+ ret = decode_parent_image_spec(&p, end, pii);46514651+ if (ret)46524652+ return ret;46534653+46544654+ ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,46554655+ "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,46564656+ req_page, sizeof(u64), reply_page, &reply_len);46574657+ if (ret)46584658+ return ret;46594659+46604660+ p = page_address(reply_page);46614661+ end = p + reply_len;46624662+ ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);46634663+ if (pii->has_overlap)46644664+ ceph_decode_64_safe(&p, end, pii->overlap, e_inval);46654665+46664666+ return 0;46674667+46684668+e_inval:46694669+ return -EINVAL;46704670+}46714671+46724672+/*46734673+ * The caller is responsible for @pii.46744674+ */46754675+static int __get_parent_info_legacy(struct rbd_device *rbd_dev,46764676+ struct page *req_page,46774677+ struct page *reply_page,46784678+ struct parent_image_info *pii)46794679+{46804680+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;46814681+ size_t reply_len = PAGE_SIZE;46824682+ void *p, *end;46834683+ int ret;46844684+46854685+ ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,46864686+ "rbd", "get_parent", CEPH_OSD_FLAG_READ,46874687+ req_page, sizeof(u64), reply_page, &reply_len);46884688+ if (ret)46894689+ return ret;46904690+46914691+ p = page_address(reply_page);46924692+ end = p + reply_len;46934693+ ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);46944694+ pii->image_id = ceph_extract_encoded_string(&p, 
end, NULL, GFP_KERNEL);46954695+ if (IS_ERR(pii->image_id)) {46964696+ ret = PTR_ERR(pii->image_id);46974697+ pii->image_id = NULL;46984698+ return ret;46994699+ }47004700+ ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);47014701+ pii->has_overlap = true;47024702+ ceph_decode_64_safe(&p, end, pii->overlap, e_inval);47034703+47044704+ return 0;47054705+47064706+e_inval:47074707+ return -EINVAL;47084708+}47094709+47104710+static int get_parent_info(struct rbd_device *rbd_dev,47114711+ struct parent_image_info *pii)47124712+{47134713+ struct page *req_page, *reply_page;47144714+ void *p;47154715+ int ret;47164716+47174717+ req_page = alloc_page(GFP_KERNEL);47184718+ if (!req_page)47194719+ return -ENOMEM;47204720+47214721+ reply_page = alloc_page(GFP_KERNEL);47224722+ if (!reply_page) {47234723+ __free_page(req_page);47244724+ return -ENOMEM;47254725+ }47264726+47274727+ p = page_address(req_page);47284728+ ceph_encode_64(&p, rbd_dev->spec->snap_id);47294729+ ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);47304730+ if (ret > 0)47314731+ ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,47324732+ pii);47334733+47344734+ __free_page(req_page);47354735+ __free_page(reply_page);47364736+ return ret;47374737+}47384738+45894739static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)45904740{45914741 struct rbd_spec *parent_spec;45924592- size_t size;45934593- void *reply_buf = NULL;45944594- __le64 snapid;45954595- void *p;45964596- void *end;45974597- u64 pool_id;45984598- char *image_id;45994599- u64 snap_id;46004600- u64 overlap;47424742+ struct parent_image_info pii = { 0 };46014743 int ret;4602474446034745 parent_spec = rbd_spec_alloc();46044746 if (!parent_spec)46054747 return -ENOMEM;4606474846074607- size = sizeof (__le64) + /* pool_id */46084608- sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */46094609- sizeof (__le64) + /* snap_id */46104610- sizeof (__le64); /* overlap */46114611- reply_buf = kmalloc(size, 
GFP_KERNEL);46124612- if (!reply_buf) {46134613- ret = -ENOMEM;46144614- goto out_err;46154615- }46164616-46174617- snapid = cpu_to_le64(rbd_dev->spec->snap_id);46184618- ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,46194619- &rbd_dev->header_oloc, "get_parent",46204620- &snapid, sizeof(snapid), reply_buf, size);46214621- dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);46224622- if (ret < 0)47494749+ ret = get_parent_info(rbd_dev, &pii);47504750+ if (ret)46234751 goto out_err;4624475246254625- p = reply_buf;46264626- end = reply_buf + ret;46274627- ret = -ERANGE;46284628- ceph_decode_64_safe(&p, end, pool_id, out_err);46294629- if (pool_id == CEPH_NOPOOL) {47534753+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",47544754+ __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,47554755+ pii.has_overlap, pii.overlap);47564756+47574757+ if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {46304758 /*46314759 * Either the parent never existed, or we have46324760 * record of it but the image got flattened so it no···47654633 * overlap to 0. The effect of this is that all new47664634 * requests will be treated as if the image had no47674635 * parent.46364636+ *46374637+ * If !pii.has_overlap, the parent image spec is not46384638+ * applicable. 
It's there to avoid duplication in each46394639+ * snapshot record.47684640 */47694641 if (rbd_dev->parent_overlap) {47704642 rbd_dev->parent_overlap = 0;···47834647 /* The ceph file layout needs to fit pool id in 32 bits */4784464847854649 ret = -EIO;47864786- if (pool_id > (u64)U32_MAX) {46504650+ if (pii.pool_id > (u64)U32_MAX) {47874651 rbd_warn(NULL, "parent pool id too large (%llu > %u)",47884788- (unsigned long long)pool_id, U32_MAX);46524652+ (unsigned long long)pii.pool_id, U32_MAX);47894653 goto out_err;47904654 }47914791-47924792- image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);47934793- if (IS_ERR(image_id)) {47944794- ret = PTR_ERR(image_id);47954795- goto out_err;47964796- }47974797- ceph_decode_64_safe(&p, end, snap_id, out_err);47984798- ceph_decode_64_safe(&p, end, overlap, out_err);4799465548004656 /*48014657 * The parent won't change (except when the clone is···47954667 * record the parent spec we have not already done so.47964668 */47974669 if (!rbd_dev->parent_spec) {47984798- parent_spec->pool_id = pool_id;47994799- parent_spec->image_id = image_id;48004800- parent_spec->snap_id = snap_id;48014801-48024802- /* TODO: support cloning across namespaces */48034803- if (rbd_dev->spec->pool_ns) {48044804- parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,48054805- GFP_KERNEL);48064806- if (!parent_spec->pool_ns) {48074807- ret = -ENOMEM;48084808- goto out_err;48094809- }46704670+ parent_spec->pool_id = pii.pool_id;46714671+ if (pii.pool_ns && *pii.pool_ns) {46724672+ parent_spec->pool_ns = pii.pool_ns;46734673+ pii.pool_ns = NULL;48104674 }46754675+ parent_spec->image_id = pii.image_id;46764676+ pii.image_id = NULL;46774677+ parent_spec->snap_id = pii.snap_id;4811467848124679 rbd_dev->parent_spec = parent_spec;48134680 parent_spec = NULL; /* rbd_dev now owns this */48144814- } else {48154815- kfree(image_id);48164681 }4817468248184683 /*48194684 * We always update the parent overlap. 
If it's zero we issue48204685 * a warning, as we will proceed as if there was no parent.48214686 */48224822- if (!overlap) {46874687+ if (!pii.overlap) {48234688 if (parent_spec) {48244689 /* refresh, careful to warn just once */48254690 if (rbd_dev->parent_overlap)···48234702 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");48244703 }48254704 }48264826- rbd_dev->parent_overlap = overlap;47054705+ rbd_dev->parent_overlap = pii.overlap;4827470648284707out:48294708 ret = 0;48304709out_err:48314831- kfree(reply_buf);47104710+ kfree(pii.pool_ns);47114711+ kfree(pii.image_id);48324712 rbd_spec_put(parent_spec);48334833-48344713 return ret;48354714}48364715
+2-2
drivers/char/Kconfig
···566566 that CPU manufacturer (perhaps with the insistence or mandate567567 of a Nation State's intelligence or law enforcement agencies)568568 has not installed a hidden back door to compromise the CPU's569569- random number generation facilities.570570-569569+ random number generation facilities. This can also be configured570570+ at boot with "random.trust_cpu=on/off".
···101101/* color space conversion and gamma correction are not included */102102struct intel_vgpu_primary_plane_format {103103 u8 enabled; /* plane is enabled */104104- u8 tiled; /* X-tiled */104104+ u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */105105 u8 bpp; /* bits per pixel */106106 u32 hw_format; /* format field in the PRI_CTL register */107107 u32 drm_format; /* format in DRM definition */
···27082708 if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)27092709 intel_dp_stop_link_train(intel_dp);2710271027112711- intel_ddi_enable_pipe_clock(crtc_state);27112711+ if (!is_mst)27122712+ intel_ddi_enable_pipe_clock(crtc_state);27122713}2713271427142715static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,···28112810 bool is_mst = intel_crtc_has_type(old_crtc_state,28122811 INTEL_OUTPUT_DP_MST);2813281228142814- intel_ddi_disable_pipe_clock(old_crtc_state);28152815-28162816- /*28172817- * Power down sink before disabling the port, otherwise we end28182818- * up getting interrupts from the sink on detecting link loss.28192819- */28202820- if (!is_mst)28132813+ if (!is_mst) {28142814+ intel_ddi_disable_pipe_clock(old_crtc_state);28152815+ /*28162816+ * Power down sink before disabling the port, otherwise we end28172817+ * up getting interrupts from the sink on detecting link loss.28182818+ */28212819 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);28202820+ }2822282128232822 intel_disable_ddi_buf(encoder);28242823
+19-14
drivers/gpu/drm/i915/intel_dp.c
···41604160 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);41614161}4162416241634163-/*41644164- * If display is now connected check links status,41654165- * there has been known issues of link loss triggering41664166- * long pulse.41674167- *41684168- * Some sinks (eg. ASUS PB287Q) seem to perform some41694169- * weird HPD ping pong during modesets. So we can apparently41704170- * end up with HPD going low during a modeset, and then41714171- * going back up soon after. And once that happens we must41724172- * retrain the link to get a picture. That's in case no41734173- * userspace component reacted to intermittent HPD dip.41744174- */41754163int intel_dp_retrain_link(struct intel_encoder *encoder,41764164 struct drm_modeset_acquire_ctx *ctx)41774165{···46494661}4650466246514663static int46524652-intel_dp_long_pulse(struct intel_connector *connector)46644664+intel_dp_long_pulse(struct intel_connector *connector,46654665+ struct drm_modeset_acquire_ctx *ctx)46534666{46544667 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);46554668 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);···47094720 */47104721 status = connector_status_disconnected;47114722 goto out;47234723+ } else {47244724+ /*47254725+ * If display is now connected check links status,47264726+ * there has been known issues of link loss triggering47274727+ * long pulse.47284728+ *47294729+ * Some sinks (eg. ASUS PB287Q) seem to perform some47304730+ * weird HPD ping pong during modesets. So we can apparently47314731+ * end up with HPD going low during a modeset, and then47324732+ * going back up soon after. And once that happens we must47334733+ * retrain the link to get a picture. 
That's in case no47344734+ * userspace component reacted to intermittent HPD dip.47354735+ */47364736+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;47374737+47384738+ intel_dp_retrain_link(encoder, ctx);47124739 }4713474047144741 /*···47864781 return ret;47874782 }4788478347894789- status = intel_dp_long_pulse(intel_dp->attached_connector);47844784+ status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);47904785 }4791478647924787 intel_dp->detect_done = false;
+4
drivers/gpu/drm/i915/intel_dp_mst.c
···166166 struct intel_connector *connector =167167 to_intel_connector(old_conn_state->connector);168168169169+ intel_ddi_disable_pipe_clock(old_crtc_state);170170+169171 /* this can fail */170172 drm_dp_check_act_status(&intel_dp->mst_mgr);171173 /* and this can also fail */···254252 I915_WRITE(DP_TP_STATUS(port), temp);255253256254 ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);255255+256256+ intel_ddi_enable_pipe_clock(pipe_config);257257}258258259259static void intel_mst_enable_dp(struct intel_encoder *encoder,
+50-19
drivers/gpu/drm/nouveau/dispnv50/disp.c
···11231123 int ret;1124112411251125 if (dpcd >= 0x12) {11261126- ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);11261126+ /* Even if we're enabling MST, start with disabling the11271127+ * branching unit to clear any sink-side MST topology state11281128+ * that wasn't set by us11291129+ */11301130+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);11271131 if (ret < 0)11281132 return ret;1129113311301130- dpcd &= ~DP_MST_EN;11311131- if (state)11321132- dpcd |= DP_MST_EN;11331133-11341134- ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);11351135- if (ret < 0)11361136- return ret;11341134+ if (state) {11351135+ /* Now, start initializing */11361136+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,11371137+ DP_MST_EN);11381138+ if (ret < 0)11391139+ return ret;11401140+ }11371141 }1138114211391143 return nvif_mthd(disp, 0, &args, sizeof(args));···11461142int11471143nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)11481144{11491149- int ret, state = 0;11451145+ struct drm_dp_aux *aux;11461146+ int ret;11471147+ bool old_state, new_state;11481148+ u8 mstm_ctrl;1150114911511150 if (!mstm)11521151 return 0;1153115211541154- if (dpcd[0] >= 0x12) {11551155- ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);11531153+ mutex_lock(&mstm->mgr.lock);11541154+11551155+ old_state = mstm->mgr.mst_state;11561156+ new_state = old_state;11571157+ aux = mstm->mgr.aux;11581158+11591159+ if (old_state) {11601160+ /* Just check that the MST hub is still as we expect it */11611161+ ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);11621162+ if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {11631163+ DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");11641164+ new_state = false;11651165+ }11661166+ } else if (dpcd[0] >= 0x12) {11671167+ ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);11561168 if (ret < 0)11571157- return ret;11691169+ goto probe_error;1158117011591171 if (!(dpcd[1] & DP_MST_CAP))11601172 dpcd[0] = 
0x11;11611173 else11621162- state = allow;11741174+ new_state = allow;11631175 }1164117611651165- ret = nv50_mstm_enable(mstm, dpcd[0], state);11661166- if (ret)11671167- return ret;11771177+ if (new_state == old_state) {11781178+ mutex_unlock(&mstm->mgr.lock);11791179+ return new_state;11801180+ }1168118111691169- ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);11821182+ ret = nv50_mstm_enable(mstm, dpcd[0], new_state);11831183+ if (ret)11841184+ goto probe_error;11851185+11861186+ mutex_unlock(&mstm->mgr.lock);11871187+11881188+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);11701189 if (ret)11711190 return nv50_mstm_enable(mstm, dpcd[0], 0);1172119111731173- return mstm->mgr.mst_state;11921192+ return new_state;11931193+11941194+probe_error:11951195+ mutex_unlock(&mstm->mgr.lock);11961196+ return ret;11741197}1175119811761199static void···21052074static const struct drm_mode_config_funcs21062075nv50_disp_func = {21072076 .fb_create = nouveau_user_framebuffer_create,21082108- .output_poll_changed = drm_fb_helper_output_poll_changed,20772077+ .output_poll_changed = nouveau_fbcon_output_poll_changed,21092078 .atomic_check = nv50_disp_atomic_check,21102079 .atomic_commit = nv50_disp_atomic_commit,21112080 .atomic_state_alloc = nv50_disp_atomic_state_alloc,
+60-50
drivers/gpu/drm/nouveau/nouveau_connector.c
···409409nouveau_connector_ddc_detect(struct drm_connector *connector)410410{411411 struct drm_device *dev = connector->dev;412412- struct nouveau_connector *nv_connector = nouveau_connector(connector);413413- struct nouveau_drm *drm = nouveau_drm(dev);414414- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);415415- struct nouveau_encoder *nv_encoder = NULL;412412+ struct nouveau_encoder *nv_encoder = NULL, *found = NULL;416413 struct drm_encoder *encoder;417417- int i, panel = -ENODEV;418418-419419- /* eDP panels need powering on by us (if the VBIOS doesn't default it420420- * to on) before doing any AUX channel transactions. LVDS panel power421421- * is handled by the SOR itself, and not required for LVDS DDC.422422- */423423- if (nv_connector->type == DCB_CONNECTOR_eDP) {424424- panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);425425- if (panel == 0) {426426- nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);427427- msleep(300);428428- }429429- }414414+ int i, ret;415415+ bool switcheroo_ddc = false;430416431417 drm_connector_for_each_possible_encoder(connector, encoder, i) {432418 nv_encoder = nouveau_encoder(encoder);433419434434- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {435435- int ret = nouveau_dp_detect(nv_encoder);420420+ switch (nv_encoder->dcb->type) {421421+ case DCB_OUTPUT_DP:422422+ ret = nouveau_dp_detect(nv_encoder);436423 if (ret == NOUVEAU_DP_MST)437424 return NULL;438438- if (ret == NOUVEAU_DP_SST)425425+ else if (ret == NOUVEAU_DP_SST)426426+ found = nv_encoder;427427+428428+ break;429429+ case DCB_OUTPUT_LVDS:430430+ switcheroo_ddc = !!(vga_switcheroo_handler_flags() &431431+ VGA_SWITCHEROO_CAN_SWITCH_DDC);432432+ /* fall-through */433433+ default:434434+ if (!nv_encoder->i2c)439435 break;440440- } else441441- if ((vga_switcheroo_handler_flags() &442442- VGA_SWITCHEROO_CAN_SWITCH_DDC) &&443443- nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&444444- nv_encoder->i2c) {445445- int ret;446446- 
vga_switcheroo_lock_ddc(dev->pdev);447447- ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);448448- vga_switcheroo_unlock_ddc(dev->pdev);449449- if (ret)450450- break;451451- } else452452- if (nv_encoder->i2c) {436436+437437+ if (switcheroo_ddc)438438+ vga_switcheroo_lock_ddc(dev->pdev);453439 if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))454454- break;440440+ found = nv_encoder;441441+ if (switcheroo_ddc)442442+ vga_switcheroo_unlock_ddc(dev->pdev);443443+444444+ break;455445 }446446+ if (found)447447+ break;456448 }457449458458- /* eDP panel not detected, restore panel power GPIO to previous459459- * state to avoid confusing the SOR for other output types.460460- */461461- if (!nv_encoder && panel == 0)462462- nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);463463-464464- return nv_encoder;450450+ return found;465451}466452467453static struct nouveau_encoder *···541555 nv_connector->edid = NULL;542556 }543557544544- /* Outputs are only polled while runtime active, so acquiring a545545- * runtime PM ref here is unnecessary (and would deadlock upon546546- * runtime suspend because it waits for polling to finish).558558+ /* Outputs are only polled while runtime active, so resuming the559559+ * device here is unnecessary (and would deadlock upon runtime suspend560560+ * because it waits for polling to finish). 
We do however, want to561561+ * prevent the autosuspend timer from elapsing during this operation562562+ * if possible.547563 */548548- if (!drm_kms_helper_is_poll_worker()) {549549- ret = pm_runtime_get_sync(connector->dev->dev);564564+ if (drm_kms_helper_is_poll_worker()) {565565+ pm_runtime_get_noresume(dev->dev);566566+ } else {567567+ ret = pm_runtime_get_sync(dev->dev);550568 if (ret < 0 && ret != -EACCES)551569 return conn_status;552570 }···628638629639 out:630640631631- if (!drm_kms_helper_is_poll_worker()) {632632- pm_runtime_mark_last_busy(connector->dev->dev);633633- pm_runtime_put_autosuspend(connector->dev->dev);634634- }641641+ pm_runtime_mark_last_busy(dev->dev);642642+ pm_runtime_put_autosuspend(dev->dev);635643636644 return conn_status;637645}···10931105 const struct nvif_notify_conn_rep_v0 *rep = notify->data;10941106 const char *name = connector->name;10951107 struct nouveau_encoder *nv_encoder;11081108+ int ret;11091109+11101110+ ret = pm_runtime_get(drm->dev->dev);11111111+ if (ret == 0) {11121112+ /* We can't block here if there's a pending PM request11131113+ * running, as we'll deadlock nouveau_display_fini() when it11141114+ * calls nvif_put() on our nvif_notify struct. 
So, simply11151115+ * defer the hotplug event until the device finishes resuming11161116+ */11171117+ NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",11181118+ name);11191119+ schedule_work(&drm->hpd_work);11201120+11211121+ pm_runtime_put_noidle(drm->dev->dev);11221122+ return NVIF_NOTIFY_KEEP;11231123+ } else if (ret != 1 && ret != -EACCES) {11241124+ NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",11251125+ name, ret);11261126+ return NVIF_NOTIFY_DROP;11271127+ }1096112810971129 if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {10981130 NV_DEBUG(drm, "service %s\n", name);···11301122 drm_helper_hpd_irq_event(connector->dev);11311123 }1132112411251125+ pm_runtime_mark_last_busy(drm->dev->dev);11261126+ pm_runtime_put_autosuspend(drm->dev->dev);11331127 return NVIF_NOTIFY_KEEP;11341128}11351129
+32-12
drivers/gpu/drm/nouveau/nouveau_display.c
···293293294294static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {295295 .fb_create = nouveau_user_framebuffer_create,296296- .output_poll_changed = drm_fb_helper_output_poll_changed,296296+ .output_poll_changed = nouveau_fbcon_output_poll_changed,297297};298298299299···355355 pm_runtime_get_sync(drm->dev->dev);356356357357 drm_helper_hpd_irq_event(drm->dev);358358- /* enable polling for external displays */359359- drm_kms_helper_poll_enable(drm->dev);360358361359 pm_runtime_mark_last_busy(drm->dev->dev);362360 pm_runtime_put_sync(drm->dev->dev);···377379{378380 struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);379381 struct acpi_bus_event *info = data;382382+ int ret;380383381384 if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {382385 if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {383383- /*384384- * This may be the only indication we receive of a385385- * connector hotplug on a runtime suspended GPU,386386- * schedule hpd_work to check.387387- */388388- schedule_work(&drm->hpd_work);386386+ ret = pm_runtime_get(drm->dev->dev);387387+ if (ret == 1 || ret == -EACCES) {388388+ /* If the GPU is already awake, or in a state389389+ * where we can't wake it up, it can handle390390+ * it's own hotplug events.391391+ */392392+ pm_runtime_put_autosuspend(drm->dev->dev);393393+ } else if (ret == 0) {394394+ /* This may be the only indication we receive395395+ * of a connector hotplug on a runtime396396+ * suspended GPU, schedule hpd_work to check.397397+ */398398+ NV_DEBUG(drm, "ACPI requested connector reprobe\n");399399+ schedule_work(&drm->hpd_work);400400+ pm_runtime_put_noidle(drm->dev->dev);401401+ } else {402402+ NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",403403+ ret);404404+ }389405390406 /* acpi-video should not generate keypresses for this */391407 return NOTIFY_BAD;···423411 if (ret)424412 return ret;425413414414+ /* enable connector detection and polling for connectors without HPD415415+ * support416416+ 
*/417417+ drm_kms_helper_poll_enable(dev);418418+426419 /* enable hotplug interrupts */427420 drm_connector_list_iter_begin(dev, &conn_iter);428421 nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {···442425}443426444427void445445-nouveau_display_fini(struct drm_device *dev, bool suspend)428428+nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)446429{447430 struct nouveau_display *disp = nouveau_display(dev);448431 struct nouveau_drm *drm = nouveau_drm(dev);···466449 nvif_notify_put(&conn->hpd);467450 }468451 drm_connector_list_iter_end(&conn_iter);452452+453453+ if (!runtime)454454+ cancel_work_sync(&drm->hpd_work);469455470456 drm_kms_helper_poll_disable(dev);471457 disp->fini(dev);···638618 }639619 }640620641641- nouveau_display_fini(dev, true);621621+ nouveau_display_fini(dev, true, runtime);642622 return 0;643623 }644624645645- nouveau_display_fini(dev, true);625625+ nouveau_display_fini(dev, true, runtime);646626647627 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {648628 struct nouveau_framebuffer *nouveau_fb;
···230230 mutex_unlock(&drm->master.lock);231231 }232232 if (ret) {233233- NV_ERROR(drm, "Client allocation failed: %d\n", ret);233233+ NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);234234 goto done;235235 }236236···240240 }, sizeof(struct nv_device_v0),241241 &cli->device);242242 if (ret) {243243- NV_ERROR(drm, "Device allocation failed: %d\n", ret);243243+ NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);244244 goto done;245245 }246246247247 ret = nvif_mclass(&cli->device.object, mmus);248248 if (ret < 0) {249249- NV_ERROR(drm, "No supported MMU class\n");249249+ NV_PRINTK(err, cli, "No supported MMU class\n");250250 goto done;251251 }252252253253 ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);254254 if (ret) {255255- NV_ERROR(drm, "MMU allocation failed: %d\n", ret);255255+ NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);256256 goto done;257257 }258258259259 ret = nvif_mclass(&cli->mmu.object, vmms);260260 if (ret < 0) {261261- NV_ERROR(drm, "No supported VMM class\n");261261+ NV_PRINTK(err, cli, "No supported VMM class\n");262262 goto done;263263 }264264265265 ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);266266 if (ret) {267267- NV_ERROR(drm, "VMM allocation failed: %d\n", ret);267267+ NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);268268 goto done;269269 }270270271271 ret = nvif_mclass(&cli->mmu.object, mems);272272 if (ret < 0) {273273- NV_ERROR(drm, "No supported MEM class\n");273273+ NV_PRINTK(err, cli, "No supported MEM class\n");274274 goto done;275275 }276276···592592 pm_runtime_allow(dev->dev);593593 pm_runtime_mark_last_busy(dev->dev);594594 pm_runtime_put(dev->dev);595595- } else {596596- /* enable polling for external displays */597597- drm_kms_helper_poll_enable(dev);598595 }596596+599597 return 0;600598601599fail_dispinit:···627629 nouveau_debugfs_fini(drm);628630629631 if (dev->mode_config.num_crtc)630630- nouveau_display_fini(dev, false);632632+ nouveau_display_fini(dev, 
false, false);631633 nouveau_display_destroy(dev);632634633635 nouveau_bios_takedown(dev);···833835 return -EBUSY;834836 }835837836836- drm_kms_helper_poll_disable(drm_dev);837838 nouveau_switcheroo_optimus_dsm();838839 ret = nouveau_do_suspend(drm_dev, true);839840 pci_save_state(pdev);
+57
drivers/gpu/drm/nouveau/nouveau_fbcon.c
···466466 console_unlock();467467468468 if (state == FBINFO_STATE_RUNNING) {469469+ nouveau_fbcon_hotplug_resume(drm->fbcon);469470 pm_runtime_mark_last_busy(drm->dev->dev);470471 pm_runtime_put_sync(drm->dev->dev);471472 }···488487 schedule_work(&drm->fbcon_work);489488}490489490490+void491491+nouveau_fbcon_output_poll_changed(struct drm_device *dev)492492+{493493+ struct nouveau_drm *drm = nouveau_drm(dev);494494+ struct nouveau_fbdev *fbcon = drm->fbcon;495495+ int ret;496496+497497+ if (!fbcon)498498+ return;499499+500500+ mutex_lock(&fbcon->hotplug_lock);501501+502502+ ret = pm_runtime_get(dev->dev);503503+ if (ret == 1 || ret == -EACCES) {504504+ drm_fb_helper_hotplug_event(&fbcon->helper);505505+506506+ pm_runtime_mark_last_busy(dev->dev);507507+ pm_runtime_put_autosuspend(dev->dev);508508+ } else if (ret == 0) {509509+ /* If the GPU was already in the process of suspending before510510+ * this event happened, then we can't block here as we'll511511+ * deadlock the runtime pmops since they wait for us to512512+ * finish. So, just defer this event for when we runtime513513+ * resume again. 
It will be handled by fbcon_work.514514+ */515515+ NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");516516+ fbcon->hotplug_waiting = true;517517+ pm_runtime_put_noidle(drm->dev->dev);518518+ } else {519519+ DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",520520+ ret);521521+ }522522+523523+ mutex_unlock(&fbcon->hotplug_lock);524524+}525525+526526+void527527+nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)528528+{529529+ struct nouveau_drm *drm;530530+531531+ if (!fbcon)532532+ return;533533+ drm = nouveau_drm(fbcon->helper.dev);534534+535535+ mutex_lock(&fbcon->hotplug_lock);536536+ if (fbcon->hotplug_waiting) {537537+ fbcon->hotplug_waiting = false;538538+539539+ NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");540540+ drm_fb_helper_hotplug_event(&fbcon->helper);541541+ }542542+ mutex_unlock(&fbcon->hotplug_lock);543543+}544544+491545int492546nouveau_fbcon_init(struct drm_device *dev)493547{···561505562506 drm->fbcon = fbcon;563507 INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);508508+ mutex_init(&fbcon->hotplug_lock);564509565510 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);566511
···275275 struct nvkm_outp *outp, *outt, *pair;276276 struct nvkm_conn *conn;277277 struct nvkm_head *head;278278+ struct nvkm_ior *ior;278279 struct nvbios_connE connE;279280 struct dcb_output dcbE;280281 u8 hpd = 0, ver, hdr;···398397 ret = disp->func->oneinit(disp);399398 if (ret)400399 return ret;400400+ }401401+402402+ /* Enforce identity-mapped SOR assignment for panels, which have403403+ * certain bits (ie. backlight controls) wired to a specific SOR.404404+ */405405+ list_for_each_entry(outp, &disp->outp, head) {406406+ if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||407407+ outp->conn->info.type == DCB_CONNECTOR_eDP) {408408+ ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);409409+ if (!WARN_ON(!ior))410410+ ior->identity = true;411411+ outp->identity = true;412412+ }401413 }402414403415 i = 0;
+46-8
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
···28282929#include <subdev/bios.h>3030#include <subdev/bios/init.h>3131+#include <subdev/gpio.h>3132#include <subdev/i2c.h>32333334#include <nvif/event.h>···413412}414413415414static void416416-nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)415415+nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)417416{418417 struct nvkm_dp *dp = nvkm_dp(outp);419419-420420- /* Prevent link from being retrained if sink sends an IRQ. */421421- atomic_set(&dp->lt.done, 0);422422- ior->dp.nr = 0;423418424419 /* Execute DisableLT script from DP Info Table. */425420 nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],···423426 init.or = ior->id;424427 init.link = ior->arm.link;425428 );429429+}430430+431431+static void432432+nvkm_dp_release(struct nvkm_outp *outp)433433+{434434+ struct nvkm_dp *dp = nvkm_dp(outp);435435+436436+ /* Prevent link from being retrained if sink sends an IRQ. */437437+ atomic_set(&dp->lt.done, 0);438438+ dp->outp.ior->dp.nr = 0;426439}427440428441static int···498491 return ret;499492}500493501501-static void494494+static bool502495nvkm_dp_enable(struct nvkm_dp *dp, bool enable)503496{504497 struct nvkm_i2c_aux *aux = dp->aux;···512505513506 if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,514507 sizeof(dp->dpcd)))515515- return;508508+ return true;516509 }517510518511 if (dp->present) {···522515 }523516524517 atomic_set(&dp->lt.done, 0);518518+ return false;525519}526520527521static int···563555static void564556nvkm_dp_init(struct nvkm_outp *outp)565557{558558+ struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;566559 struct nvkm_dp *dp = nvkm_dp(outp);560560+567561 nvkm_notify_put(&dp->outp.conn->hpd);568568- nvkm_dp_enable(dp, true);562562+563563+ /* eDP panels need powering on by us (if the VBIOS doesn't default it564564+ * to on) before doing any AUX channel transactions. 
LVDS panel power565565+ * is handled by the SOR itself, and not required for LVDS DDC.566566+ */567567+ if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {568568+ int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);569569+ if (power == 0)570570+ nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);571571+572572+ /* We delay here unconditionally, even if already powered,573573+ * because some laptop panels having a significant resume574574+ * delay before the panel begins responding.575575+ *576576+ * This is likely a bit of a hack, but no better idea for577577+ * handling this at the moment.578578+ */579579+ msleep(300);580580+581581+ /* If the eDP panel can't be detected, we need to restore582582+ * the panel power GPIO to avoid breaking another output.583583+ */584584+ if (!nvkm_dp_enable(dp, true) && power == 0)585585+ nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);586586+ } else {587587+ nvkm_dp_enable(dp, true);588588+ }589589+569590 nvkm_notify_get(&dp->hpd);570591}571592···613576 .fini = nvkm_dp_fini,614577 .acquire = nvkm_dp_acquire,615578 .release = nvkm_dp_release,579579+ .disable = nvkm_dp_disable,616580};617581618582static int
···501501 nv50_disp_super_ied_off(head, ior, 2);502502503503 /* If we're shutting down the OR's only active head, execute504504- * the output path's release function.504504+ * the output path's disable function.505505 */506506 if (ior->arm.head == (1 << head->id)) {507507- if ((outp = ior->arm.outp) && outp->func->release)508508- outp->func->release(outp, ior);507507+ if ((outp = ior->arm.outp) && outp->func->disable)508508+ outp->func->disable(outp, ior);509509 }510510}511511
+14-4
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
···9393 if (ior) {9494 outp->acquired &= ~user;9595 if (!outp->acquired) {9696+ if (outp->func->release && outp->ior)9797+ outp->func->release(outp);9698 outp->ior->asy.outp = NULL;9799 outp->ior = NULL;98100 }···129127 if (proto == UNKNOWN)130128 return -ENOSYS;131129130130+ /* Deal with panels requiring identity-mapped SOR assignment. */131131+ if (outp->identity) {132132+ ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);133133+ if (WARN_ON(!ior))134134+ return -ENOSPC;135135+ return nvkm_outp_acquire_ior(outp, user, ior);136136+ }137137+132138 /* First preference is to reuse the OR that is currently armed133139 * on HW, if any, in order to prevent unnecessary switching.134140 */135141 list_for_each_entry(ior, &outp->disp->ior, head) {136136- if (!ior->asy.outp && ior->arm.outp == outp)142142+ if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)137143 return nvkm_outp_acquire_ior(outp, user, ior);138144 }139145140146 /* Failing that, a completely unused OR is the next best thing. */141147 list_for_each_entry(ior, &outp->disp->ior, head) {142142- if (!ior->asy.outp && ior->type == type && !ior->arm.outp &&148148+ if (!ior->identity &&149149+ !ior->asy.outp && ior->type == type && !ior->arm.outp &&143150 (ior->func->route.set || ior->id == __ffs(outp->info.or)))144151 return nvkm_outp_acquire_ior(outp, user, ior);145152 }···157146 * but will be released during the next modeset.158147 */159148 list_for_each_entry(ior, &outp->disp->ior, head) {160160- if (!ior->asy.outp && ior->type == type &&149149+ if (!ior->identity && !ior->asy.outp && ior->type == type &&161150 (ior->func->route.set || ior->id == __ffs(outp->info.or)))162151 return nvkm_outp_acquire_ior(outp, user, ior);163152 }···256245 outp->index = index;257246 outp->info = *dcbE;258247 outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);259259- outp->or = ffs(outp->info.or) - 1;260248261249 OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "262250 "edid %x bus %d head %x",
···579579}580580EXPORT_SYMBOL_GPL(sensor_hub_device_close);581581582582+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,583583+ unsigned int *rsize)584584+{585585+ /*586586+ * Checks if the report descriptor of Thinkpad Helix 2 has a logical587587+ * minimum for magnetic flux axis greater than the maximum.588588+ */589589+ if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&590590+ *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&591591+ rdesc[915] == 0x81 && rdesc[916] == 0x08 &&592592+ rdesc[917] == 0x00 && rdesc[918] == 0x27 &&593593+ rdesc[921] == 0x07 && rdesc[922] == 0x00) {594594+ /* Sets negative logical minimum for mag x, y and z */595595+ rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;596596+ rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;597597+ rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;598598+ rdesc[917] = rdesc[938] = rdesc[959] = 0xff;599599+ }600600+601601+ return rdesc;602602+}603603+582604static int sensor_hub_probe(struct hid_device *hdev,583605 const struct hid_device_id *id)584606{···765743 .probe = sensor_hub_probe,766744 .remove = sensor_hub_remove,767745 .raw_event = sensor_hub_raw_event,746746+ .report_fixup = sensor_hub_report_fixup,768747#ifdef CONFIG_PM769748 .suspend = sensor_hub_suspend,770749 .resume = sensor_hub_resume,
+7-4
drivers/hid/i2c-hid/i2c-hid.c
···170170 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },171171 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,172172 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },173173- { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,174174- I2C_HID_QUIRK_RESEND_REPORT_DESCR },175173 { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,176174 I2C_HID_QUIRK_RESEND_REPORT_DESCR },177175 { 0, 0 }···12331235 pm_runtime_enable(dev);1234123612351237 enable_irq(client->irq);12361236- ret = i2c_hid_hwreset(client);12381238+12391239+ /* Instead of resetting device, simply powers the device on. This12401240+ * solves "incomplete reports" on Raydium devices 2386:3118 and12411241+ * 2386:4B3312421242+ */12431243+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);12371244 if (ret)12381245 return ret;1239124612401240- /* RAYDIUM device (2386:3118) need to re-send report descr cmd12471247+ /* Some devices need to re-send report descr cmd12411248 * after resume, after this it will be back normal.12421249 * otherwise it issues too many incomplete reports.12431250 */
···164164MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");165165MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");166166MODULE_LICENSE("GPL v2");167167+MODULE_ALIAS("platform:raspberrypi-hwmon");
···401401 return ret;402402403403 for (msg = msgs; msg < emsg; msg++) {404404- /* If next message is read, skip the stop condition */405405- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);406406- /* but, force it if I2C_M_STOP is set */407407- if (msg->flags & I2C_M_STOP)408408- stop = true;404404+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */405405+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);409406410407 ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);411408 if (ret)
+2-5
drivers/i2c/busses/i2c-uniphier.c
···248248 return ret;249249250250 for (msg = msgs; msg < emsg; msg++) {251251- /* If next message is read, skip the stop condition */252252- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);253253- /* but, force it if I2C_M_STOP is set */254254- if (msg->flags & I2C_M_STOP)255255- stop = true;251251+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */252252+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);256253257254 ret = uniphier_i2c_master_xfer_one(adap, msg, stop);258255 if (ret)
+4
drivers/i2c/busses/i2c-xiic.c
···532532{533533 u8 rx_watermark;534534 struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;535535+ unsigned long flags;535536536537 /* Clear and enable Rx full interrupt. */537538 xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);···548547 rx_watermark = IIC_RX_FIFO_DEPTH;549548 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);550549550550+ local_irq_save(flags);551551 if (!(msg->flags & I2C_M_NOSTART))552552 /* write the address */553553 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,···558556559557 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,560558 msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));559559+ local_irq_restore(flags);560560+561561 if (i2c->nmsgs == 1)562562 /* very last, enable bus not busy as well */563563 xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
···16851685 schp = to_c4iw_cq(qhp->ibqp.send_cq);1686168616871687 if (qhp->ibqp.uobject) {16881688+16891689+ /* for user qps, qhp->wq.flushed is protected by qhp->mutex */16901690+ if (qhp->wq.flushed)16911691+ return;16921692+16931693+ qhp->wq.flushed = 1;16881694 t4_set_wq_in_error(&qhp->wq, 0);16891695 t4_set_cq_in_error(&rchp->cq);16901696 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
···1027102710281028 skb_queue_head_init(&skqueue);1029102910301030+ netif_tx_lock_bh(p->dev);10301031 spin_lock_irq(&priv->lock);10311032 set_bit(IPOIB_FLAG_OPER_UP, &p->flags);10321033 if (p->neigh)10331034 while ((skb = __skb_dequeue(&p->neigh->queue)))10341035 __skb_queue_tail(&skqueue, skb);10351036 spin_unlock_irq(&priv->lock);10371037+ netif_tx_unlock_bh(p->dev);1036103810371039 while ((skb = __skb_dequeue(&skqueue))) {10381040 skb->dev = p->dev;
+3-1
drivers/irqchip/irq-gic-v3-its.c
···14391439 * The consequence of the above is that allocation is cost is low, but14401440 * freeing is expensive. We assumes that freeing rarely occurs.14411441 */14421442+#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */1442144314431444static DEFINE_MUTEX(lpi_range_lock);14441445static LIST_HEAD(lpi_range_list);···16261625{16271626 phys_addr_t paddr;1628162716291629- lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);16281628+ lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),16291629+ ITS_MAX_LPI_NRBITS);16301630 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);16311631 if (!gic_rdists->prop_page) {16321632 pr_err("Failed to allocate PROPBASE\n");
+5-5
drivers/md/md-cluster.c
···12761276static int resync_finish(struct mddev *mddev)12771277{12781278 struct md_cluster_info *cinfo = mddev->cluster_info;12791279+ int ret = 0;1279128012801281 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);12811281- dlm_unlock_sync(cinfo->resync_lockres);1282128212831283 /*12841284 * If resync thread is interrupted so we can't say resync is finished,12851285 * another node will launch resync thread to continue.12861286 */12871287- if (test_bit(MD_CLOSING, &mddev->flags))12881288- return 0;12891289- else12901290- return resync_info_update(mddev, 0, 0);12871287+ if (!test_bit(MD_CLOSING, &mddev->flags))12881288+ ret = resync_info_update(mddev, 0, 0);12891289+ dlm_unlock_sync(cinfo->resync_lockres);12901290+ return ret;12911291}1292129212931293static int area_resyncing(struct mddev *mddev, int direction,
+4-1
drivers/md/raid10.c
···45294529 allow_barrier(conf);45304530 }4531453145324532+ raise_barrier(conf, 0);45324533read_more:45334534 /* Now schedule reads for blocks from sector_nr to last */45344535 r10_bio = raid10_alloc_init_r10buf(conf);45354536 r10_bio->state = 0;45364536- raise_barrier(conf, sectors_done != 0);45374537+ raise_barrier(conf, 1);45374538 atomic_set(&r10_bio->remaining, 0);45384539 r10_bio->mddev = mddev;45394540 r10_bio->sector = sector_nr;···46294628 sectors_done += nr_sectors;46304629 if (sector_nr <= last)46314630 goto read_more;46314631+46324632+ lower_barrier(conf);4632463346334634 /* Now that we have done the whole section we can46344635 * update reshape_progress
+5
drivers/md/raid5-log.h
···4646extern void ppl_quiesce(struct r5conf *conf, int quiesce);4747extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);48484949+static inline bool raid5_has_log(struct r5conf *conf)5050+{5151+ return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);5252+}5353+4954static inline bool raid5_has_ppl(struct r5conf *conf)5055{5156 return test_bit(MD_HAS_PPL, &conf->mddev->flags);
···411411 if (ret < 0)412412 goto error;413413 }414414- } else {414414+ } else if (pdata) {415415 for (i = 0; i < pdata->num_sub_devices; i++) {416416 pdata->sub_devices[i].dev.parent = dev;417417 ret = platform_device_register(&pdata->sub_devices[i]);
+11-13
drivers/net/ethernet/amazon/ena/ena_com.c
···459459 cqe = &admin_queue->cq.entries[head_masked];460460461461 /* Go over all the completions */462462- while ((cqe->acq_common_descriptor.flags &462462+ while ((READ_ONCE(cqe->acq_common_descriptor.flags) &463463 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {464464 /* Do not read the rest of the completion entry before the465465 * phase bit was validated466466 */467467- rmb();467467+ dma_rmb();468468 ena_com_handle_single_admin_completion(admin_queue, cqe);469469470470 head_masked++;···627627 mmio_read_reg |= mmio_read->seq_num &628628 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;629629630630- /* make sure read_resp->req_id get updated before the hw can write631631- * there632632- */633633- wmb();630630+ writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);634631635635- writel_relaxed(mmio_read_reg,636636- ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);637637-638638- mmiowb();639632 for (i = 0; i < timeout; i++) {640640- if (read_resp->req_id == mmio_read->seq_num)633633+ if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)641634 break;642635643636 udelay(1);···17891796 aenq_common = &aenq_e->aenq_common_desc;1790179717911798 /* Go over all the events */17921792- while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==17931793- phase) {17991799+ while ((READ_ONCE(aenq_common->flags) &18001800+ ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {18011801+ /* Make sure the phase bit (ownership) is as expected before18021802+ * reading the rest of the descriptor.18031803+ */18041804+ dma_rmb();18051805+17941806 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",17951807 aenq_common->group, aenq_common->syndrom,17961808 (u64)aenq_common->timestamp_low +
+6
drivers/net/ethernet/amazon/ena/ena_eth_com.c
···5151 if (desc_phase != expected_phase)5252 return NULL;53535454+ /* Make sure we read the rest of the descriptor after the phase bit5555+ * has been read5656+ */5757+ dma_rmb();5858+5459 return cdesc;5560}5661···498493 if (cdesc_phase != expected_phase)499494 return -EAGAIN;500495496496+ dma_rmb();501497 if (unlikely(cdesc->req_id >= io_cq->q_depth)) {502498 pr_err("Invalid req id %d\n", cdesc->req_id);503499 return -EINVAL;
···76767777static int ena_rss_init_default(struct ena_adapter *adapter);7878static void check_for_admin_com_state(struct ena_adapter *adapter);7979-static void ena_destroy_device(struct ena_adapter *adapter);7979+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);8080static int ena_restore_device(struct ena_adapter *adapter);81818282static void ena_tx_timeout(struct net_device *dev)···461461 return -ENOMEM;462462 }463463464464- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,464464+ dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,465465 DMA_FROM_DEVICE);466466 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {467467 u64_stats_update_begin(&rx_ring->syncp);···478478 rx_info->page_offset = 0;479479 ena_buf = &rx_info->ena_buf;480480 ena_buf->paddr = dma;481481- ena_buf->len = PAGE_SIZE;481481+ ena_buf->len = ENA_PAGE_SIZE;482482483483 return 0;484484}···495495 return;496496 }497497498498- dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,498498+ dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,499499 DMA_FROM_DEVICE);500500501501 __free_page(page);···551551 rx_ring->qid, i, num);552552 }553553554554- if (likely(i)) {555555- /* Add memory barrier to make sure the desc were written before556556- * issue a doorbell557557- */558558- wmb();559559- ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);560560- mmiowb();561561- }554554+ /* ena_com_write_sq_doorbell issues a wmb() */555555+ if (likely(i))556556+ ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);562557563558 rx_ring->next_to_use = next_to_use;564559···911916 do {912917 dma_unmap_page(rx_ring->dev,913918 dma_unmap_addr(&rx_info->ena_buf, paddr),914914- PAGE_SIZE, DMA_FROM_DEVICE);919919+ ENA_PAGE_SIZE, DMA_FROM_DEVICE);915920916921 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,917917- rx_info->page_offset, len, PAGE_SIZE);922922+ rx_info->page_offset, len, ENA_PAGE_SIZE);918923919924 netif_dbg(rx_ring->adapter, rx_status, 
rx_ring->netdev,920925 "rx skb updated. len %d. data_len %d\n",···18951900 "Destroy failure, restarting device\n");18961901 ena_dump_stats_to_dmesg(adapter);18971902 /* rtnl lock already obtained in dev_ioctl() layer */18981898- ena_destroy_device(adapter);19031903+ ena_destroy_device(adapter, false);18991904 ena_restore_device(adapter);19001905 }19011906···21072112 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,21082113 tx_ring->ring_size);2109211421102110- /* This WMB is aimed to:21112111- * 1 - perform smp barrier before reading next_to_completion21122112- * 2 - make sure the desc were written before trigger DB21132113- */21142114- wmb();21152115-21162115 /* stop the queue when no more space available, the packet can have up21172116 * to sgl_size + 2. one for the meta descriptor and one for header21182117 * (if the header is larger than tx_max_header_size).···21252136 * stop the queue but meanwhile clean_tx_irq updates21262137 * next_to_completion and terminates.21272138 * The queue will remain stopped forever.21282128- * To solve this issue this function perform rmb, check21292129- * the wakeup condition and wake up the queue if needed.21392139+ * To solve this issue add a mb() to make sure that21402140+ * netif_tx_stop_queue() write is vissible before checking if21412141+ * there is additional space in the queue.21302142 */21312131- smp_rmb();21432143+ smp_mb();2132214421332145 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)21342146 > ENA_TX_WAKEUP_THRESH) {···21412151 }2142215221432153 if (netif_xmit_stopped(txq) || !skb->xmit_more) {21442144- /* trigger the dma engine */21452145- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);21542154+ /* trigger the dma engine. 
ena_com_write_sq_doorbell()21552155+ * has a mb21562156+ */21572157+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);21462158 u64_stats_update_begin(&tx_ring->syncp);21472159 tx_ring->tx_stats.doorbells++;21482160 u64_stats_update_end(&tx_ring->syncp);···25422550 return rc;25432551}2544255225452545-static void ena_destroy_device(struct ena_adapter *adapter)25532553+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)25462554{25472555 struct net_device *netdev = adapter->netdev;25482556 struct ena_com_dev *ena_dev = adapter->ena_dev;25492557 bool dev_up;25582558+25592559+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))25602560+ return;2550256125512562 netif_carrier_off(netdev);25522563···25582563 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);25592564 adapter->dev_up_before_reset = dev_up;2560256525612561- ena_com_set_admin_running_state(ena_dev, false);25662566+ if (!graceful)25672567+ ena_com_set_admin_running_state(ena_dev, false);2562256825632569 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))25642570 ena_down(adapter);···25872591 adapter->reset_reason = ENA_REGS_RESET_NORMAL;2588259225892593 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);25942594+ clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);25902595}2591259625922597static int ena_restore_device(struct ena_adapter *adapter)···26322635 }26332636 }2634263726382638+ set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);26352639 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));26362640 dev_err(&pdev->dev, "Device reset completed successfully\n");26372641···26632665 return;26642666 }26652667 rtnl_lock();26662666- ena_destroy_device(adapter);26682668+ ena_destroy_device(adapter, false);26672669 ena_restore_device(adapter);26682670 rtnl_unlock();26692671}···34073409 netdev->rx_cpu_rmap = NULL;34083410 }34093411#endif /* CONFIG_RFS_ACCEL */34103410-34113411- unregister_netdev(netdev);34123412 del_timer_sync(&adapter->timer_service);3413341334143414 
cancel_work_sync(&adapter->reset_task);3415341534163416- /* Reset the device only if the device is running. */34163416+ unregister_netdev(netdev);34173417+34183418+ /* If the device is running then we want to make sure the device will be34193419+ * reset to make sure no more events will be issued by the device.34203420+ */34173421 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))34183418- ena_com_dev_reset(ena_dev, adapter->reset_reason);34223422+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);3419342334203420- ena_free_mgmnt_irq(adapter);34213421-34223422- ena_disable_msix(adapter);34243424+ rtnl_lock();34253425+ ena_destroy_device(adapter, true);34263426+ rtnl_unlock();3423342734243428 free_netdev(netdev);34253425-34263426- ena_com_mmio_reg_read_request_destroy(ena_dev);34273427-34283428- ena_com_abort_admin_commands(ena_dev);34293429-34303430- ena_com_wait_for_abort_completion(ena_dev);34313431-34323432- ena_com_admin_destroy(ena_dev);3433342934343430 ena_com_rss_destroy(ena_dev);34353431···34593467 "ignoring device reset request as the device is being suspended\n");34603468 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);34613469 }34623462- ena_destroy_device(adapter);34703470+ ena_destroy_device(adapter, true);34633471 rtnl_unlock();34643472 return 0;34653473}
+11
drivers/net/ethernet/amazon/ena/ena_netdev.h
···355355356356int ena_get_sset_count(struct net_device *netdev, int sset);357357358358+/* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the359359+ * driver passes 0.360360+ * Since the max packet size the ENA handles is ~9kB limit the buffer length to361361+ * 16kB.362362+ */363363+#if PAGE_SIZE > SZ_16K364364+#define ENA_PAGE_SIZE SZ_16K365365+#else366366+#define ENA_PAGE_SIZE PAGE_SIZE367367+#endif368368+358369#endif /* !(ENA_H) */
···11+# SPDX-License-Identifier: GPL-2.012#23# Makefile for the Renesas device drivers.34#
+1-5
drivers/net/ethernet/renesas/ravb_ptp.c
···11+// SPDX-License-Identifier: GPL-2.0+12/* PTP 1588 clock using the Renesas Ethernet AVB23 *34 * Copyright (C) 2013-2015 Renesas Electronics Corporation45 * Copyright (C) 2015 Renesas Solutions Corp.56 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>66- *77- * This program is free software; you can redistribute it and/or modify88- * it under the terms of the GNU General Public License as published by99- * the Free Software Foundation; either version 2 of the License, or1010- * (at your option) any later version.117 */128139#include "ravb.h"
+29-1
drivers/net/usb/qmi_wwan.c
···967967 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),968968 .driver_info = (unsigned long)&qmi_wwan_info,969969 },970970+ { /* Quectel EP06/EG06/EM06 */971971+ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,972972+ USB_CLASS_VENDOR_SPEC,973973+ USB_SUBCLASS_VENDOR_SPEC,974974+ 0xff),975975+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,976976+ },970977971978 /* 3. Combined interface devices matching on interface number */972979 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */···12621255 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */12631256 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */12641257 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */12651265- {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */1266125812671259 /* 4. Gobi 1000 devices */12681260 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */···13371331 return false;13381332}1339133313341334+static bool quectel_ep06_diag_detected(struct usb_interface *intf)13351335+{13361336+ struct usb_device *dev = interface_to_usbdev(intf);13371337+ struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;13381338+13391339+ if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&13401340+ le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&13411341+ intf_desc.bNumEndpoints == 2)13421342+ return true;13431343+13441344+ return false;13451345+}13461346+13401347static int qmi_wwan_probe(struct usb_interface *intf,13411348 const struct usb_device_id *prod)13421349{···13831364 dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");13841365 return -ENODEV;13851366 }13671367+13681368+ /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so13691369+ * we need to match on class/subclass/protocol. These values are13701370+ * identical for the diagnostic- and QMI-interface, but bNumEndpoints is13711371+ * different. 
Ignore the current interface if the number of endpoints13721372+ * matches the number for the diag interface (two).13731373+ */13741374+ if (quectel_ep06_diag_detected(intf))13751375+ return -ENODEV;1386137613871377 return usbnet_probe(intf, id);13881378}
+10-14
drivers/net/xen-netfront.c
···8787/* IRQ name is queue name with "-tx" or "-rx" appended */8888#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)89899090-static DECLARE_WAIT_QUEUE_HEAD(module_load_q);9191-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);9090+static DECLARE_WAIT_QUEUE_HEAD(module_wq);92919392struct netfront_stats {9493 u64 packets;···13311332 netif_carrier_off(netdev);1332133313331334 xenbus_switch_state(dev, XenbusStateInitialising);13341334- wait_event(module_load_q,13351335- xenbus_read_driver_state(dev->otherend) !=13361336- XenbusStateClosed &&13371337- xenbus_read_driver_state(dev->otherend) !=13381338- XenbusStateUnknown);13351335+ wait_event(module_wq,13361336+ xenbus_read_driver_state(dev->otherend) !=13371337+ XenbusStateClosed &&13381338+ xenbus_read_driver_state(dev->otherend) !=13391339+ XenbusStateUnknown);13391340 return netdev;1340134113411342 exit:···2009201020102011 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));2011201220132013+ wake_up_all(&module_wq);20142014+20122015 switch (backend_state) {20132016 case XenbusStateInitialising:20142017 case XenbusStateInitialised:20152018 case XenbusStateReconfiguring:20162019 case XenbusStateReconfigured:20172017- break;20182018-20192020 case XenbusStateUnknown:20202020- wake_up_all(&module_unload_q);20212021 break;2022202220232023 case XenbusStateInitWait:···20322034 break;2033203520342036 case XenbusStateClosed:20352035- wake_up_all(&module_unload_q);20362037 if (dev->state == XenbusStateClosed)20372038 break;20382039 /* Missed the backend's CLOSING state -- fallthrough */20392040 case XenbusStateClosing:20402040- wake_up_all(&module_unload_q);20412041 xenbus_frontend_closed(dev);20422042 break;20432043 }···2143214721442148 if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {21452149 xenbus_switch_state(dev, XenbusStateClosing);21462146- wait_event(module_unload_q,21502150+ wait_event(module_wq,21472151 xenbus_read_driver_state(dev->otherend) ==21482152 XenbusStateClosing ||21492153 
xenbus_read_driver_state(dev->otherend) ==21502154 XenbusStateUnknown);2151215521522156 xenbus_switch_state(dev, XenbusStateClosed);21532153- wait_event(module_unload_q,21572157+ wait_event(module_wq,21542158 xenbus_read_driver_state(dev->otherend) ==21552159 XenbusStateClosed ||21562160 xenbus_read_driver_state(dev->otherend) ==
···5252 default y5353 depends on SCSI5454 ---help---5555- This option enables the new blk-mq based I/O path for SCSI5656- devices by default. With the option the scsi_mod.use_blk_mq5757- module/boot option defaults to Y, without it to N, but it can5858- still be overridden either way.5555+ This option enables the blk-mq based I/O path for SCSI devices by5656+ default. With this option the scsi_mod.use_blk_mq module/boot5757+ option defaults to Y, without it to N, but it can still be5858+ overridden either way.59596060- If unsure say N.6060+ If unsure say Y.61616262config SCSI_PROC_FS6363 bool "legacy /proc/scsi/ support"
+1-1
drivers/scsi/aacraid/aacraid.h
···13461346struct aac_hba_map_info {13471347 __le32 rmw_nexus; /* nexus for native HBA devices */13481348 u8 devtype; /* device type */13491349- u8 reset_state; /* 0 - no reset, 1..x - */13491349+ s8 reset_state; /* 0 - no reset, 1..x - */13501350 /* after xth TM LUN reset */13511351 u16 qd_limit;13521352 u32 scan_counter;
+53-18
drivers/scsi/csiostor/csio_hw.c
···16021602}1603160316041604/**16051605+ * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits16061606+ * @caps32: a 32-bit Port Capabilities value16071607+ *16081608+ * Returns the equivalent 16-bit Port Capabilities value. Note that16091609+ * not all 32-bit Port Capabilities can be represented in the 16-bit16101610+ * Port Capabilities and some fields/values may not make it.16111611+ */16121612+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)16131613+{16141614+ fw_port_cap16_t caps16 = 0;16151615+16161616+ #define CAP32_TO_CAP16(__cap) \16171617+ do { \16181618+ if (caps32 & FW_PORT_CAP32_##__cap) \16191619+ caps16 |= FW_PORT_CAP_##__cap; \16201620+ } while (0)16211621+16221622+ CAP32_TO_CAP16(SPEED_100M);16231623+ CAP32_TO_CAP16(SPEED_1G);16241624+ CAP32_TO_CAP16(SPEED_10G);16251625+ CAP32_TO_CAP16(SPEED_25G);16261626+ CAP32_TO_CAP16(SPEED_40G);16271627+ CAP32_TO_CAP16(SPEED_100G);16281628+ CAP32_TO_CAP16(FC_RX);16291629+ CAP32_TO_CAP16(FC_TX);16301630+ CAP32_TO_CAP16(802_3_PAUSE);16311631+ CAP32_TO_CAP16(802_3_ASM_DIR);16321632+ CAP32_TO_CAP16(ANEG);16331633+ CAP32_TO_CAP16(FORCE_PAUSE);16341634+ CAP32_TO_CAP16(MDIAUTO);16351635+ CAP32_TO_CAP16(MDISTRAIGHT);16361636+ CAP32_TO_CAP16(FEC_RS);16371637+ CAP32_TO_CAP16(FEC_BASER_RS);16381638+16391639+ #undef CAP32_TO_CAP1616401640+16411641+ return caps16;16421642+}16431643+16441644+/**16051645 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities16061646 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value16071647 *···17991759 val = 1;1800176018011761 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,18021802- hw->pfn, 0, 1, ¶m, &val, false,17621762+ hw->pfn, 0, 1, ¶m, &val, true,18031763 NULL);1804176418051765 if (csio_mb_issue(hw, mbp)) {···18091769 return -EINVAL;18101770 }1811177118121812- csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,18131813- &val);18141814- if (retval != FW_SUCCESS) {18151815- csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",18161816- 
portid, retval);18171817- mempool_free(mbp, hw->mb_mempool);18181818- return -EINVAL;18191819- }18201820-18211821- fw_caps = val;17721772+ csio_mb_process_read_params_rsp(hw, mbp, &retval,17731773+ 0, NULL);17741774+ fw_caps = retval ? FW_CAPS16 : FW_CAPS32;18221775 }1823177618241777 /* Read PORT information */···23972364}2398236523992366/*24002400- * Returns -EINVAL if attempts to flash the firmware failed24012401- * else returns 0,23672367+ * Returns -EINVAL if attempts to flash the firmware failed,23682368+ * -ENOMEM if memory allocation failed else returns 0,24022369 * if flashing was not attempted because the card had the24032370 * latest firmware ECANCELED is returned24042371 */···24262393 return -EINVAL;24272394 }2428239523962396+ /* allocate memory to read the header of the firmware on the23972397+ * card23982398+ */23992399+ card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);24002400+ if (!card_fw)24012401+ return -ENOMEM;24022402+24292403 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))24302404 fw_bin_file = FW_FNAME_T5;24312405 else···24452405 fw_data = fw->data;24462406 fw_size = fw->size;24472407 }24482448-24492449- /* allocate memory to read the header of the firmware on the24502450- * card24512451- */24522452- card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);2453240824542409 /* upgrade FW logic */24552410 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
···672672#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */673673#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */674674#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */675675-#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */675675+#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */676676677677 uint32_t hba_flag; /* hba generic flags */678678#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
+4-4
drivers/scsi/lpfc/lpfc_attr.c
···5122512251235123/*51245124# lpfc_fdmi_on: Controls FDMI support.51255125-# 0 No FDMI support (default)51265126-# 1 Traditional FDMI support51255125+# 0 No FDMI support51265126+# 1 Traditional FDMI support (default)51275127# Traditional FDMI support means the driver will assume FDMI-2 support;51285128# however, if that fails, it will fallback to FDMI-1.51295129# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.51305130# If lpfc_enable_SmartSAN is set 0, the driver uses the current value of51315131# lpfc_fdmi_on.51325132-# Value range [0,1]. Default value is 0.51325132+# Value range [0,1]. Default value is 1.51335133*/51345134-LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");51345134+LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");5135513551365136/*51375137# Specifies the maximum number of ELS cmds we can have outstanding (for
···58005800 * root: the root of the parent directory58015801 * rsv: block reservation58025802 * items: the number of items that we need do reservation58035803- * qgroup_reserved: used to return the reserved size in qgroup58035803+ * use_global_rsv: allow fallback to the global block reservation58045804 *58055805 * This function is used to reserve the space for snapshot/subvolume58065806 * creation and deletion. Those operations are different with the···58105810 * the space reservation mechanism in start_transaction().58115811 */58125812int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,58135813- struct btrfs_block_rsv *rsv,58145814- int items,58135813+ struct btrfs_block_rsv *rsv, int items,58155814 bool use_global_rsv)58165815{58165816+ u64 qgroup_num_bytes = 0;58175817 u64 num_bytes;58185818 int ret;58195819 struct btrfs_fs_info *fs_info = root->fs_info;···5821582158225822 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {58235823 /* One for parent inode, two for dir entries */58245824- num_bytes = 3 * fs_info->nodesize;58255825- ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);58245824+ qgroup_num_bytes = 3 * fs_info->nodesize;58255825+ ret = btrfs_qgroup_reserve_meta_prealloc(root,58265826+ qgroup_num_bytes, true);58265827 if (ret)58275828 return ret;58285828- } else {58295829- num_bytes = 0;58305829 }5831583058325831 num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);···58375838 if (ret == -ENOSPC && use_global_rsv)58385839 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);5839584058405840- if (ret && num_bytes)58415841- btrfs_qgroup_free_meta_prealloc(root, num_bytes);58415841+ if (ret && qgroup_num_bytes)58425842+ btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);5842584358435844 return ret;58445845}
+84-33
fs/btrfs/inode.c
···12711271 u64 disk_num_bytes;12721272 u64 ram_bytes;12731273 int extent_type;12741274- int ret, err;12741274+ int ret;12751275 int type;12761276 int nocow;12771277 int check_prev = 1;···14031403 * if there are pending snapshots for this root,14041404 * we fall into common COW way.14051405 */14061406- if (!nolock) {14071407- err = btrfs_start_write_no_snapshotting(root);14081408- if (!err)14091409- goto out_check;14101410- }14061406+ if (!nolock && atomic_read(&root->snapshot_force_cow))14071407+ goto out_check;14111408 /*14121409 * force cow if csum exists in the range.14131410 * this ensure that csum for a given extent are···14131416 ret = csum_exist_in_range(fs_info, disk_bytenr,14141417 num_bytes);14151418 if (ret) {14161416- if (!nolock)14171417- btrfs_end_write_no_snapshotting(root);14181418-14191419 /*14201420 * ret could be -EIO if the above fails to read14211421 * metadata.···14251431 WARN_ON_ONCE(nolock);14261432 goto out_check;14271433 }14281428- if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {14291429- if (!nolock)14301430- btrfs_end_write_no_snapshotting(root);14341434+ if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))14311435 goto out_check;14321432- }14331436 nocow = 1;14341437 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {14351438 extent_end = found_key.offset +···14391448out_check:14401449 if (extent_end <= start) {14411450 path->slots[0]++;14421442- if (!nolock && nocow)14431443- btrfs_end_write_no_snapshotting(root);14441451 if (nocow)14451452 btrfs_dec_nocow_writers(fs_info, disk_bytenr);14461453 goto next_slot;···14601471 end, page_started, nr_written, 1,14611472 NULL);14621473 if (ret) {14631463- if (!nolock && nocow)14641464- btrfs_end_write_no_snapshotting(root);14651474 if (nocow)14661475 btrfs_dec_nocow_writers(fs_info,14671476 disk_bytenr);···14791492 ram_bytes, BTRFS_COMPRESS_NONE,14801493 BTRFS_ORDERED_PREALLOC);14811494 if (IS_ERR(em)) {14821482- if (!nolock && nocow)14831483- 
btrfs_end_write_no_snapshotting(root);14841495 if (nocow)14851496 btrfs_dec_nocow_writers(fs_info,14861497 disk_bytenr);···15171532 EXTENT_CLEAR_DATA_RESV,15181533 PAGE_UNLOCK | PAGE_SET_PRIVATE2);1519153415201520- if (!nolock && nocow)15211521- btrfs_end_write_no_snapshotting(root);15221535 cur_offset = extent_end;1523153615241537 /*···66226639 drop_inode = 1;66236640 } else {66246641 struct dentry *parent = dentry->d_parent;66426642+ int ret;66436643+66256644 err = btrfs_update_inode(trans, root, inode);66266645 if (err)66276646 goto fail;···66376652 goto fail;66386653 }66396654 d_instantiate(dentry, inode);66406640- btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);66556655+ ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,66566656+ true, NULL);66576657+ if (ret == BTRFS_NEED_TRANS_COMMIT) {66586658+ err = btrfs_commit_transaction(trans);66596659+ trans = NULL;66606660+ }66416661 }6642666266436663fail:···93789388 u64 new_idx = 0;93799389 u64 root_objectid;93809390 int ret;93819381- int ret2;93829391 bool root_log_pinned = false;93839392 bool dest_log_pinned = false;93939393+ struct btrfs_log_ctx ctx_root;93949394+ struct btrfs_log_ctx ctx_dest;93959395+ bool sync_log_root = false;93969396+ bool sync_log_dest = false;93979397+ bool commit_transaction = false;9384939893859399 /* we only allow rename subvolume link between subvolumes */93869400 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)93879401 return -EXDEV;94029402+94039403+ btrfs_init_log_ctx(&ctx_root, old_inode);94049404+ btrfs_init_log_ctx(&ctx_dest, new_inode);9388940593899406 /* close the race window with snapshot create/destroy ioctl */93909407 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)···9539954295409543 if (root_log_pinned) {95419544 parent = new_dentry->d_parent;95429542- btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),95439543- parent);95459545+ ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),95469546+ BTRFS_I(old_dir), parent,95479547+ false, 
&ctx_root);95489548+ if (ret == BTRFS_NEED_LOG_SYNC)95499549+ sync_log_root = true;95509550+ else if (ret == BTRFS_NEED_TRANS_COMMIT)95519551+ commit_transaction = true;95529552+ ret = 0;95449553 btrfs_end_log_trans(root);95459554 root_log_pinned = false;95469555 }95479556 if (dest_log_pinned) {95489548- parent = old_dentry->d_parent;95499549- btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),95509550- parent);95579557+ if (!commit_transaction) {95589558+ parent = old_dentry->d_parent;95599559+ ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),95609560+ BTRFS_I(new_dir), parent,95619561+ false, &ctx_dest);95629562+ if (ret == BTRFS_NEED_LOG_SYNC)95639563+ sync_log_dest = true;95649564+ else if (ret == BTRFS_NEED_TRANS_COMMIT)95659565+ commit_transaction = true;95669566+ ret = 0;95679567+ }95519568 btrfs_end_log_trans(dest);95529569 dest_log_pinned = false;95539570 }···95949583 dest_log_pinned = false;95959584 }95969585 }95979597- ret2 = btrfs_end_transaction(trans);95989598- ret = ret ? ret : ret2;95869586+ if (!ret && sync_log_root && !commit_transaction) {95879587+ ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,95889588+ &ctx_root);95899589+ if (ret)95909590+ commit_transaction = true;95919591+ }95929592+ if (!ret && sync_log_dest && !commit_transaction) {95939593+ ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,95949594+ &ctx_dest);95959595+ if (ret)95969596+ commit_transaction = true;95979597+ }95989598+ if (commit_transaction) {95999599+ ret = btrfs_commit_transaction(trans);96009600+ } else {96019601+ int ret2;96029602+96039603+ ret2 = btrfs_end_transaction(trans);96049604+ ret = ret ? 
ret : ret2;96059605+ }95999606out_notrans:96009607 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)96019608 up_read(&fs_info->subvol_sem);···96909661 int ret;96919662 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));96929663 bool log_pinned = false;96649664+ struct btrfs_log_ctx ctx;96659665+ bool sync_log = false;96669666+ bool commit_transaction = false;9693966796949668 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)96959669 return -EPERM;···98509818 if (log_pinned) {98519819 struct dentry *parent = new_dentry->d_parent;9852982098539853- btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),98549854- parent);98219821+ btrfs_init_log_ctx(&ctx, old_inode);98229822+ ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),98239823+ BTRFS_I(old_dir), parent,98249824+ false, &ctx);98259825+ if (ret == BTRFS_NEED_LOG_SYNC)98269826+ sync_log = true;98279827+ else if (ret == BTRFS_NEED_TRANS_COMMIT)98289828+ commit_transaction = true;98299829+ ret = 0;98559830 btrfs_end_log_trans(root);98569831 log_pinned = false;98579832 }···98959856 btrfs_end_log_trans(root);98969857 log_pinned = false;98979858 }98989898- btrfs_end_transaction(trans);98599859+ if (!ret && sync_log) {98609860+ ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);98619861+ if (ret)98629862+ commit_transaction = true;98639863+ }98649864+ if (commit_transaction) {98659865+ ret = btrfs_commit_transaction(trans);98669866+ } else {98679867+ int ret2;98689868+98699869+ ret2 = btrfs_end_transaction(trans);98709870+ ret = ret ? ret : ret2;98719871+ }98999872out_notrans:99009873 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)99019874 up_read(&fs_info->subvol_sem);
+35
fs/btrfs/ioctl.c
···747747 struct btrfs_pending_snapshot *pending_snapshot;748748 struct btrfs_trans_handle *trans;749749 int ret;750750+ bool snapshot_force_cow = false;750751751752 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))752753 return -EINVAL;···764763 goto free_pending;765764 }766765766766+ /*767767+ * Force new buffered writes to reserve space even when NOCOW is768768+ * possible. This is to avoid later writeback (running dealloc) to769769+ * fallback to COW mode and unexpectedly fail with ENOSPC.770770+ */767771 atomic_inc(&root->will_be_snapshotted);768772 smp_mb__after_atomic();769773 /* wait for no snapshot writes */···778772 ret = btrfs_start_delalloc_inodes(root);779773 if (ret)780774 goto dec_and_free;775775+776776+ /*777777+ * All previous writes have started writeback in NOCOW mode, so now778778+ * we force future writes to fallback to COW mode during snapshot779779+ * creation.780780+ */781781+ atomic_inc(&root->snapshot_force_cow);782782+ snapshot_force_cow = true;781783782784 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);783785···851837fail:852838 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);853839dec_and_free:840840+ if (snapshot_force_cow)841841+ atomic_dec(&root->snapshot_force_cow);854842 if (atomic_dec_and_test(&root->will_be_snapshotted))855843 wake_up_var(&root->will_be_snapshotted);856844free_pending:···3469345334703454 same_lock_start = min_t(u64, loff, dst_loff);34713455 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;34563456+ } else {34573457+ /*34583458+ * If the source and destination inodes are different, the34593459+ * source's range end offset matches the source's i_size, that34603460+ * i_size is not a multiple of the sector size, and the34613461+ * destination range does not go past the destination's i_size,34623462+ * we must round down the length to the nearest sector size34633463+ * multiple. 
If we don't do this adjustment we end replacing34643464+ * with zeroes the bytes in the range that starts at the34653465+ * deduplication range's end offset and ends at the next sector34663466+ * size multiple.34673467+ */34683468+ if (loff + olen == i_size_read(src) &&34693469+ dst_loff + len < i_size_read(dst)) {34703470+ const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;34713471+34723472+ len = round_down(i_size_read(src), sz) - loff;34733473+ olen = len;34743474+ }34723475 }3473347634743477again:
···60256025 * Call this after adding a new name for a file and it will properly60266026 * update the log to reflect the new name.60276027 *60286028- * It will return zero if all goes well, and it will return 1 if a60296029- * full transaction commit is required.60286028+ * @ctx can not be NULL when @sync_log is false, and should be NULL when it's60296029+ * true (because it's not used).60306030+ *60316031+ * Return value depends on whether @sync_log is true or false.60326032+ * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be60336033+ * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT60346034+ * otherwise.60356035+ * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to60366036+ * to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,60376037+ * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be60386038+ * committed (without attempting to sync the log).60306039 */60316040int btrfs_log_new_name(struct btrfs_trans_handle *trans,60326041 struct btrfs_inode *inode, struct btrfs_inode *old_dir,60336033- struct dentry *parent)60426042+ struct dentry *parent,60436043+ bool sync_log, struct btrfs_log_ctx *ctx)60346044{60356045 struct btrfs_fs_info *fs_info = trans->fs_info;60466046+ int ret;6036604760376048 /*60386049 * this will force the logging code to walk the dentry chain···60586047 */60596048 if (inode->logged_trans <= fs_info->last_trans_committed &&60606049 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))60616061- return 0;60506050+ return sync_log ? 
BTRFS_DONT_NEED_TRANS_COMMIT :60516051+ BTRFS_DONT_NEED_LOG_SYNC;6062605260636063- return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,60646064- LOG_INODE_EXISTS, NULL);60536053+ if (sync_log) {60546054+ struct btrfs_log_ctx ctx2;60556055+60566056+ btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);60576057+ ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,60586058+ LOG_INODE_EXISTS, &ctx2);60596059+ if (ret == BTRFS_NO_LOG_SYNC)60606060+ return BTRFS_DONT_NEED_TRANS_COMMIT;60616061+ else if (ret)60626062+ return BTRFS_NEED_TRANS_COMMIT;60636063+60646064+ ret = btrfs_sync_log(trans, inode->root, &ctx2);60656065+ if (ret)60666066+ return BTRFS_NEED_TRANS_COMMIT;60676067+ return BTRFS_DONT_NEED_TRANS_COMMIT;60686068+ }60696069+60706070+ ASSERT(ctx);60716071+ ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,60726072+ LOG_INODE_EXISTS, ctx);60736073+ if (ret == BTRFS_NO_LOG_SYNC)60746074+ return BTRFS_DONT_NEED_LOG_SYNC;60756075+ else if (ret)60766076+ return BTRFS_NEED_TRANS_COMMIT;60776077+60786078+ return BTRFS_NEED_LOG_SYNC;60656079}60666080
···4491449144924492 /* Now btrfs_update_device() will change the on-disk size. */44934493 ret = btrfs_update_device(trans, device);44944494- btrfs_end_transaction(trans);44944494+ if (ret < 0) {44954495+ btrfs_abort_transaction(trans, ret);44964496+ btrfs_end_transaction(trans);44974497+ } else {44984498+ ret = btrfs_commit_transaction(trans);44994499+ }44954500done:44964501 btrfs_free_path(path);44974502 if (ret) {
+11-5
fs/ceph/super.c
···602602603603/*604604 * create a new fs client605605+ *606606+ * Success or not, this function consumes @fsopt and @opt.605607 */606608static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,607609 struct ceph_options *opt)···611609 struct ceph_fs_client *fsc;612610 int page_count;613611 size_t size;614614- int err = -ENOMEM;612612+ int err;615613616614 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);617617- if (!fsc)618618- return ERR_PTR(-ENOMEM);615615+ if (!fsc) {616616+ err = -ENOMEM;617617+ goto fail;618618+ }619619620620 fsc->client = ceph_create_client(opt, fsc);621621 if (IS_ERR(fsc->client)) {622622 err = PTR_ERR(fsc->client);623623 goto fail;624624 }625625+ opt = NULL; /* fsc->client now owns this */625626626627 fsc->client->extra_mon_dispatch = extra_mon_dispatch;627628 fsc->client->osdc.abort_on_full = true;···682677 ceph_destroy_client(fsc->client);683678fail:684679 kfree(fsc);680680+ if (opt)681681+ ceph_destroy_options(opt);682682+ destroy_mount_options(fsopt);685683 return ERR_PTR(err);686684}687685···10501042 fsc = create_fs_client(fsopt, opt);10511043 if (IS_ERR(fsc)) {10521044 res = ERR_CAST(fsc);10531053- destroy_mount_options(fsopt);10541054- ceph_destroy_options(opt);10551045 goto out_final;10561046 }10571047
···248248 * MacOS server pads after SMB2.1 write response with 3 bytes249249 * of junk. Other servers match RFC1001 len to actual250250 * SMB2/SMB3 frame length (header + smb2 response specific data)251251- * Some windows servers do too when compounding is used.252252- * Log the server error (once), but allow it and continue251251+ * Some windows servers also pad up to 8 bytes when compounding.252252+ * If pad is longer than eight bytes, log the server behavior253253+ * (once), since may indicate a problem but allow it and continue253254 * since the frame is parseable.254255 */255256 if (clc_len < len) {256256- printk_once(KERN_WARNING257257- "SMB2 server sent bad RFC1001 len %d not %d\n",258258- len, clc_len);257257+ pr_warn_once(258258+ "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",259259+ len, clc_len, command, mid);259260 return 0;260261 }262262+ pr_warn_once(263263+ "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",264264+ len, clc_len, command, mid);261265262266 return 1;263267 }
···21782178 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||21792179 *oplock == SMB2_OPLOCK_LEVEL_NONE)21802180 req->RequestedOplockLevel = *oplock;21812181+ else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&21822182+ (oparms->create_options & CREATE_NOT_FILE))21832183+ req->RequestedOplockLevel = *oplock; /* no srv lease support */21812184 else {21822185 rc = add_lease_context(server, iov, &n_iov,21832186 oparms->fid->lease_key, oplock);
+1-10
fs/nilfs2/alloc.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * alloc.c - NILFS dat/inode allocator34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Originally written by Koji Sato.178 * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
+1-10
fs/nilfs2/alloc.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Originally written by Koji Sato.178 * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
+1-10
fs/nilfs2/bmap.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * bmap.c - NILFS block mapping.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/bmap.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * bmap.h - NILFS block mapping.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/btnode.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * btnode.c - NILFS B-tree node cache34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Originally written by Seiji Kihara.178 * Fully revised by Ryusuke Konishi for stabilization and simplification.
+1-10
fs/nilfs2/btnode.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * btnode.h - NILFS B-tree node cache34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Seiji Kihara.178 * Revised by Ryusuke Konishi.
+1-10
fs/nilfs2/btree.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * btree.c - NILFS B-tree.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/btree.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * btree.h - NILFS B-tree.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/cpfile.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * cpfile.c - NILFS checkpoint file.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/cpfile.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * cpfile.h - NILFS checkpoint file.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/dat.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * dat.c - NILFS disk address translation.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/dat.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * dat.h - NILFS disk address translation.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/dir.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * dir.c - NILFS directory entry operations34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Modified for NILFS by Amagai Yoshiji.178 */
+1-10
fs/nilfs2/direct.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * direct.c - NILFS direct block pointer.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/direct.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * direct.h - NILFS direct block pointer.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/file.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * file.c - NILFS regular file handling primitives including fsync().34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Amagai Yoshiji and Ryusuke Konishi.178 */
+1-10
fs/nilfs2/gcinode.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * gcinode.c - dummy inodes to buffer blocks for garbage collection34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.178 * Revised by Ryusuke Konishi.
+1-10
fs/nilfs2/ifile.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * ifile.c - NILFS inode file34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Amagai Yoshiji.178 * Revised by Ryusuke Konishi.
+1-10
fs/nilfs2/ifile.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * ifile.h - NILFS inode file34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Amagai Yoshiji.178 * Revised by Ryusuke Konishi.
+1-10
fs/nilfs2/inode.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * inode.c - NILFS inode operations.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 *
+1-10
fs/nilfs2/ioctl.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * ioctl.c - NILFS ioctl operations.34 *45 * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/mdt.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * mdt.c - meta data file for NILFS34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 */
+1-10
fs/nilfs2/mdt.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * mdt.h - NILFS meta data file prototype and definitions34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 */
+1-10
fs/nilfs2/namei.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * namei.c - NILFS pathname lookup operations.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi.178 */
+1-10
fs/nilfs2/nilfs.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * nilfs.h - NILFS local header file.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato and Ryusuke Konishi.178 */
+1-10
fs/nilfs2/page.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * page.c - buffer/page management specific to NILFS34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi and Seiji Kihara.178 */
+1-10
fs/nilfs2/page.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * page.h - buffer/page management specific to NILFS34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi and Seiji Kihara.178 */
+1-10
fs/nilfs2/recovery.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * recovery.c - NILFS recovery logic34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 */
+1-10
fs/nilfs2/segbuf.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * segbuf.c - NILFS segment buffer34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 *
+1-10
fs/nilfs2/segbuf.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * segbuf.h - NILFS Segment buffer prototypes and definitions34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 *
+1-10
fs/nilfs2/segment.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * segment.c - NILFS segment constructor.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 *
+1-10
fs/nilfs2/segment.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * segment.h - NILFS Segment constructor prototypes and definitions34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 *
+1-10
fs/nilfs2/sufile.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * sufile.c - NILFS segment usage file.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 * Revised by Ryusuke Konishi.
+1-10
fs/nilfs2/sufile.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * sufile.h - NILFS segment usage file.34 *45 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Koji Sato.178 */
+1-10
fs/nilfs2/super.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * super.c - NILFS module and super block management.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 */
+1-10
fs/nilfs2/sysfs.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * sysfs.c - sysfs support implementation.34 *45 * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.56 * Copyright (C) 2014 HGST, Inc., a Western Digital Company.66- *77- * This program is free software; you can redistribute it and/or modify88- * it under the terms of the GNU General Public License as published by99- * the Free Software Foundation; either version 2 of the License, or1010- * (at your option) any later version.1111- *1212- * This program is distributed in the hope that it will be useful,1313- * but WITHOUT ANY WARRANTY; without even the implied warranty of1414- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1515- * GNU General Public License for more details.167 *178 * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>189 */
+1-10
fs/nilfs2/sysfs.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * sysfs.h - sysfs support declarations.34 *45 * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.56 * Copyright (C) 2014 HGST, Inc., a Western Digital Company.66- *77- * This program is free software; you can redistribute it and/or modify88- * it under the terms of the GNU General Public License as published by99- * the Free Software Foundation; either version 2 of the License, or1010- * (at your option) any later version.1111- *1212- * This program is distributed in the hope that it will be useful,1313- * but WITHOUT ANY WARRANTY; without even the implied warranty of1414- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1515- * GNU General Public License for more details.167 *178 * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>189 */
+1-10
fs/nilfs2/the_nilfs.c
···11+// SPDX-License-Identifier: GPL-2.0+12/*23 * the_nilfs.c - the_nilfs shared structure.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 *
+1-10
fs/nilfs2/the_nilfs.h
···11+/* SPDX-License-Identifier: GPL-2.0+ */12/*23 * the_nilfs.h - the_nilfs shared structure.34 *45 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License as published by88- * the Free Software Foundation; either version 2 of the License, or99- * (at your option) any later version.1010- *1111- * This program is distributed in the hope that it will be useful,1212- * but WITHOUT ANY WARRANTY; without even the implied warranty of1313- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414- * GNU General Public License for more details.156 *167 * Written by Ryusuke Konishi.178 *
···5656 struct list_head all_blkcgs_node;5757#ifdef CONFIG_CGROUP_WRITEBACK5858 struct list_head cgwb_list;5959+ refcount_t cgwb_refcnt;5960#endif6061};6162···9089 /* the blkg and policy id this per-policy data belongs to */9190 struct blkcg_gq *blkg;9291 int plid;9393- bool offline;9492};95939694/*···386386{387387 return cpd ? cpd->blkcg : NULL;388388}389389+390390+extern void blkcg_destroy_blkgs(struct blkcg *blkcg);391391+392392+#ifdef CONFIG_CGROUP_WRITEBACK393393+394394+/**395395+ * blkcg_cgwb_get - get a reference for blkcg->cgwb_list396396+ * @blkcg: blkcg of interest397397+ *398398+ * This is used to track the number of active wb's related to a blkcg.399399+ */400400+static inline void blkcg_cgwb_get(struct blkcg *blkcg)401401+{402402+ refcount_inc(&blkcg->cgwb_refcnt);403403+}404404+405405+/**406406+ * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list407407+ * @blkcg: blkcg of interest408408+ *409409+ * This is used to track the number of active wb's related to a blkcg.410410+ * When this count goes to zero, all active wb has finished so the411411+ * blkcg can continue destruction by calling blkcg_destroy_blkgs().412412+ * This work may occur in cgwb_release_workfn() on the cgwb_release413413+ * workqueue.414414+ */415415+static inline void blkcg_cgwb_put(struct blkcg *blkcg)416416+{417417+ if (refcount_dec_and_test(&blkcg->cgwb_refcnt))418418+ blkcg_destroy_blkgs(blkcg);419419+}420420+421421+#else422422+423423+static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }424424+425425+static inline void blkcg_cgwb_put(struct blkcg *blkcg)426426+{427427+ /* wb isn't being accounted, so trigger destruction right away */428428+ blkcg_destroy_blkgs(blkcg);429429+}430430+431431+#endif389432390433/**391434 * blkg_path - format cgroup path of blkg
+1
include/linux/hid.h
···526526 const char *name;527527 bool registered;528528 struct list_head reports; /* the list of reports */529529+ unsigned int application; /* application usage for this input */529530};530531531532enum hid_type {
···607607 bool bringup = st->bringup;608608 enum cpuhp_state state;609609610610+ if (WARN_ON_ONCE(!st->should_run))611611+ return;612612+610613 /*611614 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures612615 * that if we see ->should_run we also see the rest of the state.613616 */614617 smp_mb();615615-616616- if (WARN_ON_ONCE(!st->should_run))617617- return;618618619619 cpuhp_lock_acquire(bringup);620620···916916 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);917917 if (ret) {918918 st->target = prev_state;919919- undo_cpu_down(cpu, st);919919+ if (st->state < prev_state)920920+ undo_cpu_down(cpu, st);920921 break;921922 }922923 }···970969 * to do the further cleanups.971970 */972971 ret = cpuhp_down_callbacks(cpu, st, target);973973- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {972972+ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {974973 cpuhp_reset_state(st, prev_state);975974 __cpuhp_kick_ap(st);976975 }
+1-2
kernel/fork.c
···550550 goto out;551551 }552552 /* a new mm has just been created */553553- arch_dup_mmap(oldmm, mm);554554- retval = 0;553553+ retval = arch_dup_mmap(oldmm, mm);555554out:556555 up_write(&mm->mmap_sem);557556 flush_tlb_mm(oldmm);
···133133 spin_unlock_irqrestore(&watchdog_lock, *flags);134134}135135136136+static int clocksource_watchdog_kthread(void *data);137137+static void __clocksource_change_rating(struct clocksource *cs, int rating);138138+136139/*137140 * Interval: 0.5sec Threshold: 0.0625s138141 */139142#define WATCHDOG_INTERVAL (HZ >> 1)140143#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)144144+145145+static void clocksource_watchdog_work(struct work_struct *work)146146+{147147+ /*148148+ * We cannot directly run clocksource_watchdog_kthread() here, because149149+ * clocksource_select() calls timekeeping_notify() which uses150150+ * stop_machine(). One cannot use stop_machine() from a workqueue() due151151+ * lock inversions wrt CPU hotplug.152152+ *153153+ * Also, we only ever run this work once or twice during the lifetime154154+ * of the kernel, so there is no point in creating a more permanent155155+ * kthread for this.156156+ *157157+ * If kthread_run fails the next watchdog scan over the158158+ * watchdog_list will find the unstable clock again.159159+ */160160+ kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");161161+}141162142163static void __clocksource_unstable(struct clocksource *cs)143164{···166145 cs->flags |= CLOCK_SOURCE_UNSTABLE;167146168147 /*169169- * If the clocksource is registered clocksource_watchdog_work() will148148+ * If the clocksource is registered clocksource_watchdog_kthread() will170149 * re-rate and re-select.171150 */172151 if (list_empty(&cs->list)) {···177156 if (cs->mark_unstable)178157 cs->mark_unstable(cs);179158180180- /* kick clocksource_watchdog_work() */159159+ /* kick clocksource_watchdog_kthread() */181160 if (finished_booting)182161 schedule_work(&watchdog_work);183162}···187166 * @cs: clocksource to be marked unstable188167 *189168 * This function is called by the x86 TSC code to mark clocksources as unstable;190190- * it defers demotion and re-selection to a work.169169+ * it defers demotion and re-selection to a 
kthread.191170 */192171void clocksource_mark_unstable(struct clocksource *cs)193172{···412391 }413392}414393415415-static void __clocksource_change_rating(struct clocksource *cs, int rating);416416-417417-static int __clocksource_watchdog_work(void)394394+static int __clocksource_watchdog_kthread(void)418395{419396 struct clocksource *cs, *tmp;420397 unsigned long flags;···437418 return select;438419}439420440440-static void clocksource_watchdog_work(struct work_struct *work)421421+static int clocksource_watchdog_kthread(void *data)441422{442423 mutex_lock(&clocksource_mutex);443443- if (__clocksource_watchdog_work())424424+ if (__clocksource_watchdog_kthread())444425 clocksource_select();445426 mutex_unlock(&clocksource_mutex);427427+ return 0;446428}447429448430static bool clocksource_is_watchdog(struct clocksource *cs)···462442static void clocksource_select_watchdog(bool fallback) { }463443static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }464444static inline void clocksource_resume_watchdog(void) { }465465-static inline int __clocksource_watchdog_work(void) { return 0; }445445+static inline int __clocksource_watchdog_kthread(void) { return 0; }466446static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }467447void clocksource_mark_unstable(struct clocksource *cs) { }468448···830810 /*831811 * Run the watchdog first to eliminate unstable clock sources832812 */833833- __clocksource_watchdog_work();813813+ __clocksource_watchdog_kthread();834814 clocksource_select();835815 mutex_unlock(&clocksource_mutex);836816 return 0;
+2-2
lib/Kconfig.debug
···12771277 time. This is really bad from a security perspective, and12781278 so architecture maintainers really need to do what they can12791279 to get the CRNG seeded sooner after the system is booted.12801280- However, since users can not do anything actionble to12801280+ However, since users cannot do anything actionable to12811281 address this, by default the kernel will issue only a single12821282 warning for the first use of unseeded randomness.1283128312841284 Say Y here if you want to receive warnings for all uses of12851285 unseeded randomness. This will be of use primarily for12861286- those developers interersted in improving the security of12861286+ those developers interested in improving the security of12871287 Linux kernels running on their architecture (or12881288 subarchitecture).12891289
···821821 * but we need to be consistent with PTEs and architectures that822822 * can't support a 'special' bit.823823 */824824- BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));824824+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&825825+ !pfn_t_devmap(pfn));825826 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==826827 (VM_PFNMAP|VM_MIXEDMAP));827828 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));828828- BUG_ON(!pfn_t_devmap(pfn));829829830830 if (addr < vma->vm_start || addr >= vma->vm_end)831831 return VM_FAULT_SIGBUS;
+5-4
mm/kmemleak.c
···2097209720982098 kmemleak_initialized = 1;2099209921002100+ dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,21012101+ &kmemleak_fops);21022102+ if (!dentry)21032103+ pr_warn("Failed to create the debugfs kmemleak file\n");21042104+21002105 if (kmemleak_error) {21012106 /*21022107 * Some error occurred and kmemleak was disabled. There is a···21132108 return -ENOMEM;21142109 }2115211021162116- dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,21172117- &kmemleak_fops);21182118- if (!dentry)21192119- pr_warn("Failed to create the debugfs kmemleak file\n");21202111 mutex_lock(&scan_mutex);21212112 start_scan_thread();21222113 mutex_unlock(&scan_mutex);
-2
mm/memcontrol.c
···17011701 if (mem_cgroup_out_of_memory(memcg, mask, order))17021702 return OOM_SUCCESS;1703170317041704- WARN(1,"Memory cgroup charge failed because of no reclaimable memory! "17051705- "This looks like a misconfiguration or a kernel bug.");17061704 return OOM_FAILED;17071705}17081706
+2-1
mm/memory_hotplug.c
···13331333 if (__PageMovable(page))13341334 return pfn;13351335 if (PageHuge(page)) {13361336- if (page_huge_active(page))13361336+ if (hugepage_migration_supported(page_hstate(page)) &&13371337+ page_huge_active(page))13371338 return pfn;13381339 else13391340 pfn = round_up(pfn + 1,
+11-3
mm/oom_kill.c
···522522523523 tlb_gather_mmu(&tlb, mm, start, end);524524 if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {525525+ tlb_finish_mmu(&tlb, start, end);525526 ret = false;526527 continue;527528 }···11041103 }1105110411061105 select_bad_process(oc);11071107- /* Found nothing?!?! Either we hang forever, or we panic. */11081108- if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {11061106+ /* Found nothing?!?! */11071107+ if (!oc->chosen) {11091108 dump_header(oc, NULL);11101110- panic("Out of memory and no killable processes...\n");11091109+ pr_warn("Out of memory and no killable processes...\n");11101110+ /*11111111+ * If we got here due to an actual allocation at the11121112+ * system level, we cannot survive this and will enter11131113+ * an endless loop in the allocator. Bail out now.11141114+ */11151115+ if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))11161116+ panic("System is deadlocked on memory\n");11111117 }11121118 if (oc->chosen && oc->chosen != (void *)-1UL)11131119 oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
+4
mm/page_alloc.c
···77087708 * handle each tail page individually in migration.77097709 */77107710 if (PageHuge(page)) {77117711+77127712+ if (!hugepage_migration_supported(page_hstate(page)))77137713+ goto unmovable;77147714+77117715 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;77127716 continue;77137717 }
+7-4
mm/util.c
···435435EXPORT_SYMBOL(kvmalloc_node);436436437437/**438438- * kvfree - free memory allocated with kvmalloc439439- * @addr: pointer returned by kvmalloc438438+ * kvfree() - Free memory.439439+ * @addr: Pointer to allocated memory.440440 *441441- * If the memory is allocated from vmalloc area it is freed with vfree().442442- * Otherwise kfree() is used.441441+ * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().442442+ * It is slightly more efficient to use kfree() or vfree() if you are certain443443+ * that you know which one to use.444444+ *445445+ * Context: Any context except NMI.443446 */444447void kvfree(const void *addr)445448{
-3
net/core/skbuff.c
···939939940940 WARN_ON_ONCE(!in_task());941941942942- if (!sock_flag(sk, SOCK_ZEROCOPY))943943- return NULL;944944-945942 skb = sock_omalloc(sk, 0, GFP_KERNEL);946943 if (!skb)947944 return NULL;
···106106107107if NF_NAT_IPV4108108109109+config NF_NAT_MASQUERADE_IPV4110110+ bool111111+112112+if NF_TABLES109113config NFT_CHAIN_NAT_IPV4110114 depends on NF_TABLES_IPV4111115 tristate "IPv4 nf_tables nat chain support"···118114 chain type is used to perform Network Address Translation (NAT)119115 packet transformations such as the source, destination address and120116 source and destination ports.121121-122122-config NF_NAT_MASQUERADE_IPV4123123- bool124117125118config NFT_MASQ_IPV4126119 tristate "IPv4 masquerading support for nf_tables"···136135 help137136 This is the expression that provides IPv4 redirect support for138137 nf_tables.138138+endif # NF_TABLES139139140140config NF_NAT_SNMP_BASIC141141 tristate "Basic SNMP-ALG support"
···63806380 if (!queue->synflood_warned &&63816381 net->ipv4.sysctl_tcp_syncookies != 2 &&63826382 xchg(&queue->synflood_warned, 1) == 0)63836383- pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",63846384- proto, ntohs(tcp_hdr(skb)->dest), msg);63836383+ net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",63846384+ proto, ntohs(tcp_hdr(skb)->dest), msg);6385638563866386 return want_cookie;63876387}
···18741874 * Returns 0 if there are still iucv pathes defined18751875 * 1 if there are no iucv pathes defined18761876 */18771877-int iucv_path_table_empty(void)18771877+static int iucv_path_table_empty(void)18781878{18791879 int i;18801880
+6-6
net/netfilter/Kconfig
···771771 depends on NETFILTER_ADVANCED772772 ---help---773773 This option adds a `CHECKSUM' target, which can be used in the iptables mangle774774- table.774774+ table to work around buggy DHCP clients in virtualized environments.775775776776- You can use this target to compute and fill in the checksum in777777- a packet that lacks a checksum. This is particularly useful,778778- if you need to work around old applications such as dhcp clients,779779- that do not work well with checksum offloads, but don't want to disable780780- checksum offload in your device.776776+ Some old DHCP clients drop packets because they are not aware777777+ that the checksum would normally be offloaded to hardware and778778+ thus should be considered valid.779779+ This target can be used to fill in the checksum using iptables780780+ when such packets are sent via a virtual network device.781781782782 To compile it as a module, choose M here. If unsure, say N.783783
+26
net/netfilter/nf_conntrack_proto.c
···776776};777777#endif778778779779+static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)780780+{781781+ u8 nfproto = (unsigned long)_nfproto;782782+783783+ if (nf_ct_l3num(ct) != nfproto)784784+ return 0;785785+786786+ if (nf_ct_protonum(ct) == IPPROTO_TCP &&787787+ ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {788788+ ct->proto.tcp.seen[0].td_maxwin = 0;789789+ ct->proto.tcp.seen[1].td_maxwin = 0;790790+ }791791+792792+ return 0;793793+}794794+779795static int nf_ct_netns_do_get(struct net *net, u8 nfproto)780796{781797 struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);798798+ bool fixup_needed = false;782799 int err = 0;783800784801 mutex_lock(&nf_ct_proto_mutex);···815798 ARRAY_SIZE(ipv4_conntrack_ops));816799 if (err)817800 cnet->users4 = 0;801801+ else802802+ fixup_needed = true;818803 break;819804#if IS_ENABLED(CONFIG_IPV6)820805 case NFPROTO_IPV6:···833814 ARRAY_SIZE(ipv6_conntrack_ops));834815 if (err)835816 cnet->users6 = 0;817817+ else818818+ fixup_needed = true;836819 break;837820#endif838821 default:···843822 }844823 out_unlock:845824 mutex_unlock(&nf_ct_proto_mutex);825825+826826+ if (fixup_needed)827827+ nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,828828+ (void *)(unsigned long)nfproto, 0, 0);829829+846830 return err;847831}848832
···380380 __noclone|381381 __deprecated|382382 __read_mostly|383383+ __ro_after_init|383384 __kprobes|384385 $InitAttribute|385386 ____cacheline_aligned|···33123311 # known declaration macros33133312 $sline =~ /^\+\s+$declaration_macros/ ||33143313 # start of struct or union or enum33153315- $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ ||33143314+ $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ ||33163315 # start or end of block or continuation of declaration33173316 $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||33183317 # bitfield continuation
+3-2
scripts/depmod.sh
···1111KERNELRELEASE=$212121313if ! test -r System.map ; then1414+ echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&21415 exit 01516fi16171718if [ -z $(command -v $DEPMOD) ]; then1818- echo "'make modules_install' requires $DEPMOD. Please install it." >&21919+ echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&21920 echo "This is probably in the kmod package." >&22020- exit 12121+ exit 02122fi22232324# older versions of depmod require the version string to start with three
-1
scripts/kconfig/Makefile
···221221222222# check if necessary packages are available, and configure build flags223223define filechk_conf_cfg224224- $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \225224 $(CONFIG_SHELL) $<226225endef227226
···3030 int alias;3131 int refs;3232 int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;3333- int hwcache_align, object_size, objs_per_slab;3434- int sanity_checks, slab_size, store_user, trace;3333+ unsigned int hwcache_align, object_size, objs_per_slab;3434+ unsigned int sanity_checks, slab_size, store_user, trace;3535 int order, poison, reclaim_account, red_zone;3636 unsigned long partial, objects, slabs, objects_partial, objects_total;3737 unsigned long alloc_fastpath, alloc_slowpath;
+8-13
virt/kvm/arm/mmu.c
···18171817 return 0;18181818}1819181918201820-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)18211821-{18221822- unsigned long end = hva + PAGE_SIZE;18231823-18241824- if (!kvm->arch.pgd)18251825- return 0;18261826-18271827- trace_kvm_unmap_hva(hva);18281828- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);18291829- return 0;18301830-}18311831-18321820int kvm_unmap_hva_range(struct kvm *kvm,18331821 unsigned long start, unsigned long end)18341822{···18481860void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)18491861{18501862 unsigned long end = hva + PAGE_SIZE;18631863+ kvm_pfn_t pfn = pte_pfn(pte);18511864 pte_t stage2_pte;1852186518531866 if (!kvm->arch.pgd)18541867 return;1855186818561869 trace_kvm_set_spte_hva(hva);18571857- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);18701870+18711871+ /*18721872+ * We've moved a page around, probably through CoW, so let's treat it18731873+ * just like a translation fault and clean the cache to the PoC.18741874+ */18751875+ clean_dcache_guest_page(pfn, PAGE_SIZE);18761876+ stage2_pte = pfn_pte(pfn, PAGE_S2);18581877 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);18591878}18601879
-15
virt/kvm/arm/trace.h
···134134 __entry->vcpu_pc, __entry->instr, __entry->cpsr)135135);136136137137-TRACE_EVENT(kvm_unmap_hva,138138- TP_PROTO(unsigned long hva),139139- TP_ARGS(hva),140140-141141- TP_STRUCT__entry(142142- __field( unsigned long, hva )143143- ),144144-145145- TP_fast_assign(146146- __entry->hva = hva;147147- ),148148-149149- TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)150150-);151151-152137TRACE_EVENT(kvm_unmap_hva_range,153138 TP_PROTO(unsigned long start, unsigned long end),154139 TP_ARGS(start, end),