Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'from-rusty/modules-next' into for-4.5/core

As agreed with Rusty, we're taking a current module-next pile through
livepatching.git, as it contains solely patches that are prerequisites
for module page protection cleanups in livepatching. Rusty will be
restarting module-next from scratch.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>

+3800 -2219
+5 -2
Documentation/IPMI.txt
··· 587 587 588 588 modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type> 589 589 preaction=<preaction type> preop=<preop type> start_now=x 590 - nowayout=x ifnum_to_use=n 590 + nowayout=x ifnum_to_use=n panic_wdt_timeout=<t> 591 591 592 592 ifnum_to_use specifies which interface the watchdog timer should use. 593 593 The default is -1, which means to pick the first one registered. ··· 597 597 occur (if pretimeout is zero, then pretimeout will not be enabled). Note 598 598 that the pretimeout is the time before the final timeout. So if the 599 599 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout 600 - will occur in 40 second (10 seconds before the timeout). 600 + will occur in 40 second (10 seconds before the timeout). The panic_wdt_timeout 601 + is the value of timeout which is set on kernel panic, in order to let actions 602 + such as kdump to occur during panic. 601 603 602 604 The action may be "reset", "power_cycle", or "power_off", and 603 605 specifies what to do when the timer times out, and defaults to ··· 636 634 ipmi_watchdog.preop=<preop type> 637 635 ipmi_watchdog.start_now=x 638 636 ipmi_watchdog.nowayout=x 637 + ipmi_watchdog.panic_wdt_timeout=<t> 639 638 640 639 The options are the same as the module parameter options. 641 640
+3
Documentation/block/null_blk.txt
··· 70 70 parameter. 71 71 1: The multi-queue block layer is instantiated with a hardware dispatch 72 72 queue for each CPU node in the system. 73 + 74 + use_lightnvm=[0/1]: Default: 0 75 + Register device with LightNVM. Requires blk-mq to be used.
+1
Documentation/i2c/busses/i2c-i801
··· 32 32 * Intel Sunrise Point-LP (PCH) 33 33 * Intel DNV (SOC) 34 34 * Intel Broxton (SOC) 35 + * Intel Lewisburg (PCH) 35 36 Datasheets: Publicly available at the Intel website 36 37 37 38 On Intel Patsburg and later chipsets, both the normal host SMBus controller
-3
Documentation/kernel-parameters.txt
··· 1583 1583 hwp_only 1584 1584 Only load intel_pstate on systems which support 1585 1585 hardware P state control (HWP) if available. 1586 - no_acpi 1587 - Don't use ACPI processor performance control objects 1588 - _PSS and _PPC specified limits. 1589 1586 1590 1587 intremap= [X86-64, Intel-IOMMU] 1591 1588 on enable Interrupt Remapping (default)
+17 -2
MAINTAINERS
··· 2449 2449 2450 2450 BROADCOM STB NAND FLASH DRIVER 2451 2451 M: Brian Norris <computersforpeace@gmail.com> 2452 + M: Kamal Dasu <kdasu.kdev@gmail.com> 2452 2453 L: linux-mtd@lists.infradead.org 2454 + L: bcm-kernel-feedback-list@broadcom.com 2453 2455 S: Maintained 2454 2456 F: drivers/mtd/nand/brcmnand/ 2455 2457 ··· 2931 2929 F: drivers/platform/x86/compal-laptop.c 2932 2930 2933 2931 CONEXANT ACCESSRUNNER USB DRIVER 2934 - M: Simon Arlott <cxacru@fire.lp0.eu> 2935 2932 L: accessrunner-general@lists.sourceforge.net 2936 2933 W: http://accessrunner.sourceforge.net/ 2937 - S: Maintained 2934 + S: Orphan 2938 2935 F: drivers/usb/atm/cxacru.c 2939 2936 2940 2937 CONFIGFS ··· 4410 4409 4411 4410 FPGA MANAGER FRAMEWORK 4412 4411 M: Alan Tull <atull@opensource.altera.com> 4412 + R: Moritz Fischer <moritz.fischer@ettus.com> 4413 4413 S: Maintained 4414 4414 F: drivers/fpga/ 4415 4415 F: include/linux/fpga/fpga-mgr.h ··· 6366 6364 LIGHTNVM PLATFORM SUPPORT 6367 6365 M: Matias Bjorling <mb@lightnvm.io> 6368 6366 W: http://github/OpenChannelSSD 6367 + L: linux-block@vger.kernel.org 6369 6368 S: Maintained 6370 6369 F: drivers/lightnvm/ 6371 6370 F: include/linux/lightnvm.h ··· 7904 7901 S: Maintained 7905 7902 F: net/openvswitch/ 7906 7903 F: include/uapi/linux/openvswitch.h 7904 + 7905 + OPERATING PERFORMANCE POINTS (OPP) 7906 + M: Viresh Kumar <vireshk@kernel.org> 7907 + M: Nishanth Menon <nm@ti.com> 7908 + M: Stephen Boyd <sboyd@codeaurora.org> 7909 + L: linux-pm@vger.kernel.org 7910 + S: Maintained 7911 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git 7912 + F: drivers/base/power/opp/ 7913 + F: include/linux/pm_opp.h 7914 + F: Documentation/power/opp.txt 7915 + F: Documentation/devicetree/bindings/opp/ 7907 7916 7908 7917 OPL4 DRIVER 7909 7918 M: Clemens Ladisch <clemens@ladisch.de>
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 4 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc1 4 + EXTRAVERSION = -rc2 5 5 NAME = Blurry Fish Butt 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/alpha/kernel/module.c
··· 160 160 161 161 /* The small sections were sorted to the end of the segment. 162 162 The following should definitely cover them. */ 163 - gp = (u64)me->module_core + me->core_size - 0x8000; 163 + gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000; 164 164 got = sechdrs[me->arch.gotsecindex].sh_addr; 165 165 166 166 for (i = 0; i < n; i++) {
+2 -2
arch/arc/kernel/unwind.c
··· 372 372 return NULL; 373 373 374 374 init_unwind_table(table, module->name, 375 - module->module_core, module->core_size, 376 - module->module_init, module->init_size, 375 + module->core_layout.base, module->core_layout.size, 376 + module->init_layout.base, module->init_layout.size, 377 377 table_start, table_size, 378 378 NULL, 0); 379 379
+12 -4
arch/arm/boot/dts/imx27.dtsi
··· 486 486 compatible = "fsl,imx27-usb"; 487 487 reg = <0x10024000 0x200>; 488 488 interrupts = <56>; 489 - clocks = <&clks IMX27_CLK_USB_IPG_GATE>; 489 + clocks = <&clks IMX27_CLK_USB_IPG_GATE>, 490 + <&clks IMX27_CLK_USB_AHB_GATE>, 491 + <&clks IMX27_CLK_USB_DIV>; 492 + clock-names = "ipg", "ahb", "per"; 490 493 fsl,usbmisc = <&usbmisc 0>; 491 494 status = "disabled"; 492 495 }; ··· 498 495 compatible = "fsl,imx27-usb"; 499 496 reg = <0x10024200 0x200>; 500 497 interrupts = <54>; 501 - clocks = <&clks IMX27_CLK_USB_IPG_GATE>; 498 + clocks = <&clks IMX27_CLK_USB_IPG_GATE>, 499 + <&clks IMX27_CLK_USB_AHB_GATE>, 500 + <&clks IMX27_CLK_USB_DIV>; 501 + clock-names = "ipg", "ahb", "per"; 502 502 fsl,usbmisc = <&usbmisc 1>; 503 503 dr_mode = "host"; 504 504 status = "disabled"; ··· 511 505 compatible = "fsl,imx27-usb"; 512 506 reg = <0x10024400 0x200>; 513 507 interrupts = <55>; 514 - clocks = <&clks IMX27_CLK_USB_IPG_GATE>; 508 + clocks = <&clks IMX27_CLK_USB_IPG_GATE>, 509 + <&clks IMX27_CLK_USB_AHB_GATE>, 510 + <&clks IMX27_CLK_USB_DIV>; 511 + clock-names = "ipg", "ahb", "per"; 515 512 fsl,usbmisc = <&usbmisc 2>; 516 513 dr_mode = "host"; 517 514 status = "disabled"; ··· 524 515 #index-cells = <1>; 525 516 compatible = "fsl,imx27-usbmisc"; 526 517 reg = <0x10024600 0x200>; 527 - clocks = <&clks IMX27_CLK_USB_AHB_GATE>; 528 518 }; 529 519 530 520 sahara2: sahara@10025000 {
+1 -1
arch/arm/kernel/module-plts.c
··· 32 32 33 33 static bool in_init(const struct module *mod, u32 addr) 34 34 { 35 - return addr - (u32)mod->module_init < mod->init_size; 35 + return addr - (u32)mod->init_layout.base < mod->init_layout.size; 36 36 } 37 37 38 38 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+1 -6
arch/arm/kvm/arm.c
··· 564 564 vcpu_sleep(vcpu); 565 565 566 566 /* 567 - * Disarming the background timer must be done in a 568 - * preemptible context, as this call may sleep. 569 - */ 570 - kvm_timer_flush_hwstate(vcpu); 571 - 572 - /* 573 567 * Preparing the interrupts to be injected also 574 568 * involves poking the GIC, which must be done in a 575 569 * non-preemptible context. 576 570 */ 577 571 preempt_disable(); 572 + kvm_timer_flush_hwstate(vcpu); 578 573 kvm_vgic_flush_hwstate(vcpu); 579 574 580 575 local_irq_disable();
+7 -8
arch/arm/kvm/mmu.c
··· 98 98 __kvm_flush_dcache_pud(pud); 99 99 } 100 100 101 + static bool kvm_is_device_pfn(unsigned long pfn) 102 + { 103 + return !pfn_valid(pfn); 104 + } 105 + 101 106 /** 102 107 * stage2_dissolve_pmd() - clear and flush huge PMD entry 103 108 * @kvm: pointer to kvm structure. ··· 218 213 kvm_tlb_flush_vmid_ipa(kvm, addr); 219 214 220 215 /* No need to invalidate the cache for device mappings */ 221 - if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) 216 + if (!kvm_is_device_pfn(__phys_to_pfn(addr))) 222 217 kvm_flush_dcache_pte(old_pte); 223 218 224 219 put_page(virt_to_page(pte)); ··· 310 305 311 306 pte = pte_offset_kernel(pmd, addr); 312 307 do { 313 - if (!pte_none(*pte) && 314 - (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) 308 + if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr))) 315 309 kvm_flush_dcache_pte(*pte); 316 310 } while (pte++, addr += PAGE_SIZE, addr != end); 317 311 } ··· 1039 1035 return false; 1040 1036 1041 1037 return kvm_vcpu_dabt_iswrite(vcpu); 1042 - } 1043 - 1044 - static bool kvm_is_device_pfn(unsigned long pfn) 1045 - { 1046 - return !pfn_valid(pfn); 1047 1038 } 1048 1039 1049 1040 /**
+21
arch/arm64/Kconfig
··· 316 316 317 317 If unsure, say Y. 318 318 319 + config ARM64_ERRATUM_834220 320 + bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault" 321 + depends on KVM 322 + default y 323 + help 324 + This option adds an alternative code sequence to work around ARM 325 + erratum 834220 on Cortex-A57 parts up to r1p2. 326 + 327 + Affected Cortex-A57 parts might report a Stage 2 translation 328 + fault as the result of a Stage 1 fault for load crossing a 329 + page boundary when there is a permission or device memory 330 + alignment fault at Stage 1 and a translation fault at Stage 2. 331 + 332 + The workaround is to verify that the Stage 1 translation 333 + doesn't generate a fault before handling the Stage 2 fault. 334 + Please note that this does not necessarily enable the workaround, 335 + as it depends on the alternative framework, which will only patch 336 + the kernel if an affected CPU is detected. 337 + 338 + If unsure, say Y. 339 + 319 340 config ARM64_ERRATUM_845719 320 341 bool "Cortex-A53: 845719: a load might read incorrect data" 321 342 depends on COMPAT
+1 -1
arch/arm64/crypto/aes-ce-cipher.c
··· 237 237 static struct crypto_alg aes_alg = { 238 238 .cra_name = "aes", 239 239 .cra_driver_name = "aes-ce", 240 - .cra_priority = 300, 240 + .cra_priority = 250, 241 241 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 242 242 .cra_blocksize = AES_BLOCK_SIZE, 243 243 .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+10 -6
arch/arm64/include/asm/barrier.h
··· 64 64 65 65 #define smp_load_acquire(p) \ 66 66 ({ \ 67 - typeof(*p) ___p1; \ 67 + union { typeof(*p) __val; char __c[1]; } __u; \ 68 68 compiletime_assert_atomic_type(*p); \ 69 69 switch (sizeof(*p)) { \ 70 70 case 1: \ 71 71 asm volatile ("ldarb %w0, %1" \ 72 - : "=r" (___p1) : "Q" (*p) : "memory"); \ 72 + : "=r" (*(__u8 *)__u.__c) \ 73 + : "Q" (*p) : "memory"); \ 73 74 break; \ 74 75 case 2: \ 75 76 asm volatile ("ldarh %w0, %1" \ 76 - : "=r" (___p1) : "Q" (*p) : "memory"); \ 77 + : "=r" (*(__u16 *)__u.__c) \ 78 + : "Q" (*p) : "memory"); \ 77 79 break; \ 78 80 case 4: \ 79 81 asm volatile ("ldar %w0, %1" \ 80 - : "=r" (___p1) : "Q" (*p) : "memory"); \ 82 + : "=r" (*(__u32 *)__u.__c) \ 83 + : "Q" (*p) : "memory"); \ 81 84 break; \ 82 85 case 8: \ 83 86 asm volatile ("ldar %0, %1" \ 84 - : "=r" (___p1) : "Q" (*p) : "memory"); \ 87 + : "=r" (*(__u64 *)__u.__c) \ 88 + : "Q" (*p) : "memory"); \ 85 89 break; \ 86 90 } \ 87 - ___p1; \ 91 + __u.__val; \ 88 92 }) 89 93 90 94 #define read_barrier_depends() do { } while(0)
+1 -2
arch/arm64/include/asm/compat.h
··· 23 23 */ 24 24 #include <linux/types.h> 25 25 #include <linux/sched.h> 26 - #include <linux/ptrace.h> 27 26 28 27 #define COMPAT_USER_HZ 100 29 28 #ifdef __AARCH64EB__ ··· 233 234 return (u32)(unsigned long)uptr; 234 235 } 235 236 236 - #define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs())) 237 + #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current))) 237 238 238 239 static inline void __user *arch_compat_alloc_user_space(long len) 239 240 {
+2 -1
arch/arm64/include/asm/cpufeature.h
··· 29 29 #define ARM64_HAS_PAN 4 30 30 #define ARM64_HAS_LSE_ATOMICS 5 31 31 #define ARM64_WORKAROUND_CAVIUM_23154 6 32 + #define ARM64_WORKAROUND_834220 7 32 33 33 - #define ARM64_NCAPS 7 34 + #define ARM64_NCAPS 8 34 35 35 36 #ifndef __ASSEMBLY__ 36 37
+3 -10
arch/arm64/include/asm/dma-mapping.h
··· 18 18 19 19 #ifdef __KERNEL__ 20 20 21 - #include <linux/acpi.h> 22 21 #include <linux/types.h> 23 22 #include <linux/vmalloc.h> 24 23 ··· 25 26 #include <asm/xen/hypervisor.h> 26 27 27 28 #define DMA_ERROR_CODE (~(dma_addr_t)0) 28 - extern struct dma_map_ops *dma_ops; 29 29 extern struct dma_map_ops dummy_dma_ops; 30 30 31 31 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) 32 32 { 33 - if (unlikely(!dev)) 34 - return dma_ops; 35 - else if (dev->archdata.dma_ops) 33 + if (dev && dev->archdata.dma_ops) 36 34 return dev->archdata.dma_ops; 37 - else if (acpi_disabled) 38 - return dma_ops; 39 35 40 36 /* 41 - * When ACPI is enabled, if arch_set_dma_ops is not called, 42 - * we will disable device DMA capability by setting it 43 - * to dummy_dma_ops. 37 + * We expect no ISA devices, and all other DMA masters are expected to 38 + * have someone call arch_setup_dma_ops at device creation time. 44 39 */ 45 40 return &dummy_dma_ops; 46 41 }
+5 -3
arch/arm64/include/asm/kvm_emulate.h
··· 99 99 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; 100 100 } 101 101 102 + /* 103 + * vcpu_reg should always be passed a register number coming from a 104 + * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32 105 + * with banked registers. 106 + */ 102 107 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) 103 108 { 104 - if (vcpu_mode_is_32bit(vcpu)) 105 - return vcpu_reg32(vcpu, reg_num); 106 - 107 109 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; 108 110 } 109 111
+1 -1
arch/arm64/include/asm/mmu_context.h
··· 101 101 #define destroy_context(mm) do { } while(0) 102 102 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); 103 103 104 - #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) 104 + #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) 105 105 106 106 /* 107 107 * This is called when "tsk" is about to enter lazy TLB mode.
+1
arch/arm64/include/asm/pgtable.h
··· 81 81 82 82 #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) 83 83 #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) 84 + #define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) 84 85 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) 85 86 #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) 86 87
+9
arch/arm64/kernel/cpu_errata.c
··· 75 75 (1 << MIDR_VARIANT_SHIFT) | 2), 76 76 }, 77 77 #endif 78 + #ifdef CONFIG_ARM64_ERRATUM_834220 79 + { 80 + /* Cortex-A57 r0p0 - r1p2 */ 81 + .desc = "ARM erratum 834220", 82 + .capability = ARM64_WORKAROUND_834220, 83 + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 84 + (1 << MIDR_VARIANT_SHIFT) | 2), 85 + }, 86 + #endif 78 87 #ifdef CONFIG_ARM64_ERRATUM_845719 79 88 { 80 89 /* Cortex-A53 r0p[01234] */
+5
arch/arm64/kernel/cpuinfo.c
··· 30 30 #include <linux/seq_file.h> 31 31 #include <linux/sched.h> 32 32 #include <linux/smp.h> 33 + #include <linux/delay.h> 33 34 34 35 /* 35 36 * In case the boot CPU is hotpluggable, we record its initial state and ··· 112 111 * "processor". Give glibc what it expects. 113 112 */ 114 113 seq_printf(m, "processor\t: %d\n", i); 114 + 115 + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", 116 + loops_per_jiffy / (500000UL/HZ), 117 + loops_per_jiffy / (5000UL/HZ) % 100); 115 118 116 119 /* 117 120 * Dump out the common processor features in a single line.
+5 -9
arch/arm64/kernel/efi.c
··· 224 224 { 225 225 efi_memory_desc_t *md; 226 226 227 + init_new_context(NULL, &efi_mm); 228 + 227 229 for_each_efi_memory_desc(&memmap, md) { 228 230 u64 paddr, npages, size; 229 231 pgprot_t prot; ··· 256 254 else 257 255 prot = PAGE_KERNEL; 258 256 259 - create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot); 257 + create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, 258 + __pgprot(pgprot_val(prot) | PTE_NG)); 260 259 } 261 260 return true; 262 261 } ··· 332 329 333 330 static void efi_set_pgd(struct mm_struct *mm) 334 331 { 335 - if (mm == &init_mm) 336 - cpu_set_reserved_ttbr0(); 337 - else 338 - cpu_switch_mm(mm->pgd, mm); 339 - 340 - local_flush_tlb_all(); 341 - if (icache_is_aivivt()) 342 - __local_flush_icache_all(); 332 + switch_mm(NULL, mm, NULL); 343 333 } 344 334 345 335 void efi_virtmap_load(void)
+10
arch/arm64/kernel/suspend.c
··· 1 + #include <linux/ftrace.h> 1 2 #include <linux/percpu.h> 2 3 #include <linux/slab.h> 3 4 #include <asm/cacheflush.h> ··· 72 71 local_dbg_save(flags); 73 72 74 73 /* 74 + * Function graph tracer state gets incosistent when the kernel 75 + * calls functions that never return (aka suspend finishers) hence 76 + * disable graph tracing during their execution. 77 + */ 78 + pause_graph_tracing(); 79 + 80 + /* 75 81 * mm context saved on the stack, it will be restored when 76 82 * the cpu comes out of reset through the identity mapped 77 83 * page tables, so that the thread address space is properly ··· 118 110 if (hw_breakpoint_restore) 119 111 hw_breakpoint_restore(NULL); 120 112 } 113 + 114 + unpause_graph_tracing(); 121 115 122 116 /* 123 117 * Restore pstate flags. OS lock and mdscr have been already
+12 -2
arch/arm64/kvm/hyp.S
··· 864 864 ENDPROC(__kvm_flush_vm_context) 865 865 866 866 __kvm_hyp_panic: 867 + // Stash PAR_EL1 before corrupting it in __restore_sysregs 868 + mrs x0, par_el1 869 + push x0, xzr 870 + 867 871 // Guess the context by looking at VTTBR: 868 872 // If zero, then we're already a host. 869 873 // Otherwise restore a minimal host context before panicing. ··· 902 898 mrs x3, esr_el2 903 899 mrs x4, far_el2 904 900 mrs x5, hpfar_el2 905 - mrs x6, par_el1 901 + pop x6, xzr // active context PAR_EL1 906 902 mrs x7, tpidr_el2 907 903 908 904 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ ··· 918 914 ENDPROC(__kvm_hyp_panic) 919 915 920 916 __hyp_panic_str: 921 - .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" 917 + .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0" 922 918 923 919 .align 2 924 920 ··· 1019 1015 b.ne 1f // Not an abort we care about 1020 1016 1021 1017 /* This is an abort. Check for permission fault */ 1018 + alternative_if_not ARM64_WORKAROUND_834220 1022 1019 and x2, x1, #ESR_ELx_FSC_TYPE 1023 1020 cmp x2, #FSC_PERM 1024 1021 b.ne 1f // Not a permission fault 1022 + alternative_else 1023 + nop // Use the permission fault path to 1024 + nop // check for a valid S1 translation, 1025 + nop // regardless of the ESR value. 1026 + alternative_endif 1025 1027 1026 1028 /* 1027 1029 * Check for Stage-1 page table walk, which is guaranteed
+1 -1
arch/arm64/kvm/inject_fault.c
··· 48 48 49 49 /* Note: These now point to the banked copies */ 50 50 *vcpu_spsr(vcpu) = new_spsr_value; 51 - *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; 51 + *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; 52 52 53 53 /* Branch to exception vector */ 54 54 if (sctlr & (1 << 13))
+17 -18
arch/arm64/mm/dma-mapping.c
··· 18 18 */ 19 19 20 20 #include <linux/gfp.h> 21 + #include <linux/acpi.h> 21 22 #include <linux/export.h> 22 23 #include <linux/slab.h> 23 24 #include <linux/genalloc.h> ··· 28 27 #include <linux/swiotlb.h> 29 28 30 29 #include <asm/cacheflush.h> 31 - 32 - struct dma_map_ops *dma_ops; 33 - EXPORT_SYMBOL(dma_ops); 34 30 35 31 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, 36 32 bool coherent) ··· 513 515 514 516 static int __init arm64_dma_init(void) 515 517 { 516 - int ret; 517 - 518 - dma_ops = &swiotlb_dma_ops; 519 - 520 - ret = atomic_pool_init(); 521 - 522 - return ret; 518 + return atomic_pool_init(); 523 519 } 524 520 arch_initcall(arm64_dma_init); 525 521 ··· 544 552 { 545 553 bool coherent = is_device_dma_coherent(dev); 546 554 int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); 555 + size_t iosize = size; 547 556 void *addr; 548 557 549 558 if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n")) 550 559 return NULL; 560 + 561 + size = PAGE_ALIGN(size); 562 + 551 563 /* 552 564 * Some drivers rely on this, and we probably don't want the 553 565 * possibility of stale kernel data being read by devices anyway. 
··· 562 566 struct page **pages; 563 567 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); 564 568 565 - pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle, 569 + pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle, 566 570 flush_page); 567 571 if (!pages) 568 572 return NULL; ··· 570 574 addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, 571 575 __builtin_return_address(0)); 572 576 if (!addr) 573 - iommu_dma_free(dev, pages, size, handle); 577 + iommu_dma_free(dev, pages, iosize, handle); 574 578 } else { 575 579 struct page *page; 576 580 /* ··· 587 591 if (!addr) 588 592 return NULL; 589 593 590 - *handle = iommu_dma_map_page(dev, page, 0, size, ioprot); 594 + *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); 591 595 if (iommu_dma_mapping_error(dev, *handle)) { 592 596 if (coherent) 593 597 __free_pages(page, get_order(size)); ··· 602 606 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 603 607 dma_addr_t handle, struct dma_attrs *attrs) 604 608 { 609 + size_t iosize = size; 610 + 611 + size = PAGE_ALIGN(size); 605 612 /* 606 613 * @cpu_addr will be one of 3 things depending on how it was allocated: 607 614 * - A remapped array of pages from iommu_dma_alloc(), for all ··· 616 617 * Hence how dodgy the below logic looks... 
617 618 */ 618 619 if (__in_atomic_pool(cpu_addr, size)) { 619 - iommu_dma_unmap_page(dev, handle, size, 0, NULL); 620 + iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); 620 621 __free_from_pool(cpu_addr, size); 621 622 } else if (is_vmalloc_addr(cpu_addr)){ 622 623 struct vm_struct *area = find_vm_area(cpu_addr); 623 624 624 625 if (WARN_ON(!area || !area->pages)) 625 626 return; 626 - iommu_dma_free(dev, area->pages, size, &handle); 627 + iommu_dma_free(dev, area->pages, iosize, &handle); 627 628 dma_common_free_remap(cpu_addr, size, VM_USERMAP); 628 629 } else { 629 - iommu_dma_unmap_page(dev, handle, size, 0, NULL); 630 + iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); 630 631 __free_pages(virt_to_page(cpu_addr), get_order(size)); 631 632 } 632 633 } ··· 983 984 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 984 985 struct iommu_ops *iommu, bool coherent) 985 986 { 986 - if (!acpi_disabled && !dev->archdata.dma_ops) 987 - dev->archdata.dma_ops = dma_ops; 987 + if (!dev->archdata.dma_ops) 988 + dev->archdata.dma_ops = &swiotlb_dma_ops; 988 989 989 990 dev->archdata.dma_coherent = coherent; 990 991 __iommu_setup_dma_ops(dev, dma_base, size, iommu);
+7 -7
arch/arm64/mm/mmu.c
··· 362 362 * for now. This will get more fine grained later once all memory 363 363 * is mapped 364 364 */ 365 - unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 366 - unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 365 + unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE); 366 + unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE); 367 367 368 368 if (end < kernel_x_start) { 369 369 create_mapping(start, __phys_to_virt(start), ··· 451 451 { 452 452 #ifdef CONFIG_DEBUG_RODATA 453 453 /* now that we are actually fully mapped, make the start/end more fine grained */ 454 - if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) { 454 + if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) { 455 455 unsigned long aligned_start = round_down(__pa(_stext), 456 - SECTION_SIZE); 456 + SWAPPER_BLOCK_SIZE); 457 457 458 458 create_mapping(aligned_start, __phys_to_virt(aligned_start), 459 459 __pa(_stext) - aligned_start, 460 460 PAGE_KERNEL); 461 461 } 462 462 463 - if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) { 463 + if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) { 464 464 unsigned long aligned_end = round_up(__pa(__init_end), 465 - SECTION_SIZE); 465 + SWAPPER_BLOCK_SIZE); 466 466 create_mapping(__pa(__init_end), (unsigned long)__init_end, 467 467 aligned_end - __pa(__init_end), 468 468 PAGE_KERNEL); ··· 475 475 { 476 476 create_mapping_late(__pa(_stext), (unsigned long)_stext, 477 477 (unsigned long)_etext - (unsigned long)_stext, 478 - PAGE_KERNEL_EXEC | PTE_RDONLY); 478 + PAGE_KERNEL_ROX); 479 479 480 480 } 481 481 #endif
+6 -6
arch/avr32/kernel/module.c
··· 118 118 * Increase core size to make room for GOT and set start 119 119 * offset for GOT. 120 120 */ 121 - module->core_size = ALIGN(module->core_size, 4); 122 - module->arch.got_offset = module->core_size; 123 - module->core_size += module->arch.got_size; 121 + module->core_layout.size = ALIGN(module->core_layout.size, 4); 122 + module->arch.got_offset = module->core_layout.size; 123 + module->core_layout.size += module->arch.got_size; 124 124 125 125 return 0; 126 126 ··· 177 177 if (!info->got_initialized) { 178 178 Elf32_Addr *gotent; 179 179 180 - gotent = (module->module_core 180 + gotent = (module->core_layout.base 181 181 + module->arch.got_offset 182 182 + info->got_offset); 183 183 *gotent = relocation; ··· 255 255 */ 256 256 pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n", 257 257 relocation, module->arch.got_offset, 258 - module->module_core); 259 - relocation -= ((unsigned long)module->module_core 258 + module->core_layout.base); 259 + relocation -= ((unsigned long)module->core_layout.base 260 260 + module->arch.got_offset); 261 261 *location = relocation; 262 262 break;
+7 -7
arch/ia64/kernel/module.c
··· 486 486 static inline int 487 487 in_init (const struct module *mod, uint64_t addr) 488 488 { 489 - return addr - (uint64_t) mod->module_init < mod->init_size; 489 + return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size; 490 490 } 491 491 492 492 static inline int 493 493 in_core (const struct module *mod, uint64_t addr) 494 494 { 495 - return addr - (uint64_t) mod->module_core < mod->core_size; 495 + return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size; 496 496 } 497 497 498 498 static inline int ··· 675 675 break; 676 676 677 677 case RV_BDREL: 678 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); 678 + val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base); 679 679 break; 680 680 681 681 case RV_LTV: ··· 810 810 * addresses have been selected... 811 811 */ 812 812 uint64_t gp; 813 - if (mod->core_size > MAX_LTOFF) 813 + if (mod->core_layout.size > MAX_LTOFF) 814 814 /* 815 815 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated 816 816 * at the end of the module. 817 817 */ 818 - gp = mod->core_size - MAX_LTOFF / 2; 818 + gp = mod->core_layout.size - MAX_LTOFF / 2; 819 819 else 820 - gp = mod->core_size / 2; 821 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8); 820 + gp = mod->core_layout.size / 2; 821 + gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8); 822 822 mod->arch.gp = gp; 823 823 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); 824 824 }
+2 -2
arch/metag/kernel/module.c
··· 176 176 tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3); 177 177 178 178 /* Init, or core PLT? */ 179 - if (location >= mod->module_core 180 - && location < mod->module_core + mod->core_size) 179 + if (location >= mod->core_layout.base 180 + && location < mod->core_layout.base + mod->core_layout.size) 181 181 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; 182 182 else 183 183 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
+6 -1
arch/mips/ath79/setup.c
··· 216 216 AR71XX_RESET_SIZE); 217 217 ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, 218 218 AR71XX_PLL_SIZE); 219 + ath79_detect_sys_type(); 219 220 ath79_ddr_ctrl_init(); 220 221 221 - ath79_detect_sys_type(); 222 222 if (mips_machtype != ATH79_MACH_GENERIC_OF) 223 223 detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); 224 224 ··· 281 281 "Generic", 282 282 "Generic AR71XX/AR724X/AR913X based board", 283 283 ath79_generic_init); 284 + 285 + MIPS_MACHINE(ATH79_MACH_GENERIC_OF, 286 + "DTB", 287 + "Generic AR71XX/AR724X/AR913X based board (DT)", 288 + NULL);
+1 -1
arch/mips/boot/dts/qca/ar9132.dtsi
··· 107 107 miscintc: interrupt-controller@18060010 { 108 108 compatible = "qca,ar9132-misc-intc", 109 109 "qca,ar7100-misc-intc"; 110 - reg = <0x18060010 0x4>; 110 + reg = <0x18060010 0x8>; 111 111 112 112 interrupt-parent = <&cpuintc>; 113 113 interrupts = <6>;
+2 -1
arch/mips/include/asm/page.h
··· 200 200 { 201 201 /* avoid <linux/mm.h> include hell */ 202 202 extern unsigned long max_mapnr; 203 + unsigned long pfn_offset = ARCH_PFN_OFFSET; 203 204 204 - return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; 205 + return pfn >= pfn_offset && pfn < max_mapnr; 205 206 } 206 207 207 208 #elif defined(CONFIG_SPARSEMEM)
+3 -3
arch/mips/kernel/vpe.c
··· 205 205 || s->sh_entsize != ~0UL) 206 206 continue; 207 207 s->sh_entsize = 208 - get_offset((unsigned long *)&mod->core_size, s); 208 + get_offset((unsigned long *)&mod->core_layout.size, s); 209 209 } 210 210 211 211 if (m == 0) 212 - mod->core_text_size = mod->core_size; 212 + mod->core_layout.text_size = mod->core_layout.size; 213 213 214 214 } 215 215 } ··· 641 641 layout_sections(&mod, hdr, sechdrs, secstrings); 642 642 } 643 643 644 - v->load_addr = alloc_progmem(mod.core_size); 644 + v->load_addr = alloc_progmem(mod.core_layout.size); 645 645 if (!v->load_addr) 646 646 return -ENOMEM; 647 647
+1 -1
arch/mips/kvm/emulate.c
··· 1581 1581 1582 1582 base = (inst >> 21) & 0x1f; 1583 1583 op_inst = (inst >> 16) & 0x1f; 1584 - offset = inst & 0xffff; 1584 + offset = (int16_t)inst; 1585 1585 cache = (inst >> 16) & 0x3; 1586 1586 op = (inst >> 18) & 0x7; 1587 1587
+10 -6
arch/mips/kvm/locore.S
··· 157 157 158 158 FEXPORT(__kvm_mips_load_asid) 159 159 /* Set the ASID for the Guest Kernel */ 160 - INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 161 - /* addresses shift to 0x80000000 */ 162 - bltz t0, 1f /* If kernel */ 160 + PTR_L t0, VCPU_COP0(k1) 161 + LONG_L t0, COP0_STATUS(t0) 162 + andi t0, KSU_USER | ST0_ERL | ST0_EXL 163 + xori t0, KSU_USER 164 + bnez t0, 1f /* If kernel */ 163 165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 164 166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 165 167 1: ··· 476 474 mtc0 t0, CP0_EPC 477 475 478 476 /* Set the ASID for the Guest Kernel */ 479 - INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 480 - /* addresses shift to 0x80000000 */ 481 - bltz t0, 1f /* If kernel */ 477 + PTR_L t0, VCPU_COP0(k1) 478 + LONG_L t0, COP0_STATUS(t0) 479 + andi t0, KSU_USER | ST0_ERL | ST0_EXL 480 + xori t0, KSU_USER 481 + bnez t0, 1f /* If kernel */ 482 482 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 483 483 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 484 484 1:
+4 -1
arch/mips/kvm/mips.c
··· 279 279 280 280 if (!gebase) { 281 281 err = -ENOMEM; 282 - goto out_free_cpu; 282 + goto out_uninit_cpu; 283 283 } 284 284 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", 285 285 ALIGN(size, PAGE_SIZE), gebase); ··· 342 342 343 343 out_free_gebase: 344 344 kfree(gebase); 345 + 346 + out_uninit_cpu: 347 + kvm_vcpu_uninit(vcpu); 345 348 346 349 out_free_cpu: 347 350 kfree(vcpu);
+3
arch/parisc/Kconfig
··· 108 108 default 3 if 64BIT && PARISC_PAGE_SIZE_4KB 109 109 default 2 110 110 111 + config SYS_SUPPORTS_HUGETLBFS 112 + def_bool y if PA20 113 + 111 114 source "init/Kconfig" 112 115 113 116 source "kernel/Kconfig.freezer"
+85
arch/parisc/include/asm/hugetlb.h
··· 1 + #ifndef _ASM_PARISC64_HUGETLB_H 2 + #define _ASM_PARISC64_HUGETLB_H 3 + 4 + #include <asm/page.h> 5 + #include <asm-generic/hugetlb.h> 6 + 7 + 8 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 9 + pte_t *ptep, pte_t pte); 10 + 11 + pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 12 + pte_t *ptep); 13 + 14 + static inline int is_hugepage_only_range(struct mm_struct *mm, 15 + unsigned long addr, 16 + unsigned long len) { 17 + return 0; 18 + } 19 + 20 + /* 21 + * If the arch doesn't supply something else, assume that hugepage 22 + * size aligned regions are ok without further preparation. 23 + */ 24 + static inline int prepare_hugepage_range(struct file *file, 25 + unsigned long addr, unsigned long len) 26 + { 27 + if (len & ~HPAGE_MASK) 28 + return -EINVAL; 29 + if (addr & ~HPAGE_MASK) 30 + return -EINVAL; 31 + return 0; 32 + } 33 + 34 + static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, 35 + unsigned long addr, unsigned long end, 36 + unsigned long floor, 37 + unsigned long ceiling) 38 + { 39 + free_pgd_range(tlb, addr, end, floor, ceiling); 40 + } 41 + 42 + static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, 43 + unsigned long addr, pte_t *ptep) 44 + { 45 + } 46 + 47 + static inline int huge_pte_none(pte_t pte) 48 + { 49 + return pte_none(pte); 50 + } 51 + 52 + static inline pte_t huge_pte_wrprotect(pte_t pte) 53 + { 54 + return pte_wrprotect(pte); 55 + } 56 + 57 + static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, 58 + unsigned long addr, pte_t *ptep) 59 + { 60 + pte_t old_pte = *ptep; 61 + set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); 62 + } 63 + 64 + static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, 65 + unsigned long addr, pte_t *ptep, 66 + pte_t pte, int dirty) 67 + { 68 + int changed = !pte_same(*ptep, pte); 69 + if (changed) { 70 + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 71 + flush_tlb_page(vma, addr); 72 + } 73 + return 
changed; 74 + } 75 + 76 + static inline pte_t huge_ptep_get(pte_t *ptep) 77 + { 78 + return *ptep; 79 + } 80 + 81 + static inline void arch_clear_hugepage_flags(struct page *page) 82 + { 83 + } 84 + 85 + #endif /* _ASM_PARISC64_HUGETLB_H */
+12 -1
arch/parisc/include/asm/page.h
··· 145 145 #endif /* CONFIG_DISCONTIGMEM */ 146 146 147 147 #ifdef CONFIG_HUGETLB_PAGE 148 - #define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ 148 + #define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */ 149 149 #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 150 150 #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 151 151 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 152 + 153 + #if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) 154 + # define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */ 155 + # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M 156 + #elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) 157 + # define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */ 158 + # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M 159 + #else 160 + # define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */ 161 + # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M 152 162 #endif 163 + #endif /* CONFIG_HUGETLB_PAGE */ 153 164 154 165 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 155 166
+1 -1
arch/parisc/include/asm/pgalloc.h
··· 35 35 PxD_FLAG_VALID | 36 36 PxD_FLAG_ATTACHED) 37 37 + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); 38 - /* The first pmd entry also is marked with _PAGE_GATEWAY as 38 + /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as 39 39 * a signal that this pmd may not be freed */ 40 40 __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); 41 41 #endif
+22 -4
arch/parisc/include/asm/pgtable.h
··· 83 83 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) 84 84 85 85 /* This is the size of the initially mapped kernel memory */ 86 - #define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ 86 + #ifdef CONFIG_64BIT 87 + #define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ 88 + #else 89 + #define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */ 90 + #endif 87 91 #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) 88 92 89 93 #if CONFIG_PGTABLE_LEVELS == 3 ··· 171 167 #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ 172 168 #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ 173 169 #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ 174 - /* bit 21 was formerly the FLUSH bit but is now unused */ 170 + #define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */ 175 171 #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ 176 172 177 173 /* N.B. The bits are defined in terms of a 32 bit word above, so the */ ··· 198 194 #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) 199 195 #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) 200 196 #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) 197 + #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT)) 201 198 #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) 202 199 203 200 #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) ··· 222 217 #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) 223 218 #define PxD_FLAG_MASK (0xf) 224 219 #define PxD_FLAG_SHIFT (4) 225 - #define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */ 220 + #define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT) 226 221 227 222 #ifndef __ASSEMBLY__ 228 223 ··· 368 363 static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 369 364 370 365 /* 366 + * Huge pte definitions. 
367 + */ 368 + #ifdef CONFIG_HUGETLB_PAGE 369 + #define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE) 370 + #define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE)) 371 + #else 372 + #define pte_huge(pte) (0) 373 + #define pte_mkhuge(pte) (pte) 374 + #endif 375 + 376 + 377 + /* 371 378 * Conversion functions: convert a page and protection to a page entry, 372 379 * and a page entry and page directory to the page they refer to. 373 380 */ ··· 427 410 /* Find an entry in the second-level page table.. */ 428 411 429 412 #if CONFIG_PGTABLE_LEVELS == 3 413 + #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) 430 414 #define pmd_offset(dir,address) \ 431 - ((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1))) 415 + ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address)) 432 416 #else 433 417 #define pmd_offset(dir,addr) ((pmd_t *) dir) 434 418 #endif
-27
arch/parisc/include/asm/processor.h
··· 192 192 */ 193 193 typedef unsigned int elf_caddr_t; 194 194 195 - #define start_thread_som(regs, new_pc, new_sp) do { \ 196 - unsigned long *sp = (unsigned long *)new_sp; \ 197 - __u32 spaceid = (__u32)current->mm->context; \ 198 - unsigned long pc = (unsigned long)new_pc; \ 199 - /* offset pc for priv. level */ \ 200 - pc |= 3; \ 201 - \ 202 - regs->iasq[0] = spaceid; \ 203 - regs->iasq[1] = spaceid; \ 204 - regs->iaoq[0] = pc; \ 205 - regs->iaoq[1] = pc + 4; \ 206 - regs->sr[2] = LINUX_GATEWAY_SPACE; \ 207 - regs->sr[3] = 0xffff; \ 208 - regs->sr[4] = spaceid; \ 209 - regs->sr[5] = spaceid; \ 210 - regs->sr[6] = spaceid; \ 211 - regs->sr[7] = spaceid; \ 212 - regs->gr[ 0] = USER_PSW; \ 213 - regs->gr[30] = ((new_sp)+63)&~63; \ 214 - regs->gr[31] = pc; \ 215 - \ 216 - get_user(regs->gr[26],&sp[0]); \ 217 - get_user(regs->gr[25],&sp[-1]); \ 218 - get_user(regs->gr[24],&sp[-2]); \ 219 - get_user(regs->gr[23],&sp[-3]); \ 220 - } while(0) 221 - 222 195 /* The ELF abi wants things done a "wee bit" differently than 223 196 * som does. Supporting this behavior here avoids 224 197 * having our own version of create_elf_tables.
-10
arch/parisc/include/uapi/asm/mman.h
··· 49 49 #define MADV_DONTFORK 10 /* don't inherit across fork */ 50 50 #define MADV_DOFORK 11 /* do inherit across fork */ 51 51 52 - /* The range 12-64 is reserved for page size specification. */ 53 - #define MADV_4K_PAGES 12 /* Use 4K pages */ 54 - #define MADV_16K_PAGES 14 /* Use 16K pages */ 55 - #define MADV_64K_PAGES 16 /* Use 64K pages */ 56 - #define MADV_256K_PAGES 18 /* Use 256K pages */ 57 - #define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */ 58 - #define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ 59 - #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ 60 - #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ 61 - 62 52 #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ 63 53 #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ 64 54
+8
arch/parisc/kernel/asm-offsets.c
··· 290 290 DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); 291 291 DEFINE(ASM_PT_INITIAL, PT_INITIAL); 292 292 BLANK(); 293 + /* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text 294 + * and kernel data on physical huge pages */ 295 + #ifdef CONFIG_HUGETLB_PAGE 296 + DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT); 297 + #else 298 + DEFINE(HUGEPAGE_SIZE, PAGE_SIZE); 299 + #endif 300 + BLANK(); 293 301 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); 294 302 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); 295 303 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
+34 -22
arch/parisc/kernel/entry.S
··· 502 502 STREG \pte,0(\ptp) 503 503 .endm 504 504 505 + /* We have (depending on the page size): 506 + * - 38 to 52-bit Physical Page Number 507 + * - 12 to 26-bit page offset 508 + */ 505 509 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) 506 510 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ 507 - #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) 511 + #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) 512 + #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12) 508 513 509 514 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 510 - .macro convert_for_tlb_insert20 pte 515 + .macro convert_for_tlb_insert20 pte,tmp 516 + #ifdef CONFIG_HUGETLB_PAGE 517 + copy \pte,\tmp 518 + extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 519 + 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 520 + 521 + depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 522 + (63-58)+PAGE_ADD_SHIFT,\pte 523 + extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0 524 + depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\ 525 + (63-58)+PAGE_ADD_HUGE_SHIFT,\pte 526 + #else /* Huge pages disabled */ 511 527 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 512 528 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 513 529 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 514 530 (63-58)+PAGE_ADD_SHIFT,\pte 531 + #endif 515 532 .endm 516 533 517 534 /* Convert the pte and prot to tlb insertion values. How 518 535 * this happens is quite subtle, read below */ 519 - .macro make_insert_tlb spc,pte,prot 536 + .macro make_insert_tlb spc,pte,prot,tmp 520 537 space_to_prot \spc \prot /* create prot id from space */ 521 538 /* The following is the real subtlety. 
This is depositing 522 539 * T <-> _PAGE_REFTRAP ··· 570 553 depdi 1,12,1,\prot 571 554 572 555 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 573 - convert_for_tlb_insert20 \pte 556 + convert_for_tlb_insert20 \pte \tmp 574 557 .endm 575 558 576 559 /* Identical macro to make_insert_tlb above, except it ··· 663 646 664 647 665 648 /* 666 - * Align fault_vector_20 on 4K boundary so that both 667 - * fault_vector_11 and fault_vector_20 are on the 668 - * same page. This is only necessary as long as we 669 - * write protect the kernel text, which we may stop 670 - * doing once we use large page translations to cover 671 - * the static part of the kernel address space. 649 + * Fault_vectors are architecturally required to be aligned on a 2K 650 + * boundary 672 651 */ 673 652 674 653 .text 675 - 676 - .align 4096 654 + .align 2048 677 655 678 656 ENTRY(fault_vector_20) 679 657 /* First vector is invalid (0) */ ··· 1159 1147 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w 1160 1148 update_accessed ptp,pte,t0,t1 1161 1149 1162 - make_insert_tlb spc,pte,prot 1150 + make_insert_tlb spc,pte,prot,t1 1163 1151 1164 1152 idtlbt pte,prot 1165 1153 ··· 1185 1173 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w 1186 1174 update_accessed ptp,pte,t0,t1 1187 1175 1188 - make_insert_tlb spc,pte,prot 1176 + make_insert_tlb spc,pte,prot,t1 1189 1177 1190 1178 idtlbt pte,prot 1191 1179 ··· 1279 1267 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 1280 1268 update_accessed ptp,pte,t0,t1 1281 1269 1282 - make_insert_tlb spc,pte,prot 1270 + make_insert_tlb spc,pte,prot,t1 1283 1271 1284 1272 f_extend pte,t1 1285 1273 ··· 1307 1295 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 1308 1296 update_accessed ptp,pte,t0,t1 1309 1297 1310 - make_insert_tlb spc,pte,prot 1298 + make_insert_tlb spc,pte,prot,t1 1311 1299 1312 1300 f_extend pte,t1 1313 1301 ··· 1416 1404 tlb_lock spc,ptp,pte,t0,t1,itlb_fault 1417 1405 update_accessed ptp,pte,t0,t1 1418 1406 1419 - make_insert_tlb 
spc,pte,prot 1407 + make_insert_tlb spc,pte,prot,t1 1420 1408 1421 1409 iitlbt pte,prot 1422 1410 ··· 1440 1428 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w 1441 1429 update_accessed ptp,pte,t0,t1 1442 1430 1443 - make_insert_tlb spc,pte,prot 1431 + make_insert_tlb spc,pte,prot,t1 1444 1432 1445 1433 iitlbt pte,prot 1446 1434 ··· 1526 1514 tlb_lock spc,ptp,pte,t0,t1,itlb_fault 1527 1515 update_accessed ptp,pte,t0,t1 1528 1516 1529 - make_insert_tlb spc,pte,prot 1517 + make_insert_tlb spc,pte,prot,t1 1530 1518 1531 1519 f_extend pte,t1 1532 1520 ··· 1546 1534 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 1547 1535 update_accessed ptp,pte,t0,t1 1548 1536 1549 - make_insert_tlb spc,pte,prot 1537 + make_insert_tlb spc,pte,prot,t1 1550 1538 1551 1539 f_extend pte,t1 1552 1540 ··· 1578 1566 tlb_lock spc,ptp,pte,t0,t1,dbit_fault 1579 1567 update_dirty ptp,pte,t1 1580 1568 1581 - make_insert_tlb spc,pte,prot 1569 + make_insert_tlb spc,pte,prot,t1 1582 1570 1583 1571 idtlbt pte,prot 1584 1572 ··· 1622 1610 tlb_lock spc,ptp,pte,t0,t1,dbit_fault 1623 1611 update_dirty ptp,pte,t1 1624 1612 1625 - make_insert_tlb spc,pte,prot 1613 + make_insert_tlb spc,pte,prot,t1 1626 1614 1627 1615 f_extend pte,t1 1628 1616
+2 -2
arch/parisc/kernel/head.S
··· 69 69 stw,ma %arg2,4(%r1) 70 70 stw,ma %arg3,4(%r1) 71 71 72 - /* Initialize startup VM. Just map first 8/16 MB of memory */ 72 + /* Initialize startup VM. Just map first 16/32 MB of memory */ 73 73 load32 PA(swapper_pg_dir),%r4 74 74 mtctl %r4,%cr24 /* Initialize kernel root pointer */ 75 75 mtctl %r4,%cr25 /* Initialize user root pointer */ ··· 107 107 /* Now initialize the PTEs themselves. We use RWX for 108 108 * everything ... it will get remapped correctly later */ 109 109 ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */ 110 - ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ 110 + load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ 111 111 load32 PA(pg0),%r1 112 112 113 113 $pgt_fill_loop:
+16 -16
arch/parisc/kernel/module.c
··· 42 42 * We are not doing SEGREL32 handling correctly. According to the ABI, we 43 43 * should do a value offset, like this: 44 44 * if (in_init(me, (void *)val)) 45 - * val -= (uint32_t)me->module_init; 45 + * val -= (uint32_t)me->init_layout.base; 46 46 * else 47 - * val -= (uint32_t)me->module_core; 47 + * val -= (uint32_t)me->core_layout.base; 48 48 * However, SEGREL32 is used only for PARISC unwind entries, and we want 49 49 * those entries to have an absolute address, and not just an offset. 50 50 * ··· 100 100 * or init pieces the location is */ 101 101 static inline int in_init(struct module *me, void *loc) 102 102 { 103 - return (loc >= me->module_init && 104 - loc <= (me->module_init + me->init_size)); 103 + return (loc >= me->init_layout.base && 104 + loc <= (me->init_layout.base + me->init_layout.size)); 105 105 } 106 106 107 107 static inline int in_core(struct module *me, void *loc) 108 108 { 109 - return (loc >= me->module_core && 110 - loc <= (me->module_core + me->core_size)); 109 + return (loc >= me->core_layout.base && 110 + loc <= (me->core_layout.base + me->core_layout.size)); 111 111 } 112 112 113 113 static inline int in_local(struct module *me, void *loc) ··· 367 367 } 368 368 369 369 /* align things a bit */ 370 - me->core_size = ALIGN(me->core_size, 16); 371 - me->arch.got_offset = me->core_size; 372 - me->core_size += gots * sizeof(struct got_entry); 370 + me->core_layout.size = ALIGN(me->core_layout.size, 16); 371 + me->arch.got_offset = me->core_layout.size; 372 + me->core_layout.size += gots * sizeof(struct got_entry); 373 373 374 - me->core_size = ALIGN(me->core_size, 16); 375 - me->arch.fdesc_offset = me->core_size; 376 - me->core_size += fdescs * sizeof(Elf_Fdesc); 374 + me->core_layout.size = ALIGN(me->core_layout.size, 16); 375 + me->arch.fdesc_offset = me->core_layout.size; 376 + me->core_layout.size += fdescs * sizeof(Elf_Fdesc); 377 377 378 378 me->arch.got_max = gots; 379 379 me->arch.fdesc_max = fdescs; ··· 391 391 392 392 
BUG_ON(value == 0); 393 393 394 - got = me->module_core + me->arch.got_offset; 394 + got = me->core_layout.base + me->arch.got_offset; 395 395 for (i = 0; got[i].addr; i++) 396 396 if (got[i].addr == value) 397 397 goto out; ··· 409 409 #ifdef CONFIG_64BIT 410 410 static Elf_Addr get_fdesc(struct module *me, unsigned long value) 411 411 { 412 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; 412 + Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset; 413 413 414 414 if (!value) { 415 415 printk(KERN_ERR "%s: zero OPD requested!\n", me->name); ··· 427 427 428 428 /* Create new one */ 429 429 fdesc->addr = value; 430 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; 430 + fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; 431 431 return (Elf_Addr)fdesc; 432 432 } 433 433 #endif /* CONFIG_64BIT */ ··· 839 839 840 840 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; 841 841 end = table + sechdrs[me->arch.unwind_section].sh_size; 842 - gp = (Elf_Addr)me->module_core + me->arch.got_offset; 842 + gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; 843 843 844 844 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", 845 845 me->arch.unwind_section, table, end, gp);
+13 -1
arch/parisc/kernel/setup.c
··· 130 130 printk(KERN_INFO "The 32-bit Kernel has started...\n"); 131 131 #endif 132 132 133 - printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024)); 133 + printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ", 134 + (int)(PAGE_SIZE / 1024)); 135 + #ifdef CONFIG_HUGETLB_PAGE 136 + printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size", 137 + 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20)); 138 + #else 139 + printk(KERN_CONT "disabled"); 140 + #endif 141 + printk(KERN_CONT ".\n"); 142 + 134 143 135 144 pdc_console_init(); 136 145 ··· 386 377 void start_parisc(void) 387 378 { 388 379 extern void start_kernel(void); 380 + extern void early_trap_init(void); 389 381 390 382 int ret, cpunum; 391 383 struct pdc_coproc_cfg coproc_cfg; ··· 406 396 } else { 407 397 panic("must have an fpu to boot linux"); 408 398 } 399 + 400 + early_trap_init(); /* initialize checksum of fault_vector */ 409 401 410 402 start_kernel(); 411 403 // not reached
+2 -2
arch/parisc/kernel/syscall.S
··· 369 369 ldo -16(%r30),%r29 /* Reference param save area */ 370 370 #endif 371 371 ldo TASK_REGS(%r1),%r26 372 - bl do_syscall_trace_exit,%r2 372 + BL do_syscall_trace_exit,%r2 373 373 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ 374 374 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 375 375 LDREG TI_TASK(%r1), %r1 ··· 390 390 #ifdef CONFIG_64BIT 391 391 ldo -16(%r30),%r29 /* Reference param save area */ 392 392 #endif 393 - bl do_syscall_trace_exit,%r2 393 + BL do_syscall_trace_exit,%r2 394 394 ldo TASK_REGS(%r1),%r26 395 395 396 396 ldil L%syscall_exit_rfi,%r1
+15 -20
arch/parisc/kernel/traps.c
··· 807 807 } 808 808 809 809 810 - int __init check_ivt(void *iva) 810 + void __init initialize_ivt(const void *iva) 811 811 { 812 812 extern u32 os_hpmc_size; 813 813 extern const u32 os_hpmc[]; ··· 818 818 u32 *hpmcp; 819 819 u32 length; 820 820 821 - if (strcmp((char *)iva, "cows can fly")) 822 - return -1; 821 + if (strcmp((const char *)iva, "cows can fly")) 822 + panic("IVT invalid"); 823 823 824 824 ivap = (u32 *)iva; 825 825 ··· 839 839 check += ivap[i]; 840 840 841 841 ivap[5] = -check; 842 - 843 - return 0; 844 842 } 845 843 844 + 845 + /* early_trap_init() is called before we set up kernel mappings and 846 + * write-protect the kernel */ 847 + void __init early_trap_init(void) 848 + { 849 + extern const void fault_vector_20; 850 + 846 851 #ifndef CONFIG_64BIT 847 - extern const void fault_vector_11; 852 + extern const void fault_vector_11; 853 + initialize_ivt(&fault_vector_11); 848 854 #endif 849 - extern const void fault_vector_20; 855 + 856 + initialize_ivt(&fault_vector_20); 857 + } 850 858 851 859 void __init trap_init(void) 852 860 { 853 - void *iva; 854 - 855 - if (boot_cpu_data.cpu_type >= pcxu) 856 - iva = (void *) &fault_vector_20; 857 - else 858 - #ifdef CONFIG_64BIT 859 - panic("Can't boot 64-bit OS on PA1.1 processor!"); 860 - #else 861 - iva = (void *) &fault_vector_11; 862 - #endif 863 - 864 - if (check_ivt(iva)) 865 - panic("IVT invalid"); 866 861 }
+6 -3
arch/parisc/kernel/vmlinux.lds.S
··· 60 60 EXIT_DATA 61 61 } 62 62 PERCPU_SECTION(8) 63 - . = ALIGN(PAGE_SIZE); 63 + . = ALIGN(HUGEPAGE_SIZE); 64 64 __init_end = .; 65 65 /* freed after init ends here */ 66 66 ··· 116 116 * that we can properly leave these 117 117 * as writable 118 118 */ 119 - . = ALIGN(PAGE_SIZE); 119 + . = ALIGN(HUGEPAGE_SIZE); 120 120 data_start = .; 121 121 122 122 EXCEPTION_TABLE(8) ··· 135 135 _edata = .; 136 136 137 137 /* BSS */ 138 - BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) 138 + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE) 139 139 140 + /* bootmap is allocated in setup_bootmem() directly behind bss. */ 141 + 142 + . = ALIGN(HUGEPAGE_SIZE); 140 143 _end = . ; 141 144 142 145 STABS_DEBUG
+1
arch/parisc/mm/Makefile
··· 3 3 # 4 4 5 5 obj-y := init.o fault.o ioremap.o 6 + obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+161
arch/parisc/mm/hugetlbpage.c
··· 1 + /* 2 + * PARISC64 Huge TLB page support. 3 + * 4 + * This parisc implementation is heavily based on the SPARC and x86 code. 5 + * 6 + * Copyright (C) 2015 Helge Deller <deller@gmx.de> 7 + */ 8 + 9 + #include <linux/fs.h> 10 + #include <linux/mm.h> 11 + #include <linux/hugetlb.h> 12 + #include <linux/pagemap.h> 13 + #include <linux/sysctl.h> 14 + 15 + #include <asm/mman.h> 16 + #include <asm/pgalloc.h> 17 + #include <asm/tlb.h> 18 + #include <asm/tlbflush.h> 19 + #include <asm/cacheflush.h> 20 + #include <asm/mmu_context.h> 21 + 22 + 23 + unsigned long 24 + hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 25 + unsigned long len, unsigned long pgoff, unsigned long flags) 26 + { 27 + struct hstate *h = hstate_file(file); 28 + 29 + if (len & ~huge_page_mask(h)) 30 + return -EINVAL; 31 + if (len > TASK_SIZE) 32 + return -ENOMEM; 33 + 34 + if (flags & MAP_FIXED) 35 + if (prepare_hugepage_range(file, addr, len)) 36 + return -EINVAL; 37 + 38 + if (addr) 39 + addr = ALIGN(addr, huge_page_size(h)); 40 + 41 + /* we need to make sure the colouring is OK */ 42 + return arch_get_unmapped_area(file, addr, len, pgoff, flags); 43 + } 44 + 45 + 46 + pte_t *huge_pte_alloc(struct mm_struct *mm, 47 + unsigned long addr, unsigned long sz) 48 + { 49 + pgd_t *pgd; 50 + pud_t *pud; 51 + pmd_t *pmd; 52 + pte_t *pte = NULL; 53 + 54 + /* We must align the address, because our caller will run 55 + * set_huge_pte_at() on whatever we return, which writes out 56 + * all of the sub-ptes for the hugepage range. So we have 57 + * to give it the first such sub-pte. 
58 + */ 59 + addr &= HPAGE_MASK; 60 + 61 + pgd = pgd_offset(mm, addr); 62 + pud = pud_alloc(mm, pgd, addr); 63 + if (pud) { 64 + pmd = pmd_alloc(mm, pud, addr); 65 + if (pmd) 66 + pte = pte_alloc_map(mm, NULL, pmd, addr); 67 + } 68 + return pte; 69 + } 70 + 71 + pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 72 + { 73 + pgd_t *pgd; 74 + pud_t *pud; 75 + pmd_t *pmd; 76 + pte_t *pte = NULL; 77 + 78 + addr &= HPAGE_MASK; 79 + 80 + pgd = pgd_offset(mm, addr); 81 + if (!pgd_none(*pgd)) { 82 + pud = pud_offset(pgd, addr); 83 + if (!pud_none(*pud)) { 84 + pmd = pmd_offset(pud, addr); 85 + if (!pmd_none(*pmd)) 86 + pte = pte_offset_map(pmd, addr); 87 + } 88 + } 89 + return pte; 90 + } 91 + 92 + /* Purge data and instruction TLB entries. Must be called holding 93 + * the pa_tlb_lock. The TLB purge instructions are slow on SMP 94 + * machines since the purge must be broadcast to all CPUs. 95 + */ 96 + static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr) 97 + { 98 + int i; 99 + 100 + /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate 101 + * Linux standard huge pages (e.g. 2 MB) */ 102 + BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT); 103 + 104 + addr &= HPAGE_MASK; 105 + addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT; 106 + 107 + for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) { 108 + mtsp(mm->context, 1); 109 + pdtlb(addr); 110 + if (unlikely(split_tlb)) 111 + pitlb(addr); 112 + addr += (1UL << REAL_HPAGE_SHIFT); 113 + } 114 + } 115 + 116 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 117 + pte_t *ptep, pte_t entry) 118 + { 119 + unsigned long addr_start; 120 + int i; 121 + 122 + addr &= HPAGE_MASK; 123 + addr_start = addr; 124 + 125 + for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { 126 + /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry) 127 + * instead, but then we get double locking on pa_tlb_lock. 
*/ 128 + *ptep = entry; 129 + ptep++; 130 + 131 + /* Drop the PAGE_SIZE/non-huge tlb entry */ 132 + purge_tlb_entries(mm, addr); 133 + 134 + addr += PAGE_SIZE; 135 + pte_val(entry) += PAGE_SIZE; 136 + } 137 + 138 + purge_tlb_entries_huge(mm, addr_start); 139 + } 140 + 141 + 142 + pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 143 + pte_t *ptep) 144 + { 145 + pte_t entry; 146 + 147 + entry = *ptep; 148 + set_huge_pte_at(mm, addr, ptep, __pte(0)); 149 + 150 + return entry; 151 + } 152 + 153 + int pmd_huge(pmd_t pmd) 154 + { 155 + return 0; 156 + } 157 + 158 + int pud_huge(pud_t pud) 159 + { 160 + return 0; 161 + }
+17 -23
arch/parisc/mm/init.c
··· 409 409 unsigned long vaddr; 410 410 unsigned long ro_start; 411 411 unsigned long ro_end; 412 - unsigned long fv_addr; 413 - unsigned long gw_addr; 414 - extern const unsigned long fault_vector_20; 415 - extern void * const linux_gateway_page; 412 + unsigned long kernel_end; 416 413 417 414 ro_start = __pa((unsigned long)_text); 418 415 ro_end = __pa((unsigned long)&data_start); 419 - fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; 420 - gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK; 416 + kernel_end = __pa((unsigned long)&_end); 421 417 422 418 end_paddr = start_paddr + size; 423 419 ··· 471 475 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { 472 476 pte_t pte; 473 477 474 - /* 475 - * Map the fault vector writable so we can 476 - * write the HPMC checksum. 477 - */ 478 478 if (force) 479 479 pte = __mk_pte(address, pgprot); 480 - else if (parisc_text_address(vaddr) && 481 - address != fv_addr) 480 + else if (parisc_text_address(vaddr)) { 482 481 pte = __mk_pte(address, PAGE_KERNEL_EXEC); 482 + if (address >= ro_start && address < kernel_end) 483 + pte = pte_mkhuge(pte); 484 + } 483 485 else 484 486 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB) 485 - if (address >= ro_start && address < ro_end 486 - && address != fv_addr 487 - && address != gw_addr) 488 - pte = __mk_pte(address, PAGE_KERNEL_RO); 489 - else 487 + if (address >= ro_start && address < ro_end) { 488 + pte = __mk_pte(address, PAGE_KERNEL_EXEC); 489 + pte = pte_mkhuge(pte); 490 + } else 490 491 #endif 492 + { 491 493 pte = __mk_pte(address, pgprot); 494 + if (address >= ro_start && address < kernel_end) 495 + pte = pte_mkhuge(pte); 496 + } 492 497 493 498 if (address >= end_paddr) { 494 499 if (force) ··· 533 536 534 537 /* force the kernel to see the new TLB entries */ 535 538 __flush_tlb_range(0, init_begin, init_end); 536 - /* Attempt to catch anyone trying to execute code here 537 - * by filling the page with BRK insns. 
538 - */ 539 - memset((void *)init_begin, 0x00, init_end - init_begin); 539 + 540 540 /* finally dump all the instructions which were cached, since the 541 541 * pages are no-longer executable */ 542 542 flush_icache_range(init_begin, init_end); 543 543 544 - free_initmem_default(-1); 544 + free_initmem_default(POISON_FREE_INITMEM); 545 545 546 546 /* set up a new led state on systems shipped LED State panel */ 547 547 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); ··· 722 728 unsigned long size; 723 729 724 730 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; 725 - end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT); 726 731 size = pmem_ranges[range].pages << PAGE_SHIFT; 732 + end_paddr = start_paddr + size; 727 733 728 734 map_pages((unsigned long)__va(start_paddr), start_paddr, 729 735 size, PAGE_KERNEL, 0);
+1
arch/powerpc/include/asm/systbl.h
··· 382 382 SYSCALL(shmdt) 383 383 SYSCALL(shmget) 384 384 COMPAT_SYS(shmctl) 385 + SYSCALL(mlock2)
+1 -1
arch/powerpc/include/asm/unistd.h
··· 12 12 #include <uapi/asm/unistd.h> 13 13 14 14 15 - #define __NR_syscalls 378 15 + #define __NR_syscalls 379 16 16 17 17 #define __NR__exit __NR_exit 18 18 #define NR_syscalls __NR_syscalls
+1
arch/powerpc/include/uapi/asm/unistd.h
··· 400 400 #define __NR_shmdt 375 401 401 #define __NR_shmget 376 402 402 #define __NR_shmctl 377 403 + #define __NR_mlock2 378 403 404 404 405 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
+3 -3
arch/powerpc/kernel/module_32.c
··· 188 188 189 189 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); 190 190 /* Init, or core PLT? */ 191 - if (location >= mod->module_core 192 - && location < mod->module_core + mod->core_size) 191 + if (location >= mod->core_layout.base 192 + && location < mod->core_layout.base + mod->core_layout.size) 193 193 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; 194 194 else 195 195 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; ··· 296 296 } 297 297 #ifdef CONFIG_DYNAMIC_FTRACE 298 298 module->arch.tramp = 299 - do_plt_call(module->module_core, 299 + do_plt_call(module->core_layout.base, 300 300 (unsigned long)ftrace_caller, 301 301 sechdrs, module); 302 302 #endif
+11 -11
arch/s390/kernel/module.c
··· 159 159 160 160 /* Increase core size by size of got & plt and set start 161 161 offsets for got and plt. */ 162 - me->core_size = ALIGN(me->core_size, 4); 163 - me->arch.got_offset = me->core_size; 164 - me->core_size += me->arch.got_size; 165 - me->arch.plt_offset = me->core_size; 166 - me->core_size += me->arch.plt_size; 162 + me->core_layout.size = ALIGN(me->core_layout.size, 4); 163 + me->arch.got_offset = me->core_layout.size; 164 + me->core_layout.size += me->arch.got_size; 165 + me->arch.plt_offset = me->core_layout.size; 166 + me->core_layout.size += me->arch.plt_size; 167 167 return 0; 168 168 } 169 169 ··· 279 279 if (info->got_initialized == 0) { 280 280 Elf_Addr *gotent; 281 281 282 - gotent = me->module_core + me->arch.got_offset + 282 + gotent = me->core_layout.base + me->arch.got_offset + 283 283 info->got_offset; 284 284 *gotent = val; 285 285 info->got_initialized = 1; ··· 302 302 rc = apply_rela_bits(loc, val, 0, 64, 0); 303 303 else if (r_type == R_390_GOTENT || 304 304 r_type == R_390_GOTPLTENT) { 305 - val += (Elf_Addr) me->module_core - loc; 305 + val += (Elf_Addr) me->core_layout.base - loc; 306 306 rc = apply_rela_bits(loc, val, 1, 32, 1); 307 307 } 308 308 break; ··· 315 315 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ 316 316 if (info->plt_initialized == 0) { 317 317 unsigned int *ip; 318 - ip = me->module_core + me->arch.plt_offset + 318 + ip = me->core_layout.base + me->arch.plt_offset + 319 319 info->plt_offset; 320 320 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ 321 321 ip[1] = 0x100a0004; ··· 334 334 val - loc + 0xffffUL < 0x1ffffeUL) || 335 335 (r_type == R_390_PLT32DBL && 336 336 val - loc + 0xffffffffULL < 0x1fffffffeULL))) 337 - val = (Elf_Addr) me->module_core + 337 + val = (Elf_Addr) me->core_layout.base + 338 338 me->arch.plt_offset + 339 339 info->plt_offset; 340 340 val += rela->r_addend - loc; ··· 356 356 case R_390_GOTOFF32: /* 32 bit offset to GOT. 
*/ 357 357 case R_390_GOTOFF64: /* 64 bit offset to GOT. */ 358 358 val = val + rela->r_addend - 359 - ((Elf_Addr) me->module_core + me->arch.got_offset); 359 + ((Elf_Addr) me->core_layout.base + me->arch.got_offset); 360 360 if (r_type == R_390_GOTOFF16) 361 361 rc = apply_rela_bits(loc, val, 0, 16, 0); 362 362 else if (r_type == R_390_GOTOFF32) ··· 366 366 break; 367 367 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ 368 368 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ 369 - val = (Elf_Addr) me->module_core + me->arch.got_offset + 369 + val = (Elf_Addr) me->core_layout.base + me->arch.got_offset + 370 370 rela->r_addend - loc; 371 371 if (r_type == R_390_GOTPC) 372 372 rc = apply_rela_bits(loc, val, 1, 32, 0);
+5 -2
arch/s390/kvm/interrupt.c
··· 1030 1030 src_id, 0); 1031 1031 1032 1032 /* sending vcpu invalid */ 1033 - if (src_id >= KVM_MAX_VCPUS || 1034 - kvm_get_vcpu(vcpu->kvm, src_id) == NULL) 1033 + if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) 1035 1034 return -EINVAL; 1036 1035 1037 1036 if (sclp.has_sigpif) ··· 1108 1109 irq->u.emerg.code); 1109 1110 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 1110 1111 irq->u.emerg.code, 0); 1112 + 1113 + /* sending vcpu invalid */ 1114 + if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) 1115 + return -EINVAL; 1111 1116 1112 1117 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1113 1118 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+5 -1
arch/s390/kvm/kvm-s390.c
··· 342 342 r = 0; 343 343 break; 344 344 case KVM_CAP_S390_VECTOR_REGISTERS: 345 - if (MACHINE_HAS_VX) { 345 + mutex_lock(&kvm->lock); 346 + if (atomic_read(&kvm->online_vcpus)) { 347 + r = -EBUSY; 348 + } else if (MACHINE_HAS_VX) { 346 349 set_kvm_facility(kvm->arch.model.fac->mask, 129); 347 350 set_kvm_facility(kvm->arch.model.fac->list, 129); 348 351 r = 0; 349 352 } else 350 353 r = -EINVAL; 354 + mutex_unlock(&kvm->lock); 351 355 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", 352 356 r ? "(not available)" : "(success)"); 353 357 break;
+1 -1
arch/s390/kvm/priv.c
··· 660 660 661 661 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); 662 662 663 - if (!MACHINE_HAS_PFMF) 663 + if (!test_kvm_facility(vcpu->kvm, 8)) 664 664 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 665 665 666 666 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+2 -6
arch/s390/kvm/sigp.c
··· 291 291 u16 cpu_addr, u32 parameter, u64 *status_reg) 292 292 { 293 293 int rc; 294 - struct kvm_vcpu *dst_vcpu; 294 + struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); 295 295 296 - if (cpu_addr >= KVM_MAX_VCPUS) 297 - return SIGP_CC_NOT_OPERATIONAL; 298 - 299 - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); 300 296 if (!dst_vcpu) 301 297 return SIGP_CC_NOT_OPERATIONAL; 302 298 ··· 474 478 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); 475 479 476 480 if (order_code == SIGP_EXTERNAL_CALL) { 477 - dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); 481 + dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); 478 482 BUG_ON(dest_vcpu == NULL); 479 483 480 484 kvm_s390_vcpu_wakeup(dest_vcpu);
+1 -2
arch/x86/include/asm/msr-index.h
··· 35 35 #define MSR_IA32_PERFCTR0 0x000000c1 36 36 #define MSR_IA32_PERFCTR1 0x000000c2 37 37 #define MSR_FSB_FREQ 0x000000cd 38 - #define MSR_NHM_PLATFORM_INFO 0x000000ce 38 + #define MSR_PLATFORM_INFO 0x000000ce 39 39 40 40 #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 41 41 #define NHM_C3_AUTO_DEMOTE (1UL << 25) ··· 44 44 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 45 45 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 46 46 47 - #define MSR_PLATFORM_INFO 0x000000ce 48 47 #define MSR_MTRRcap 0x000000fe 49 48 #define MSR_IA32_BBL_CR_CTL 0x00000119 50 49 #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+1 -2
arch/x86/kernel/cpu/common.c
··· 273 273 274 274 static __always_inline void setup_smap(struct cpuinfo_x86 *c) 275 275 { 276 - unsigned long eflags; 276 + unsigned long eflags = native_save_fl(); 277 277 278 278 /* This should have been cleared long ago */ 279 - raw_local_save_flags(eflags); 280 279 BUG_ON(eflags & X86_EFLAGS_AC); 281 280 282 281 if (cpu_has(c, X86_FEATURE_SMAP)) {
+5 -6
arch/x86/kernel/fpu/signal.c
··· 385 385 */ 386 386 void fpu__init_prepare_fx_sw_frame(void) 387 387 { 388 - int fsave_header_size = sizeof(struct fregs_state); 389 388 int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; 390 - 391 - if (config_enabled(CONFIG_X86_32)) 392 - size += fsave_header_size; 393 389 394 390 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; 395 391 fx_sw_reserved.extended_size = size; 396 392 fx_sw_reserved.xfeatures = xfeatures_mask; 397 393 fx_sw_reserved.xstate_size = xstate_size; 398 394 399 - if (config_enabled(CONFIG_IA32_EMULATION)) { 395 + if (config_enabled(CONFIG_IA32_EMULATION) || 396 + config_enabled(CONFIG_X86_32)) { 397 + int fsave_header_size = sizeof(struct fregs_state); 398 + 400 399 fx_sw_reserved_ia32 = fx_sw_reserved; 401 - fx_sw_reserved_ia32.extended_size += fsave_header_size; 400 + fx_sw_reserved_ia32.extended_size = size + fsave_header_size; 402 401 } 403 402 } 404 403
-1
arch/x86/kernel/fpu/xstate.c
··· 694 694 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 695 695 return NULL; 696 696 697 - xsave = &current->thread.fpu.state.xsave; 698 697 /* 699 698 * We should not ever be requesting features that we 700 699 * have not enabled. Remember that pcntxt_mask is
+3 -3
arch/x86/kernel/livepatch.c
··· 41 41 int ret, numpages, size = 4; 42 42 bool readonly; 43 43 unsigned long val; 44 - unsigned long core = (unsigned long)mod->module_core; 45 - unsigned long core_size = mod->core_size; 44 + unsigned long core = (unsigned long)mod->core_layout.base; 45 + unsigned long core_size = mod->core_layout.size; 46 46 47 47 switch (type) { 48 48 case R_X86_64_NONE: ··· 72 72 readonly = false; 73 73 74 74 #ifdef CONFIG_DEBUG_SET_MODULE_RONX 75 - if (loc < core + mod->core_ro_size) 75 + if (loc < core + mod->core_layout.ro_size) 76 76 readonly = true; 77 77 #endif 78 78
+6
arch/x86/kernel/mcount_64.S
··· 278 278 /* save_mcount_regs fills in first two parameters */ 279 279 save_mcount_regs 280 280 281 + /* 282 + * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not 283 + * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the 284 + * ip and parent ip are used and the list function is called when 285 + * function tracing is enabled. 286 + */ 281 287 call *ftrace_trace_function 282 288 283 289 restore_mcount_regs
-5
arch/x86/kvm/vmx.c
··· 7394 7394 7395 7395 switch (type) { 7396 7396 case VMX_VPID_EXTENT_ALL_CONTEXT: 7397 - if (get_vmcs12(vcpu)->virtual_processor_id == 0) { 7398 - nested_vmx_failValid(vcpu, 7399 - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7400 - return 1; 7401 - } 7402 7397 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); 7403 7398 nested_vmx_succeed(vcpu); 7404 7399 break;
+32 -29
arch/x86/kvm/x86.c
··· 2763 2763 return 0; 2764 2764 } 2765 2765 2766 + static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) 2767 + { 2768 + return (!lapic_in_kernel(vcpu) || 2769 + kvm_apic_accept_pic_intr(vcpu)); 2770 + } 2771 + 2772 + /* 2773 + * if userspace requested an interrupt window, check that the 2774 + * interrupt window is open. 2775 + * 2776 + * No need to exit to userspace if we already have an interrupt queued. 2777 + */ 2778 + static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 2779 + { 2780 + return kvm_arch_interrupt_allowed(vcpu) && 2781 + !kvm_cpu_has_interrupt(vcpu) && 2782 + !kvm_event_needs_reinjection(vcpu) && 2783 + kvm_cpu_accept_dm_intr(vcpu); 2784 + } 2785 + 2766 2786 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 2767 2787 struct kvm_interrupt *irq) 2768 2788 { ··· 2806 2786 return -EEXIST; 2807 2787 2808 2788 vcpu->arch.pending_external_vector = irq->irq; 2789 + kvm_make_request(KVM_REQ_EVENT, vcpu); 2809 2790 return 0; 2810 2791 } 2811 2792 ··· 5931 5910 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 5932 5911 } 5933 5912 5934 - /* 5935 - * Check if userspace requested an interrupt window, and that the 5936 - * interrupt window is open. 5937 - * 5938 - * No need to exit to userspace if we already have an interrupt queued. 5939 - */ 5940 5913 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 5941 5914 { 5942 - if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm)) 5943 - return false; 5944 - 5945 - if (kvm_cpu_has_interrupt(vcpu)) 5946 - return false; 5947 - 5948 - return (irqchip_split(vcpu->kvm) 5949 - ? kvm_apic_accept_pic_intr(vcpu) 5950 - : kvm_arch_interrupt_allowed(vcpu)); 5915 + return vcpu->run->request_interrupt_window && 5916 + likely(!pic_in_kernel(vcpu->kvm)); 5951 5917 } 5952 5918 5953 5919 static void post_kvm_run_save(struct kvm_vcpu *vcpu) ··· 5945 5937 kvm_run->flags = is_smm(vcpu) ? 
KVM_RUN_X86_SMM : 0; 5946 5938 kvm_run->cr8 = kvm_get_cr8(vcpu); 5947 5939 kvm_run->apic_base = kvm_get_apic_base(vcpu); 5948 - if (!irqchip_in_kernel(vcpu->kvm)) 5949 - kvm_run->ready_for_interrupt_injection = 5950 - kvm_arch_interrupt_allowed(vcpu) && 5951 - !kvm_cpu_has_interrupt(vcpu) && 5952 - !kvm_event_needs_reinjection(vcpu); 5953 - else if (!pic_in_kernel(vcpu->kvm)) 5954 - kvm_run->ready_for_interrupt_injection = 5955 - kvm_apic_accept_pic_intr(vcpu) && 5956 - !kvm_cpu_has_interrupt(vcpu); 5957 - else 5958 - kvm_run->ready_for_interrupt_injection = 1; 5940 + kvm_run->ready_for_interrupt_injection = 5941 + pic_in_kernel(vcpu->kvm) || 5942 + kvm_vcpu_ready_for_interrupt_injection(vcpu); 5959 5943 } 5960 5944 5961 5945 static void update_cr8_intercept(struct kvm_vcpu *vcpu) ··· 6360 6360 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 6361 6361 { 6362 6362 int r; 6363 - bool req_int_win = !lapic_in_kernel(vcpu) && 6364 - vcpu->run->request_interrupt_window; 6363 + bool req_int_win = 6364 + dm_request_for_irq_injection(vcpu) && 6365 + kvm_cpu_accept_dm_intr(vcpu); 6366 + 6365 6367 bool req_immediate_exit = false; 6366 6368 6367 6369 if (vcpu->requests) { ··· 6665 6663 if (kvm_cpu_has_pending_timer(vcpu)) 6666 6664 kvm_inject_pending_timer_irqs(vcpu); 6667 6665 6668 - if (dm_request_for_irq_injection(vcpu)) { 6666 + if (dm_request_for_irq_injection(vcpu) && 6667 + kvm_vcpu_ready_for_interrupt_injection(vcpu)) { 6669 6668 r = 0; 6670 6669 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 6671 6670 ++vcpu->stat.request_irq_exits;
+41 -6
arch/x86/mm/mpx.c
··· 586 586 } 587 587 588 588 /* 589 + * We only want to do a 4-byte get_user() on 32-bit. Otherwise, 590 + * we might run off the end of the bounds table if we are on 591 + * a 64-bit kernel and try to get 8 bytes. 592 + */ 593 + int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, 594 + long __user *bd_entry_ptr) 595 + { 596 + u32 bd_entry_32; 597 + int ret; 598 + 599 + if (is_64bit_mm(mm)) 600 + return get_user(*bd_entry_ret, bd_entry_ptr); 601 + 602 + /* 603 + * Note that get_user() uses the type of the *pointer* to 604 + * establish the size of the get, not the destination. 605 + */ 606 + ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr); 607 + *bd_entry_ret = bd_entry_32; 608 + return ret; 609 + } 610 + 611 + /* 589 612 * Get the base of bounds tables pointed by specific bounds 590 613 * directory entry. 591 614 */ ··· 628 605 int need_write = 0; 629 606 630 607 pagefault_disable(); 631 - ret = get_user(bd_entry, bd_entry_ptr); 608 + ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr); 632 609 pagefault_enable(); 633 610 if (!ret) 634 611 break; ··· 723 700 */ 724 701 static inline unsigned long bd_entry_virt_space(struct mm_struct *mm) 725 702 { 726 - unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits); 727 - if (is_64bit_mm(mm)) 728 - return virt_space / MPX_BD_NR_ENTRIES_64; 729 - else 730 - return virt_space / MPX_BD_NR_ENTRIES_32; 703 + unsigned long long virt_space; 704 + unsigned long long GB = (1ULL << 30); 705 + 706 + /* 707 + * This covers 32-bit emulation as well as 32-bit kernels 708 + * running on 64-bit harware. 709 + */ 710 + if (!is_64bit_mm(mm)) 711 + return (4ULL * GB) / MPX_BD_NR_ENTRIES_32; 712 + 713 + /* 714 + * 'x86_virt_bits' returns what the hardware is capable 715 + * of, and returns the full >32-bit adddress space when 716 + * running 32-bit kernels on 64-bit hardware. 
717 + */ 718 + virt_space = (1ULL << boot_cpu_data.x86_virt_bits); 719 + return virt_space / MPX_BD_NR_ENTRIES_64; 731 720 } 732 721 733 722 /*
+27 -5
block/blk-merge.c
··· 76 76 struct bio_vec bv, bvprv, *bvprvp = NULL; 77 77 struct bvec_iter iter; 78 78 unsigned seg_size = 0, nsegs = 0, sectors = 0; 79 + unsigned front_seg_size = bio->bi_seg_front_size; 80 + bool do_split = true; 81 + struct bio *new = NULL; 79 82 80 83 bio_for_each_segment(bv, bio, iter) { 81 84 if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) ··· 101 98 102 99 seg_size += bv.bv_len; 103 100 bvprv = bv; 104 - bvprvp = &bv; 101 + bvprvp = &bvprv; 105 102 sectors += bv.bv_len >> 9; 106 103 continue; 107 104 } ··· 111 108 112 109 nsegs++; 113 110 bvprv = bv; 114 - bvprvp = &bv; 111 + bvprvp = &bvprv; 115 112 seg_size = bv.bv_len; 116 113 sectors += bv.bv_len >> 9; 114 + 115 + if (nsegs == 1 && seg_size > front_seg_size) 116 + front_seg_size = seg_size; 117 117 } 118 118 119 - *segs = nsegs; 120 - return NULL; 119 + do_split = false; 121 120 split: 122 121 *segs = nsegs; 123 - return bio_split(bio, sectors, GFP_NOIO, bs); 122 + 123 + if (do_split) { 124 + new = bio_split(bio, sectors, GFP_NOIO, bs); 125 + if (new) 126 + bio = new; 127 + } 128 + 129 + bio->bi_seg_front_size = front_seg_size; 130 + if (seg_size > bio->bi_seg_back_size) 131 + bio->bi_seg_back_size = seg_size; 132 + 133 + return do_split ? new : NULL; 124 134 } 125 135 126 136 void blk_queue_split(struct request_queue *q, struct bio **bio, ··· 427 411 428 412 if (sg) 429 413 sg_mark_end(sg); 414 + 415 + /* 416 + * Something must have been wrong if the figured number of 417 + * segment is bigger than number of req's physical segments 418 + */ 419 + WARN_ON(nsegs > rq->nr_phys_segments); 430 420 431 421 return nsegs; 432 422 }
+9 -5
block/blk-mq.c
··· 1291 1291 blk_mq_bio_to_request(rq, bio); 1292 1292 1293 1293 /* 1294 - * we do limited pluging. If bio can be merged, do merge. 1294 + * We do limited pluging. If the bio can be merged, do that. 1295 1295 * Otherwise the existing request in the plug list will be 1296 1296 * issued. So the plug list will have one request at most 1297 1297 */ 1298 1298 if (plug) { 1299 1299 /* 1300 1300 * The plug list might get flushed before this. If that 1301 - * happens, same_queue_rq is invalid and plug list is empty 1302 - **/ 1301 + * happens, same_queue_rq is invalid and plug list is 1302 + * empty 1303 + */ 1303 1304 if (same_queue_rq && !list_empty(&plug->mq_list)) { 1304 1305 old_rq = same_queue_rq; 1305 1306 list_del_init(&old_rq->queuelist); ··· 1381 1380 blk_mq_bio_to_request(rq, bio); 1382 1381 if (!request_count) 1383 1382 trace_block_plug(q); 1384 - else if (request_count >= BLK_MAX_REQUEST_COUNT) { 1383 + 1384 + blk_mq_put_ctx(data.ctx); 1385 + 1386 + if (request_count >= BLK_MAX_REQUEST_COUNT) { 1385 1387 blk_flush_plug_list(plug, false); 1386 1388 trace_block_plug(q); 1387 1389 } 1390 + 1388 1391 list_add_tail(&rq->queuelist, &plug->mq_list); 1389 - blk_mq_put_ctx(data.ctx); 1390 1392 return cookie; 1391 1393 } 1392 1394
+5 -3
block/blk-timeout.c
··· 158 158 { 159 159 if (blk_mark_rq_complete(req)) 160 160 return; 161 - blk_delete_timer(req); 162 - if (req->q->mq_ops) 161 + 162 + if (req->q->mq_ops) { 163 163 blk_mq_rq_timed_out(req, false); 164 - else 164 + } else { 165 + blk_delete_timer(req); 165 166 blk_rq_timed_out(req); 167 + } 166 168 } 167 169 EXPORT_SYMBOL_GPL(blk_abort_request); 168 170
-2
block/blk.h
··· 72 72 void __blk_queue_free_tags(struct request_queue *q); 73 73 bool __blk_end_bidi_request(struct request *rq, int error, 74 74 unsigned int nr_bytes, unsigned int bidi_bytes); 75 - int blk_queue_enter(struct request_queue *q, gfp_t gfp); 76 - void blk_queue_exit(struct request_queue *q); 77 75 void blk_freeze_queue(struct request_queue *q); 78 76 79 77 static inline void blk_queue_enter_live(struct request_queue *q)
+5 -5
block/noop-iosched.c
··· 21 21 static int noop_dispatch(struct request_queue *q, int force) 22 22 { 23 23 struct noop_data *nd = q->elevator->elevator_data; 24 + struct request *rq; 24 25 25 - if (!list_empty(&nd->queue)) { 26 - struct request *rq; 27 - rq = list_entry(nd->queue.next, struct request, queuelist); 26 + rq = list_first_entry_or_null(&nd->queue, struct request, queuelist); 27 + if (rq) { 28 28 list_del_init(&rq->queuelist); 29 29 elv_dispatch_sort(q, rq); 30 30 return 1; ··· 46 46 47 47 if (rq->queuelist.prev == &nd->queue) 48 48 return NULL; 49 - return list_entry(rq->queuelist.prev, struct request, queuelist); 49 + return list_prev_entry(rq, queuelist); 50 50 } 51 51 52 52 static struct request * ··· 56 56 57 57 if (rq->queuelist.next == &nd->queue) 58 58 return NULL; 59 - return list_entry(rq->queuelist.next, struct request, queuelist); 59 + return list_next_entry(rq, queuelist); 60 60 } 61 61 62 62 static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
+7 -3
block/partitions/mac.c
··· 32 32 Sector sect; 33 33 unsigned char *data; 34 34 int slot, blocks_in_map; 35 - unsigned secsize; 35 + unsigned secsize, datasize, partoffset; 36 36 #ifdef CONFIG_PPC_PMAC 37 37 int found_root = 0; 38 38 int found_root_goodness = 0; ··· 50 50 } 51 51 secsize = be16_to_cpu(md->block_size); 52 52 put_dev_sector(sect); 53 - data = read_part_sector(state, secsize/512, &sect); 53 + datasize = round_down(secsize, 512); 54 + data = read_part_sector(state, datasize / 512, &sect); 54 55 if (!data) 55 56 return -1; 56 - part = (struct mac_partition *) (data + secsize%512); 57 + partoffset = secsize % 512; 58 + if (partoffset + sizeof(*part) > datasize) 59 + return -1; 60 + part = (struct mac_partition *) (data + partoffset); 57 61 if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { 58 62 put_dev_sector(sect); 59 63 return 0; /* not a MacOS disk */
+1 -1
drivers/Makefile
··· 63 63 obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ 64 64 65 65 obj-$(CONFIG_PARPORT) += parport/ 66 + obj-$(CONFIG_NVM) += lightnvm/ 66 67 obj-y += base/ block/ misc/ mfd/ nfc/ 67 68 obj-$(CONFIG_LIBNVDIMM) += nvdimm/ 68 69 obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ ··· 71 70 obj-y += macintosh/ 72 71 obj-$(CONFIG_IDE) += ide/ 73 72 obj-$(CONFIG_SCSI) += scsi/ 74 - obj-$(CONFIG_NVM) += lightnvm/ 75 73 obj-y += nvme/ 76 74 obj-$(CONFIG_ATA) += ata/ 77 75 obj-$(CONFIG_TARGET_CORE) += target/
+1 -1
drivers/acpi/cppc_acpi.c
··· 304 304 305 305 static int register_pcc_channel(int pcc_subspace_idx) 306 306 { 307 - struct acpi_pcct_subspace *cppc_ss; 307 + struct acpi_pcct_hw_reduced *cppc_ss; 308 308 unsigned int len; 309 309 310 310 if (pcc_subspace_idx >= 0) {
+1 -1
drivers/acpi/ec.c
··· 1103 1103 } 1104 1104 1105 1105 err_exit: 1106 - if (result && q) 1106 + if (result) 1107 1107 acpi_ec_delete_query(q); 1108 1108 if (data) 1109 1109 *data = value;
+7 -41
drivers/acpi/sbshc.c
··· 14 14 #include <linux/delay.h> 15 15 #include <linux/module.h> 16 16 #include <linux/interrupt.h> 17 - #include <linux/dmi.h> 18 17 #include "sbshc.h" 19 18 20 19 #define PREFIX "ACPI: " ··· 29 30 u8 query_bit; 30 31 smbus_alarm_callback callback; 31 32 void *context; 33 + bool done; 32 34 }; 33 35 34 36 static int acpi_smbus_hc_add(struct acpi_device *device); ··· 88 88 ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ 89 89 }; 90 90 91 - static bool macbook; 92 - 93 91 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) 94 92 { 95 93 return ec_read(hc->offset + address, data); ··· 98 100 return ec_write(hc->offset + address, data); 99 101 } 100 102 101 - static inline int smb_check_done(struct acpi_smb_hc *hc) 102 - { 103 - union acpi_smb_status status = {.raw = 0}; 104 - smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw); 105 - return status.fields.done && (status.fields.status == SMBUS_OK); 106 - } 107 - 108 103 static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) 109 104 { 110 - if (wait_event_timeout(hc->wait, smb_check_done(hc), 111 - msecs_to_jiffies(timeout))) 105 + if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout))) 112 106 return 0; 113 - /* 114 - * After the timeout happens, OS will try to check the status of SMbus. 115 - * If the status is what OS expected, it will be regarded as the bogus 116 - * timeout. 
117 - */ 118 - if (smb_check_done(hc)) 119 - return 0; 120 - else 121 - return -ETIME; 107 + return -ETIME; 122 108 } 123 109 124 110 static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, ··· 117 135 } 118 136 119 137 mutex_lock(&hc->lock); 120 - if (macbook) 121 - udelay(5); 138 + hc->done = false; 122 139 if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) 123 140 goto end; 124 141 if (temp) { ··· 216 235 if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) 217 236 return 0; 218 237 /* Check if it is only a completion notify */ 219 - if (status.fields.done) 238 + if (status.fields.done && status.fields.status == SMBUS_OK) { 239 + hc->done = true; 220 240 wake_up(&hc->wait); 241 + } 221 242 if (!status.fields.alarm) 222 243 return 0; 223 244 mutex_lock(&hc->lock); ··· 245 262 acpi_handle handle, acpi_ec_query_func func, 246 263 void *data); 247 264 248 - static int macbook_dmi_match(const struct dmi_system_id *d) 249 - { 250 - pr_debug("Detected MacBook, enabling workaround\n"); 251 - macbook = true; 252 - return 0; 253 - } 254 - 255 - static struct dmi_system_id acpi_smbus_dmi_table[] = { 256 - { macbook_dmi_match, "Apple MacBook", { 257 - DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 258 - DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, 259 - }, 260 - { }, 261 - }; 262 - 263 265 static int acpi_smbus_hc_add(struct acpi_device *device) 264 266 { 265 267 int status; 266 268 unsigned long long val; 267 269 struct acpi_smb_hc *hc; 268 - 269 - dmi_check_system(acpi_smbus_dmi_table); 270 270 271 271 if (!device) 272 272 return -EINVAL;
+6
drivers/base/power/wakeirq.c
··· 68 68 struct wake_irq *wirq; 69 69 int err; 70 70 71 + if (irq < 0) 72 + return -EINVAL; 73 + 71 74 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); 72 75 if (!wirq) 73 76 return -ENOMEM; ··· 169 166 { 170 167 struct wake_irq *wirq; 171 168 int err; 169 + 170 + if (irq < 0) 171 + return -EINVAL; 172 172 173 173 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); 174 174 if (!wirq)
+2 -4
drivers/block/mtip32xx/mtip32xx.c
··· 3810 3810 sector_t capacity; 3811 3811 unsigned int index = 0; 3812 3812 struct kobject *kobj; 3813 - unsigned char thd_name[16]; 3814 3813 3815 3814 if (dd->disk) 3816 3815 goto skip_create_disk; /* hw init done, before rebuild */ ··· 3957 3958 } 3958 3959 3959 3960 start_service_thread: 3960 - sprintf(thd_name, "mtip_svc_thd_%02d", index); 3961 3961 dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, 3962 - dd, dd->numa_node, "%s", 3963 - thd_name); 3962 + dd, dd->numa_node, 3963 + "mtip_svc_thd_%02d", index); 3964 3964 3965 3965 if (IS_ERR(dd->mtip_svc_handler)) { 3966 3966 dev_err(&dd->pdev->dev, "service thread failed to start\n");
+198 -9
drivers/block/null_blk.c
··· 8 8 #include <linux/slab.h> 9 9 #include <linux/blk-mq.h> 10 10 #include <linux/hrtimer.h> 11 + #include <linux/lightnvm.h> 11 12 12 13 struct nullb_cmd { 13 14 struct list_head list; ··· 40 39 41 40 struct nullb_queue *queues; 42 41 unsigned int nr_queues; 42 + char disk_name[DISK_NAME_LEN]; 43 43 }; 44 44 45 45 static LIST_HEAD(nullb_list); 46 46 static struct mutex lock; 47 47 static int null_major; 48 48 static int nullb_indexes; 49 + static struct kmem_cache *ppa_cache; 49 50 50 51 struct completion_queue { 51 52 struct llist_head list; ··· 121 118 static int nr_devices = 2; 122 119 module_param(nr_devices, int, S_IRUGO); 123 120 MODULE_PARM_DESC(nr_devices, "Number of devices to register"); 121 + 122 + static bool use_lightnvm; 123 + module_param(use_lightnvm, bool, S_IRUGO); 124 + MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); 124 125 125 126 static int irqmode = NULL_IRQ_SOFTIRQ; 126 127 ··· 434 427 { 435 428 list_del_init(&nullb->list); 436 429 437 - del_gendisk(nullb->disk); 430 + if (use_lightnvm) 431 + nvm_unregister(nullb->disk_name); 432 + else 433 + del_gendisk(nullb->disk); 438 434 blk_cleanup_queue(nullb->q); 439 435 if (queue_mode == NULL_Q_MQ) 440 436 blk_mq_free_tag_set(&nullb->tag_set); 441 - put_disk(nullb->disk); 437 + if (!use_lightnvm) 438 + put_disk(nullb->disk); 442 439 cleanup_queues(nullb); 443 440 kfree(nullb); 444 441 } 442 + 443 + #ifdef CONFIG_NVM 444 + 445 + static void null_lnvm_end_io(struct request *rq, int error) 446 + { 447 + struct nvm_rq *rqd = rq->end_io_data; 448 + struct nvm_dev *dev = rqd->dev; 449 + 450 + dev->mt->end_io(rqd, error); 451 + 452 + blk_put_request(rq); 453 + } 454 + 455 + static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) 456 + { 457 + struct request *rq; 458 + struct bio *bio = rqd->bio; 459 + 460 + rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); 461 + if (IS_ERR(rq)) 462 + return -ENOMEM; 463 + 464 + rq->cmd_type = REQ_TYPE_DRV_PRIV; 465 + 
rq->__sector = bio->bi_iter.bi_sector; 466 + rq->ioprio = bio_prio(bio); 467 + 468 + if (bio_has_data(bio)) 469 + rq->nr_phys_segments = bio_phys_segments(q, bio); 470 + 471 + rq->__data_len = bio->bi_iter.bi_size; 472 + rq->bio = rq->biotail = bio; 473 + 474 + rq->end_io_data = rqd; 475 + 476 + blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io); 477 + 478 + return 0; 479 + } 480 + 481 + static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) 482 + { 483 + sector_t size = gb * 1024 * 1024 * 1024ULL; 484 + sector_t blksize; 485 + struct nvm_id_group *grp; 486 + 487 + id->ver_id = 0x1; 488 + id->vmnt = 0; 489 + id->cgrps = 1; 490 + id->cap = 0x3; 491 + id->dom = 0x1; 492 + 493 + id->ppaf.blk_offset = 0; 494 + id->ppaf.blk_len = 16; 495 + id->ppaf.pg_offset = 16; 496 + id->ppaf.pg_len = 16; 497 + id->ppaf.sect_offset = 32; 498 + id->ppaf.sect_len = 8; 499 + id->ppaf.pln_offset = 40; 500 + id->ppaf.pln_len = 8; 501 + id->ppaf.lun_offset = 48; 502 + id->ppaf.lun_len = 8; 503 + id->ppaf.ch_offset = 56; 504 + id->ppaf.ch_len = 8; 505 + 506 + do_div(size, bs); /* convert size to pages */ 507 + do_div(size, 256); /* concert size to pgs pr blk */ 508 + grp = &id->groups[0]; 509 + grp->mtype = 0; 510 + grp->fmtype = 0; 511 + grp->num_ch = 1; 512 + grp->num_pg = 256; 513 + blksize = size; 514 + do_div(size, (1 << 16)); 515 + grp->num_lun = size + 1; 516 + do_div(blksize, grp->num_lun); 517 + grp->num_blk = blksize; 518 + grp->num_pln = 1; 519 + 520 + grp->fpg_sz = bs; 521 + grp->csecs = bs; 522 + grp->trdt = 25000; 523 + grp->trdm = 25000; 524 + grp->tprt = 500000; 525 + grp->tprm = 500000; 526 + grp->tbet = 1500000; 527 + grp->tbem = 1500000; 528 + grp->mpos = 0x010101; /* single plane rwe */ 529 + grp->cpar = hw_queue_depth; 530 + 531 + return 0; 532 + } 533 + 534 + static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) 535 + { 536 + mempool_t *virtmem_pool; 537 + 538 + virtmem_pool = mempool_create_slab_pool(64, ppa_cache); 539 + if 
(!virtmem_pool) { 540 + pr_err("null_blk: Unable to create virtual memory pool\n"); 541 + return NULL; 542 + } 543 + 544 + return virtmem_pool; 545 + } 546 + 547 + static void null_lnvm_destroy_dma_pool(void *pool) 548 + { 549 + mempool_destroy(pool); 550 + } 551 + 552 + static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, 553 + gfp_t mem_flags, dma_addr_t *dma_handler) 554 + { 555 + return mempool_alloc(pool, mem_flags); 556 + } 557 + 558 + static void null_lnvm_dev_dma_free(void *pool, void *entry, 559 + dma_addr_t dma_handler) 560 + { 561 + mempool_free(entry, pool); 562 + } 563 + 564 + static struct nvm_dev_ops null_lnvm_dev_ops = { 565 + .identity = null_lnvm_id, 566 + .submit_io = null_lnvm_submit_io, 567 + 568 + .create_dma_pool = null_lnvm_create_dma_pool, 569 + .destroy_dma_pool = null_lnvm_destroy_dma_pool, 570 + .dev_dma_alloc = null_lnvm_dev_dma_alloc, 571 + .dev_dma_free = null_lnvm_dev_dma_free, 572 + 573 + /* Simulate nvme protocol restriction */ 574 + .max_phys_sect = 64, 575 + }; 576 + #else 577 + static struct nvm_dev_ops null_lnvm_dev_ops; 578 + #endif /* CONFIG_NVM */ 445 579 446 580 static int null_open(struct block_device *bdev, fmode_t mode) 447 581 { ··· 723 575 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 724 576 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); 725 577 726 - disk = nullb->disk = alloc_disk_node(1, home_node); 727 - if (!disk) { 728 - rv = -ENOMEM; 729 - goto out_cleanup_blk_queue; 730 - } 731 578 732 579 mutex_lock(&lock); 733 580 list_add_tail(&nullb->list, &nullb_list); ··· 732 589 blk_queue_logical_block_size(nullb->q, bs); 733 590 blk_queue_physical_block_size(nullb->q, bs); 734 591 592 + sprintf(nullb->disk_name, "nullb%d", nullb->index); 593 + 594 + if (use_lightnvm) { 595 + rv = nvm_register(nullb->q, nullb->disk_name, 596 + &null_lnvm_dev_ops); 597 + if (rv) 598 + goto out_cleanup_blk_queue; 599 + goto done; 600 + } 601 + 602 + disk = nullb->disk = alloc_disk_node(1, 
home_node); 603 + if (!disk) { 604 + rv = -ENOMEM; 605 + goto out_cleanup_lightnvm; 606 + } 735 607 size = gb * 1024 * 1024 * 1024ULL; 736 608 set_capacity(disk, size >> 9); 737 609 ··· 756 598 disk->fops = &null_fops; 757 599 disk->private_data = nullb; 758 600 disk->queue = nullb->q; 759 - sprintf(disk->disk_name, "nullb%d", nullb->index); 601 + strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); 602 + 760 603 add_disk(disk); 604 + done: 761 605 return 0; 762 606 607 + out_cleanup_lightnvm: 608 + if (use_lightnvm) 609 + nvm_unregister(nullb->disk_name); 763 610 out_cleanup_blk_queue: 764 611 blk_cleanup_queue(nullb->q); 765 612 out_cleanup_tags: ··· 786 623 pr_warn("null_blk: invalid block size\n"); 787 624 pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); 788 625 bs = PAGE_SIZE; 626 + } 627 + 628 + if (use_lightnvm && bs != 4096) { 629 + pr_warn("null_blk: LightNVM only supports 4k block size\n"); 630 + pr_warn("null_blk: defaults block size to 4k\n"); 631 + bs = 4096; 632 + } 633 + 634 + if (use_lightnvm && queue_mode != NULL_Q_MQ) { 635 + pr_warn("null_blk: LightNVM only supported for blk-mq\n"); 636 + pr_warn("null_blk: defaults queue mode to blk-mq\n"); 637 + queue_mode = NULL_Q_MQ; 789 638 } 790 639 791 640 if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { ··· 830 655 if (null_major < 0) 831 656 return null_major; 832 657 658 + if (use_lightnvm) { 659 + ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), 660 + 0, 0, NULL); 661 + if (!ppa_cache) { 662 + pr_err("null_blk: unable to create ppa cache\n"); 663 + return -ENOMEM; 664 + } 665 + } 666 + 833 667 for (i = 0; i < nr_devices; i++) { 834 668 if (null_add_dev()) { 835 669 unregister_blkdev(null_major, "nullb"); 836 - return -EINVAL; 670 + goto err_ppa; 837 671 } 838 672 } 839 673 840 674 pr_info("null: module loaded\n"); 841 675 return 0; 676 + err_ppa: 677 + kmem_cache_destroy(ppa_cache); 678 + return -EINVAL; 842 679 } 843 680 844 681 static void __exit null_exit(void) ··· 
865 678 null_del_dev(nullb); 866 679 } 867 680 mutex_unlock(&lock); 681 + 682 + kmem_cache_destroy(ppa_cache); 868 683 } 869 684 870 685 module_init(null_init);
+52 -30
drivers/char/ipmi/ipmi_si_intf.c
··· 412 412 return rv; 413 413 } 414 414 415 - static void start_check_enables(struct smi_info *smi_info) 415 + static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) 416 + { 417 + smi_info->last_timeout_jiffies = jiffies; 418 + mod_timer(&smi_info->si_timer, new_val); 419 + smi_info->timer_running = true; 420 + } 421 + 422 + /* 423 + * Start a new message and (re)start the timer and thread. 424 + */ 425 + static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, 426 + unsigned int size) 427 + { 428 + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); 429 + 430 + if (smi_info->thread) 431 + wake_up_process(smi_info->thread); 432 + 433 + smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); 434 + } 435 + 436 + static void start_check_enables(struct smi_info *smi_info, bool start_timer) 416 437 { 417 438 unsigned char msg[2]; 418 439 419 440 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 420 441 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 421 442 422 - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 443 + if (start_timer) 444 + start_new_msg(smi_info, msg, 2); 445 + else 446 + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 423 447 smi_info->si_state = SI_CHECKING_ENABLES; 424 448 } 425 449 426 - static void start_clear_flags(struct smi_info *smi_info) 450 + static void start_clear_flags(struct smi_info *smi_info, bool start_timer) 427 451 { 428 452 unsigned char msg[3]; 429 453 ··· 456 432 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 457 433 msg[2] = WDT_PRE_TIMEOUT_INT; 458 434 459 - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 435 + if (start_timer) 436 + start_new_msg(smi_info, msg, 3); 437 + else 438 + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 460 439 smi_info->si_state = SI_CLEARING_FLAGS; 461 440 } 462 441 ··· 469 442 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; 470 443 smi_info->curr_msg->data_size = 2; 471 444 472 - 
smi_info->handlers->start_transaction( 473 - smi_info->si_sm, 474 - smi_info->curr_msg->data, 475 - smi_info->curr_msg->data_size); 445 + start_new_msg(smi_info, smi_info->curr_msg->data, 446 + smi_info->curr_msg->data_size); 476 447 smi_info->si_state = SI_GETTING_MESSAGES; 477 448 } 478 449 ··· 480 455 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 481 456 smi_info->curr_msg->data_size = 2; 482 457 483 - smi_info->handlers->start_transaction( 484 - smi_info->si_sm, 485 - smi_info->curr_msg->data, 486 - smi_info->curr_msg->data_size); 458 + start_new_msg(smi_info, smi_info->curr_msg->data, 459 + smi_info->curr_msg->data_size); 487 460 smi_info->si_state = SI_GETTING_EVENTS; 488 - } 489 - 490 - static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) 491 - { 492 - smi_info->last_timeout_jiffies = jiffies; 493 - mod_timer(&smi_info->si_timer, new_val); 494 - smi_info->timer_running = true; 495 461 } 496 462 497 463 /* ··· 494 478 * Note that we cannot just use disable_irq(), since the interrupt may 495 479 * be shared. 
496 480 */ 497 - static inline bool disable_si_irq(struct smi_info *smi_info) 481 + static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) 498 482 { 499 483 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 500 484 smi_info->interrupt_disabled = true; 501 - start_check_enables(smi_info); 485 + start_check_enables(smi_info, start_timer); 502 486 return true; 503 487 } 504 488 return false; ··· 508 492 { 509 493 if ((smi_info->irq) && (smi_info->interrupt_disabled)) { 510 494 smi_info->interrupt_disabled = false; 511 - start_check_enables(smi_info); 495 + start_check_enables(smi_info, true); 512 496 return true; 513 497 } 514 498 return false; ··· 526 510 527 511 msg = ipmi_alloc_smi_msg(); 528 512 if (!msg) { 529 - if (!disable_si_irq(smi_info)) 513 + if (!disable_si_irq(smi_info, true)) 530 514 smi_info->si_state = SI_NORMAL; 531 515 } else if (enable_si_irq(smi_info)) { 532 516 ipmi_free_smi_msg(msg); ··· 542 526 /* Watchdog pre-timeout */ 543 527 smi_inc_stat(smi_info, watchdog_pretimeouts); 544 528 545 - start_clear_flags(smi_info); 529 + start_clear_flags(smi_info, true); 546 530 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 547 531 if (smi_info->intf) 548 532 ipmi_smi_watchdog_pretimeout(smi_info->intf); ··· 895 879 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 896 880 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 897 881 898 - smi_info->handlers->start_transaction( 899 - smi_info->si_sm, msg, 2); 882 + start_new_msg(smi_info, msg, 2); 900 883 smi_info->si_state = SI_GETTING_FLAGS; 901 884 goto restart; 902 885 } ··· 925 910 * disable and messages disabled. 
926 911 */ 927 912 if (smi_info->supports_event_msg_buff || smi_info->irq) { 928 - start_check_enables(smi_info); 913 + start_check_enables(smi_info, true); 929 914 } else { 930 915 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 931 916 if (!smi_info->curr_msg) ··· 935 920 } 936 921 goto restart; 937 922 } 923 + 924 + if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { 925 + /* Ok it if fails, the timer will just go off. */ 926 + if (del_timer(&smi_info->si_timer)) 927 + smi_info->timer_running = false; 928 + } 929 + 938 930 out: 939 931 return si_sm_result; 940 932 } ··· 2582 2560 .data = (void *)(unsigned long) SI_BT }, 2583 2561 {}, 2584 2562 }; 2563 + MODULE_DEVICE_TABLE(of, of_ipmi_match); 2585 2564 2586 2565 static int of_ipmi_probe(struct platform_device *dev) 2587 2566 { ··· 2669 2646 } 2670 2647 return 0; 2671 2648 } 2672 - MODULE_DEVICE_TABLE(of, of_ipmi_match); 2673 2649 #else 2674 2650 #define of_ipmi_match NULL 2675 2651 static int of_ipmi_probe(struct platform_device *dev) ··· 3635 3613 * Start clearing the flags before we enable interrupts or the 3636 3614 * timer to avoid racing with the timer. 3637 3615 */ 3638 - start_clear_flags(new_smi); 3616 + start_clear_flags(new_smi, false); 3639 3617 3640 3618 /* 3641 3619 * IRQ is defined to be set when non-zero. req_events will ··· 3930 3908 poll(to_clean); 3931 3909 schedule_timeout_uninterruptible(1); 3932 3910 } 3933 - disable_si_irq(to_clean); 3911 + disable_si_irq(to_clean, false); 3934 3912 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3935 3913 poll(to_clean); 3936 3914 schedule_timeout_uninterruptible(1);
+7 -1
drivers/char/ipmi/ipmi_watchdog.c
··· 153 153 /* The pre-timeout is disabled by default. */ 154 154 static int pretimeout; 155 155 156 + /* Default timeout to set on panic */ 157 + static int panic_wdt_timeout = 255; 158 + 156 159 /* Default action is to reset the board on a timeout. */ 157 160 static unsigned char action_val = WDOG_TIMEOUT_RESET; 158 161 ··· 295 292 296 293 module_param(pretimeout, timeout, 0644); 297 294 MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); 295 + 296 + module_param(panic_wdt_timeout, timeout, 0644); 297 + MODULE_PARM_DESC(timeout, "Timeout value on kernel panic in seconds."); 298 298 299 299 module_param_cb(action, &param_ops_str, action_op, 0644); 300 300 MODULE_PARM_DESC(action, "Timeout action. One of: " ··· 1195 1189 /* Make sure we do this only once. */ 1196 1190 panic_event_handled = 1; 1197 1191 1198 - timeout = 255; 1192 + timeout = panic_wdt_timeout; 1199 1193 pretimeout = 0; 1200 1194 panic_halt_ipmi_set_timeout(); 1201 1195 }
+1
drivers/clocksource/Kconfig
··· 1 1 menu "Clock Source drivers" 2 + depends on !ARCH_USES_GETTIMEOFFSET 2 3 3 4 config CLKSRC_OF 4 5 bool
+2 -2
drivers/clocksource/fsl_ftm_timer.c
··· 203 203 int err; 204 204 205 205 ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); 206 - ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); 206 + ftm_writel(~0u, priv->clkevt_base + FTM_MOD); 207 207 208 208 ftm_reset_counter(priv->clkevt_base); 209 209 ··· 230 230 int err; 231 231 232 232 ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); 233 - ftm_writel(~0UL, priv->clksrc_base + FTM_MOD); 233 + ftm_writel(~0u, priv->clksrc_base + FTM_MOD); 234 234 235 235 ftm_reset_counter(priv->clksrc_base); 236 236
+1
drivers/cpufreq/Kconfig.arm
··· 84 84 config ARM_MT8173_CPUFREQ 85 85 bool "Mediatek MT8173 CPUFreq support" 86 86 depends on ARCH_MEDIATEK && REGULATOR 87 + depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) 87 88 depends on !CPU_THERMAL || THERMAL=y 88 89 select PM_OPP 89 90 help
-1
drivers/cpufreq/Kconfig.x86
··· 5 5 config X86_INTEL_PSTATE 6 6 bool "Intel P state control" 7 7 depends on X86 8 - select ACPI_PROCESSOR if ACPI 9 8 help 10 9 This driver provides a P state for Intel core processors. 11 10 The driver implements an internal governor and will become
+73 -245
drivers/cpufreq/intel_pstate.c
··· 34 34 #include <asm/cpu_device_id.h> 35 35 #include <asm/cpufeature.h> 36 36 37 - #if IS_ENABLED(CONFIG_ACPI) 38 - #include <acpi/processor.h> 39 - #endif 40 - 41 - #define BYT_RATIOS 0x66a 42 - #define BYT_VIDS 0x66b 43 - #define BYT_TURBO_RATIOS 0x66c 44 - #define BYT_TURBO_VIDS 0x66d 37 + #define ATOM_RATIOS 0x66a 38 + #define ATOM_VIDS 0x66b 39 + #define ATOM_TURBO_RATIOS 0x66c 40 + #define ATOM_TURBO_VIDS 0x66d 45 41 46 42 #define FRAC_BITS 8 47 43 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) ··· 113 117 u64 prev_mperf; 114 118 u64 prev_tsc; 115 119 struct sample sample; 116 - #if IS_ENABLED(CONFIG_ACPI) 117 - struct acpi_processor_performance acpi_perf_data; 118 - #endif 119 120 }; 120 121 121 122 static struct cpudata **all_cpu_data; ··· 143 150 static struct pstate_adjust_policy pid_params; 144 151 static struct pstate_funcs pstate_funcs; 145 152 static int hwp_active; 146 - static int no_acpi_perf; 147 153 148 154 struct perf_limits { 149 155 int no_turbo; ··· 155 163 int max_sysfs_pct; 156 164 int min_policy_pct; 157 165 int min_sysfs_pct; 158 - int max_perf_ctl; 159 - int min_perf_ctl; 160 166 }; 161 167 162 168 static struct perf_limits performance_limits = { ··· 181 191 .max_sysfs_pct = 100, 182 192 .min_policy_pct = 0, 183 193 .min_sysfs_pct = 0, 184 - .max_perf_ctl = 0, 185 - .min_perf_ctl = 0, 186 194 }; 187 195 188 196 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE 189 197 static struct perf_limits *limits = &performance_limits; 190 198 #else 191 199 static struct perf_limits *limits = &powersave_limits; 192 - #endif 193 - 194 - #if IS_ENABLED(CONFIG_ACPI) 195 - /* 196 - * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and 197 - * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and 198 - * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state 199 - * ratio, out of it only high 8 bits are used. For example 0x1700 is setting 200 - * target ratio 0x17. 
The _PSS control value stores in a format which can be 201 - * directly written to PERF_CTL MSR. But in intel_pstate driver this shift 202 - * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). 203 - * This function converts the _PSS control value to intel pstate driver format 204 - * for comparison and assignment. 205 - */ 206 - static int convert_to_native_pstate_format(struct cpudata *cpu, int index) 207 - { 208 - return cpu->acpi_perf_data.states[index].control >> 8; 209 - } 210 - 211 - static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) 212 - { 213 - struct cpudata *cpu; 214 - int ret; 215 - bool turbo_absent = false; 216 - int max_pstate_index; 217 - int min_pss_ctl, max_pss_ctl, turbo_pss_ctl; 218 - int i; 219 - 220 - cpu = all_cpu_data[policy->cpu]; 221 - 222 - pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n", 223 - cpu->pstate.min_pstate, cpu->pstate.max_pstate, 224 - cpu->pstate.turbo_pstate); 225 - 226 - if (!cpu->acpi_perf_data.shared_cpu_map && 227 - zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map, 228 - GFP_KERNEL, cpu_to_node(policy->cpu))) { 229 - return -ENOMEM; 230 - } 231 - 232 - ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 233 - policy->cpu); 234 - if (ret) 235 - return ret; 236 - 237 - /* 238 - * Check if the control value in _PSS is for PERF_CTL MSR, which should 239 - * guarantee that the states returned by it map to the states in our 240 - * list directly. 241 - */ 242 - if (cpu->acpi_perf_data.control_register.space_id != 243 - ACPI_ADR_SPACE_FIXED_HARDWARE) 244 - return -EIO; 245 - 246 - pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu); 247 - for (i = 0; i < cpu->acpi_perf_data.state_count; i++) 248 - pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 249 - (i == cpu->acpi_perf_data.state ? 
'*' : ' '), i, 250 - (u32) cpu->acpi_perf_data.states[i].core_frequency, 251 - (u32) cpu->acpi_perf_data.states[i].power, 252 - (u32) cpu->acpi_perf_data.states[i].control); 253 - 254 - /* 255 - * If there is only one entry _PSS, simply ignore _PSS and continue as 256 - * usual without taking _PSS into account 257 - */ 258 - if (cpu->acpi_perf_data.state_count < 2) 259 - return 0; 260 - 261 - turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); 262 - min_pss_ctl = convert_to_native_pstate_format(cpu, 263 - cpu->acpi_perf_data.state_count - 1); 264 - /* Check if there is a turbo freq in _PSS */ 265 - if (turbo_pss_ctl <= cpu->pstate.max_pstate && 266 - turbo_pss_ctl > cpu->pstate.min_pstate) { 267 - pr_debug("intel_pstate: no turbo range exists in _PSS\n"); 268 - limits->no_turbo = limits->turbo_disabled = 1; 269 - cpu->pstate.turbo_pstate = cpu->pstate.max_pstate; 270 - turbo_absent = true; 271 - } 272 - 273 - /* Check if the max non turbo p state < Intel P state max */ 274 - max_pstate_index = turbo_absent ? 0 : 1; 275 - max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index); 276 - if (max_pss_ctl < cpu->pstate.max_pstate && 277 - max_pss_ctl > cpu->pstate.min_pstate) 278 - cpu->pstate.max_pstate = max_pss_ctl; 279 - 280 - /* check If min perf > Intel P State min */ 281 - if (min_pss_ctl > cpu->pstate.min_pstate && 282 - min_pss_ctl < cpu->pstate.max_pstate) { 283 - cpu->pstate.min_pstate = min_pss_ctl; 284 - policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling; 285 - } 286 - 287 - if (turbo_absent) 288 - policy->cpuinfo.max_freq = cpu->pstate.max_pstate * 289 - cpu->pstate.scaling; 290 - else { 291 - policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 292 - cpu->pstate.scaling; 293 - /* 294 - * The _PSS table doesn't contain whole turbo frequency range. 295 - * This just contains +1 MHZ above the max non turbo frequency, 296 - * with control value corresponding to max turbo ratio. 
But 297 - * when cpufreq set policy is called, it will call with this 298 - * max frequency, which will cause a reduced performance as 299 - * this driver uses real max turbo frequency as the max 300 - * frequeny. So correct this frequency in _PSS table to 301 - * correct max turbo frequency based on the turbo ratio. 302 - * Also need to convert to MHz as _PSS freq is in MHz. 303 - */ 304 - cpu->acpi_perf_data.states[0].core_frequency = 305 - turbo_pss_ctl * 100; 306 - } 307 - 308 - pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n", 309 - cpu->pstate.min_pstate, cpu->pstate.max_pstate, 310 - cpu->pstate.turbo_pstate); 311 - pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n", 312 - policy->cpuinfo.max_freq, policy->cpuinfo.min_freq); 313 - 314 - return 0; 315 - } 316 - 317 - static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 318 - { 319 - struct cpudata *cpu; 320 - 321 - if (!no_acpi_perf) 322 - return 0; 323 - 324 - cpu = all_cpu_data[policy->cpu]; 325 - acpi_processor_unregister_performance(policy->cpu); 326 - return 0; 327 - } 328 - 329 - #else 330 - static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) 331 - { 332 - return 0; 333 - } 334 - 335 - static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 336 - { 337 - return 0; 338 - } 339 200 #endif 340 201 341 202 static inline void pid_reset(struct _pid *pid, int setpoint, int busy, ··· 528 687 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 529 688 } 530 689 531 - static int byt_get_min_pstate(void) 690 + static int atom_get_min_pstate(void) 532 691 { 533 692 u64 value; 534 693 535 - rdmsrl(BYT_RATIOS, value); 694 + rdmsrl(ATOM_RATIOS, value); 536 695 return (value >> 8) & 0x7F; 537 696 } 538 697 539 - static int byt_get_max_pstate(void) 698 + static int atom_get_max_pstate(void) 540 699 { 541 700 u64 value; 542 701 543 - rdmsrl(BYT_RATIOS, value); 702 + rdmsrl(ATOM_RATIOS, value); 544 703 return (value >> 16) & 0x7F; 545 
704 } 546 705 547 - static int byt_get_turbo_pstate(void) 706 + static int atom_get_turbo_pstate(void) 548 707 { 549 708 u64 value; 550 709 551 - rdmsrl(BYT_TURBO_RATIOS, value); 710 + rdmsrl(ATOM_TURBO_RATIOS, value); 552 711 return value & 0x7F; 553 712 } 554 713 555 - static void byt_set_pstate(struct cpudata *cpudata, int pstate) 714 + static void atom_set_pstate(struct cpudata *cpudata, int pstate) 556 715 { 557 716 u64 val; 558 717 int32_t vid_fp; ··· 577 736 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); 578 737 } 579 738 580 - #define BYT_BCLK_FREQS 5 581 - static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; 582 - 583 - static int byt_get_scaling(void) 739 + static int silvermont_get_scaling(void) 584 740 { 585 741 u64 value; 586 742 int i; 743 + /* Defined in Table 35-6 from SDM (Sept 2015) */ 744 + static int silvermont_freq_table[] = { 745 + 83300, 100000, 133300, 116700, 80000}; 587 746 588 747 rdmsrl(MSR_FSB_FREQ, value); 589 - i = value & 0x3; 748 + i = value & 0x7; 749 + WARN_ON(i > 4); 590 750 591 - BUG_ON(i > BYT_BCLK_FREQS); 592 - 593 - return byt_freq_table[i] * 100; 751 + return silvermont_freq_table[i]; 594 752 } 595 753 596 - static void byt_get_vid(struct cpudata *cpudata) 754 + static int airmont_get_scaling(void) 755 + { 756 + u64 value; 757 + int i; 758 + /* Defined in Table 35-10 from SDM (Sept 2015) */ 759 + static int airmont_freq_table[] = { 760 + 83300, 100000, 133300, 116700, 80000, 761 + 93300, 90000, 88900, 87500}; 762 + 763 + rdmsrl(MSR_FSB_FREQ, value); 764 + i = value & 0xF; 765 + WARN_ON(i > 8); 766 + 767 + return airmont_freq_table[i]; 768 + } 769 + 770 + static void atom_get_vid(struct cpudata *cpudata) 597 771 { 598 772 u64 value; 599 773 600 - rdmsrl(BYT_VIDS, value); 774 + rdmsrl(ATOM_VIDS, value); 601 775 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 602 776 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 603 777 cpudata->vid.ratio = div_fp( ··· 620 764 
int_tofp(cpudata->pstate.max_pstate - 621 765 cpudata->pstate.min_pstate)); 622 766 623 - rdmsrl(BYT_TURBO_VIDS, value); 767 + rdmsrl(ATOM_TURBO_VIDS, value); 624 768 cpudata->vid.turbo = value & 0x7f; 625 769 } 626 770 ··· 741 885 }, 742 886 }; 743 887 744 - static struct cpu_defaults byt_params = { 888 + static struct cpu_defaults silvermont_params = { 745 889 .pid_policy = { 746 890 .sample_rate_ms = 10, 747 891 .deadband = 0, ··· 751 895 .i_gain_pct = 4, 752 896 }, 753 897 .funcs = { 754 - .get_max = byt_get_max_pstate, 755 - .get_max_physical = byt_get_max_pstate, 756 - .get_min = byt_get_min_pstate, 757 - .get_turbo = byt_get_turbo_pstate, 758 - .set = byt_set_pstate, 759 - .get_scaling = byt_get_scaling, 760 - .get_vid = byt_get_vid, 898 + .get_max = atom_get_max_pstate, 899 + .get_max_physical = atom_get_max_pstate, 900 + .get_min = atom_get_min_pstate, 901 + .get_turbo = atom_get_turbo_pstate, 902 + .set = atom_set_pstate, 903 + .get_scaling = silvermont_get_scaling, 904 + .get_vid = atom_get_vid, 905 + }, 906 + }; 907 + 908 + static struct cpu_defaults airmont_params = { 909 + .pid_policy = { 910 + .sample_rate_ms = 10, 911 + .deadband = 0, 912 + .setpoint = 60, 913 + .p_gain_pct = 14, 914 + .d_gain_pct = 0, 915 + .i_gain_pct = 4, 916 + }, 917 + .funcs = { 918 + .get_max = atom_get_max_pstate, 919 + .get_max_physical = atom_get_max_pstate, 920 + .get_min = atom_get_min_pstate, 921 + .get_turbo = atom_get_turbo_pstate, 922 + .set = atom_set_pstate, 923 + .get_scaling = airmont_get_scaling, 924 + .get_vid = atom_get_vid, 761 925 }, 762 926 }; 763 927 ··· 814 938 * policy, or by cpu specific default values determined through 815 939 * experimentation. 
816 940 */ 817 - if (limits->max_perf_ctl && limits->max_sysfs_pct >= 818 - limits->max_policy_pct) { 819 - *max = limits->max_perf_ctl; 820 - } else { 821 - max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), 822 - limits->max_perf)); 823 - *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate, 824 - cpu->pstate.turbo_pstate); 825 - } 941 + max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf)); 942 + *max = clamp_t(int, max_perf_adj, 943 + cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 826 944 827 - if (limits->min_perf_ctl) { 828 - *min = limits->min_perf_ctl; 829 - } else { 830 - min_perf = fp_toint(mul_fp(int_tofp(max_perf), 831 - limits->min_perf)); 832 - *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); 833 - } 945 + min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf)); 946 + *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); 834 947 } 835 948 836 949 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) ··· 1018 1153 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1019 1154 ICPU(0x2a, core_params), 1020 1155 ICPU(0x2d, core_params), 1021 - ICPU(0x37, byt_params), 1156 + ICPU(0x37, silvermont_params), 1022 1157 ICPU(0x3a, core_params), 1023 1158 ICPU(0x3c, core_params), 1024 1159 ICPU(0x3d, core_params), ··· 1027 1162 ICPU(0x45, core_params), 1028 1163 ICPU(0x46, core_params), 1029 1164 ICPU(0x47, core_params), 1030 - ICPU(0x4c, byt_params), 1165 + ICPU(0x4c, airmont_params), 1031 1166 ICPU(0x4e, core_params), 1032 1167 ICPU(0x4f, core_params), 1033 1168 ICPU(0x5e, core_params), ··· 1094 1229 1095 1230 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1096 1231 { 1097 - #if IS_ENABLED(CONFIG_ACPI) 1098 - struct cpudata *cpu; 1099 - int i; 1100 - #endif 1101 - pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__, 1102 - policy->cpuinfo.max_freq, policy->max); 1103 1232 if (!policy->cpuinfo.max_freq) 1104 1233 return -ENODEV; 1105 1234 
··· 1128 1269 int_tofp(100)); 1129 1270 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 1130 1271 int_tofp(100)); 1131 - 1132 - #if IS_ENABLED(CONFIG_ACPI) 1133 - cpu = all_cpu_data[policy->cpu]; 1134 - for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 1135 - int control; 1136 - 1137 - control = convert_to_native_pstate_format(cpu, i); 1138 - if (control * cpu->pstate.scaling == policy->max) 1139 - limits->max_perf_ctl = control; 1140 - if (control * cpu->pstate.scaling == policy->min) 1141 - limits->min_perf_ctl = control; 1142 - } 1143 - 1144 - pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n", 1145 - policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl, 1146 - limits->max_perf_ctl); 1147 - #endif 1148 1272 1149 1273 if (hwp_active) 1150 1274 intel_pstate_hwp_set(); ··· 1183 1341 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 1184 1342 policy->cpuinfo.max_freq = 1185 1343 cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1186 - if (!no_acpi_perf) 1187 - intel_pstate_init_perf_limits(policy); 1188 - /* 1189 - * If there is no acpi perf data or error, we ignore and use Intel P 1190 - * state calculated limits, So this is not fatal error. 
1191 - */ 1192 1344 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 1193 1345 cpumask_set_cpu(policy->cpu, policy->cpus); 1194 1346 1195 1347 return 0; 1196 - } 1197 - 1198 - static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 1199 - { 1200 - return intel_pstate_exit_perf_limits(policy); 1201 1348 } 1202 1349 1203 1350 static struct cpufreq_driver intel_pstate_driver = { ··· 1195 1364 .setpolicy = intel_pstate_set_policy, 1196 1365 .get = intel_pstate_get, 1197 1366 .init = intel_pstate_cpu_init, 1198 - .exit = intel_pstate_cpu_exit, 1199 1367 .stop_cpu = intel_pstate_stop_cpu, 1200 1368 .name = "intel_pstate", 1201 1369 }; ··· 1236 1406 } 1237 1407 1238 1408 #if IS_ENABLED(CONFIG_ACPI) 1409 + #include <acpi/processor.h> 1239 1410 1240 1411 static bool intel_pstate_no_acpi_pss(void) 1241 1412 { ··· 1432 1601 force_load = 1; 1433 1602 if (!strcmp(str, "hwp_only")) 1434 1603 hwp_only = 1; 1435 - if (!strcmp(str, "no_acpi")) 1436 - no_acpi_perf = 1; 1437 - 1438 1604 return 0; 1439 1605 } 1440 1606 early_param("intel_pstate", intel_pstate_setup);
+10 -10
drivers/dma/at_hdmac.c
··· 729 729 return NULL; 730 730 731 731 dev_info(chan2dev(chan), 732 - "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", 733 - __func__, xt->src_start, xt->dst_start, xt->numf, 732 + "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", 733 + __func__, &xt->src_start, &xt->dst_start, xt->numf, 734 734 xt->frame_size, flags); 735 735 736 736 /* ··· 824 824 u32 ctrla; 825 825 u32 ctrlb; 826 826 827 - dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 828 - dest, src, len, flags); 827 + dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", 828 + &dest, &src, len, flags); 829 829 830 830 if (unlikely(!len)) { 831 831 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); ··· 938 938 void __iomem *vaddr; 939 939 dma_addr_t paddr; 940 940 941 - dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, 942 - dest, value, len, flags); 941 + dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, 942 + &dest, value, len, flags); 943 943 944 944 if (unlikely(!len)) { 945 945 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); ··· 1022 1022 dma_addr_t dest = sg_dma_address(sg); 1023 1023 size_t len = sg_dma_len(sg); 1024 1024 1025 - dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n", 1026 - __func__, dest, len); 1025 + dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n", 1026 + __func__, &dest, len); 1027 1027 1028 1028 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { 1029 1029 dev_err(chan2dev(chan), "%s: buffer is not aligned\n", ··· 1439 1439 unsigned int periods = buf_len / period_len; 1440 1440 unsigned int i; 1441 1441 1442 - dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 1442 + dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n", 1443 1443 direction == DMA_MEM_TO_DEV ? 
"TO DEVICE" : "FROM DEVICE", 1444 - buf_addr, 1444 + &buf_addr, 1445 1445 periods, buf_len, period_len); 1446 1446 1447 1447 if (unlikely(!atslave || !buf_len || !period_len)) {
+3 -3
drivers/dma/at_hdmac_regs.h
··· 385 385 static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) 386 386 { 387 387 dev_crit(chan2dev(&atchan->chan_common), 388 - " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 389 - lli->saddr, lli->daddr, 390 - lli->ctrla, lli->ctrlb, lli->dscr); 388 + " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n", 389 + &lli->saddr, &lli->daddr, 390 + lli->ctrla, lli->ctrlb, &lli->dscr); 391 391 } 392 392 393 393
+10 -10
drivers/dma/at_xdmac.c
··· 920 920 desc->lld.mbr_cfg = chan_cc; 921 921 922 922 dev_dbg(chan2dev(chan), 923 - "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 924 - __func__, desc->lld.mbr_sa, desc->lld.mbr_da, 923 + "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 924 + __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, 925 925 desc->lld.mbr_ubc, desc->lld.mbr_cfg); 926 926 927 927 /* Chain lld. */ ··· 953 953 if ((xt->numf > 1) && (xt->frame_size > 1)) 954 954 return NULL; 955 955 956 - dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", 957 - __func__, xt->src_start, xt->dst_start, xt->numf, 956 + dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", 957 + __func__, &xt->src_start, &xt->dst_start, xt->numf, 958 958 xt->frame_size, flags); 959 959 960 960 src_addr = xt->src_start; ··· 1179 1179 desc->lld.mbr_cfg = chan_cc; 1180 1180 1181 1181 dev_dbg(chan2dev(chan), 1182 - "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 1183 - __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, 1182 + "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 1183 + __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, 1184 1184 desc->lld.mbr_cfg); 1185 1185 1186 1186 return desc; ··· 1193 1193 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1194 1194 struct at_xdmac_desc *desc; 1195 1195 1196 - dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1197 - __func__, dest, len, value, flags); 1196 + dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", 1197 + __func__, &dest, len, value, flags); 1198 1198 1199 1199 if (unlikely(!len)) 1200 1200 return NULL; ··· 1229 1229 1230 1230 /* Prepare descriptors. 
*/ 1231 1231 for_each_sg(sgl, sg, sg_len, i) { 1232 - dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1233 - __func__, sg_dma_address(sg), sg_dma_len(sg), 1232 + dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", 1233 + __func__, &sg_dma_address(sg), sg_dma_len(sg), 1234 1234 value, flags); 1235 1235 desc = at_xdmac_memset_create_desc(chan, atchan, 1236 1236 sg_dma_address(sg),
+2 -2
drivers/dma/edma.c
··· 107 107 108 108 /* CCCFG register */ 109 109 #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ 110 - #define GET_NUM_QDMACH(x) (x & 0x70 >> 4) /* bits 4-6 */ 110 + #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ 111 111 #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ 112 112 #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ 113 113 #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ ··· 1565 1565 struct platform_device *tc_pdev; 1566 1566 int ret; 1567 1567 1568 - if (!tc) 1568 + if (!IS_ENABLED(CONFIG_OF) || !tc) 1569 1569 return; 1570 1570 1571 1571 tc_pdev = of_find_device_by_node(tc->node);
+1 -1
drivers/dma/imx-sdma.c
··· 1462 1462 1463 1463 #define EVENT_REMAP_CELLS 3 1464 1464 1465 - static int __init sdma_event_remap(struct sdma_engine *sdma) 1465 + static int sdma_event_remap(struct sdma_engine *sdma) 1466 1466 { 1467 1467 struct device_node *np = sdma->dev->of_node; 1468 1468 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
+8 -3
drivers/dma/sh/usb-dmac.c
··· 679 679 struct usb_dmac *dmac = dev_get_drvdata(dev); 680 680 int i; 681 681 682 - for (i = 0; i < dmac->n_channels; ++i) 682 + for (i = 0; i < dmac->n_channels; ++i) { 683 + if (!dmac->channels[i].iomem) 684 + break; 683 685 usb_dmac_chan_halt(&dmac->channels[i]); 686 + } 684 687 685 688 return 0; 686 689 } ··· 802 799 ret = pm_runtime_get_sync(&pdev->dev); 803 800 if (ret < 0) { 804 801 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); 805 - return ret; 802 + goto error_pm; 806 803 } 807 804 808 805 ret = usb_dmac_init(dmac); 809 - pm_runtime_put(&pdev->dev); 810 806 811 807 if (ret) { 812 808 dev_err(&pdev->dev, "failed to reset device\n"); ··· 853 851 if (ret < 0) 854 852 goto error; 855 853 854 + pm_runtime_put(&pdev->dev); 856 855 return 0; 857 856 858 857 error: 859 858 of_dma_controller_free(pdev->dev.of_node); 859 + pm_runtime_put(&pdev->dev); 860 + error_pm: 860 861 pm_runtime_disable(&pdev->dev); 861 862 return ret; 862 863 }
+58 -62
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 389 389 * Fences. 390 390 */ 391 391 struct amdgpu_fence_driver { 392 - struct amdgpu_ring *ring; 393 392 uint64_t gpu_addr; 394 393 volatile uint32_t *cpu_addr; 395 394 /* sync_seq is protected by ring emission lock */ ··· 397 398 bool initialized; 398 399 struct amdgpu_irq_src *irq_src; 399 400 unsigned irq_type; 400 - struct delayed_work lockup_work; 401 + struct timer_list fallback_timer; 401 402 wait_queue_head_t fence_queue; 402 403 }; 403 404 ··· 916 917 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 917 918 918 919 struct amdgpu_vm_pt { 919 - struct amdgpu_bo *bo; 920 - uint64_t addr; 920 + struct amdgpu_bo *bo; 921 + uint64_t addr; 921 922 }; 922 923 923 924 struct amdgpu_vm_id { ··· 925 926 uint64_t pd_gpu_addr; 926 927 /* last flushed PD/PT update */ 927 928 struct fence *flushed_updates; 928 - /* last use of vmid */ 929 - struct fence *last_id_use; 930 929 }; 931 930 932 931 struct amdgpu_vm { ··· 954 957 955 958 /* for id and flush management per ring */ 956 959 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; 960 + /* for interval tree */ 961 + spinlock_t it_lock; 957 962 }; 958 963 959 964 struct amdgpu_vm_manager { 960 - struct fence *active[AMDGPU_NUM_VM]; 961 - uint32_t max_pfn; 965 + struct { 966 + struct fence *active; 967 + atomic_long_t owner; 968 + } ids[AMDGPU_NUM_VM]; 969 + 970 + uint32_t max_pfn; 962 971 /* number of VMIDs */ 963 - unsigned nvm; 972 + unsigned nvm; 964 973 /* vram base address for page table entry */ 965 - u64 vram_base_offset; 974 + u64 vram_base_offset; 966 975 /* is vm enabled? 
*/ 967 - bool enabled; 968 - /* for hw to save the PD addr on suspend/resume */ 969 - uint32_t saved_table_addr[AMDGPU_NUM_VM]; 976 + bool enabled; 970 977 /* vm pte handling */ 971 978 const struct amdgpu_vm_pte_funcs *vm_pte_funcs; 972 979 struct amdgpu_ring *vm_pte_funcs_ring; 973 980 }; 981 + 982 + void amdgpu_vm_manager_fini(struct amdgpu_device *adev); 983 + int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); 984 + void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 985 + struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, 986 + struct amdgpu_vm *vm, 987 + struct list_head *head); 988 + int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 989 + struct amdgpu_sync *sync); 990 + void amdgpu_vm_flush(struct amdgpu_ring *ring, 991 + struct amdgpu_vm *vm, 992 + struct fence *updates); 993 + void amdgpu_vm_fence(struct amdgpu_device *adev, 994 + struct amdgpu_vm *vm, 995 + struct fence *fence); 996 + uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); 997 + int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 998 + struct amdgpu_vm *vm); 999 + int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 1000 + struct amdgpu_vm *vm); 1001 + int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1002 + struct amdgpu_sync *sync); 1003 + int amdgpu_vm_bo_update(struct amdgpu_device *adev, 1004 + struct amdgpu_bo_va *bo_va, 1005 + struct ttm_mem_reg *mem); 1006 + void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 1007 + struct amdgpu_bo *bo); 1008 + struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 1009 + struct amdgpu_bo *bo); 1010 + struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, 1011 + struct amdgpu_vm *vm, 1012 + struct amdgpu_bo *bo); 1013 + int amdgpu_vm_bo_map(struct amdgpu_device *adev, 1014 + struct amdgpu_bo_va *bo_va, 1015 + uint64_t addr, uint64_t offset, 1016 + uint64_t size, uint32_t flags); 1017 + 
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 1018 + struct amdgpu_bo_va *bo_va, 1019 + uint64_t addr); 1020 + void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 1021 + struct amdgpu_bo_va *bo_va); 1022 + int amdgpu_vm_free_job(struct amdgpu_job *job); 974 1023 975 1024 /* 976 1025 * context related structures ··· 1254 1211 /* relocations */ 1255 1212 struct amdgpu_bo_list_entry *vm_bos; 1256 1213 struct list_head validated; 1214 + struct fence *fence; 1257 1215 1258 1216 struct amdgpu_ib *ibs; 1259 1217 uint32_t num_ibs; ··· 1270 1226 struct amdgpu_device *adev; 1271 1227 struct amdgpu_ib *ibs; 1272 1228 uint32_t num_ibs; 1273 - struct mutex job_lock; 1229 + void *owner; 1274 1230 struct amdgpu_user_fence uf; 1275 1231 int (*free_job)(struct amdgpu_job *job); 1276 1232 }; ··· 2301 2257 bool amdgpu_card_posted(struct amdgpu_device *adev); 2302 2258 void amdgpu_update_display_priority(struct amdgpu_device *adev); 2303 2259 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); 2304 - struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, 2305 - struct drm_file *filp, 2306 - struct amdgpu_ctx *ctx, 2307 - struct amdgpu_ib *ibs, 2308 - uint32_t num_ibs); 2309 2260 2310 2261 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); 2311 2262 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, ··· 2357 2318 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, 2358 2319 unsigned long arg); 2359 2320 2360 - /* 2361 - * vm 2362 - */ 2363 - int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); 2364 - void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 2365 - struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, 2366 - struct amdgpu_vm *vm, 2367 - struct list_head *head); 2368 - int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 2369 - struct amdgpu_sync *sync); 2370 - void amdgpu_vm_flush(struct amdgpu_ring *ring, 2371 - struct 
amdgpu_vm *vm, 2372 - struct fence *updates); 2373 - void amdgpu_vm_fence(struct amdgpu_device *adev, 2374 - struct amdgpu_vm *vm, 2375 - struct amdgpu_fence *fence); 2376 - uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); 2377 - int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 2378 - struct amdgpu_vm *vm); 2379 - int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 2380 - struct amdgpu_vm *vm); 2381 - int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, 2382 - struct amdgpu_vm *vm, struct amdgpu_sync *sync); 2383 - int amdgpu_vm_bo_update(struct amdgpu_device *adev, 2384 - struct amdgpu_bo_va *bo_va, 2385 - struct ttm_mem_reg *mem); 2386 - void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 2387 - struct amdgpu_bo *bo); 2388 - struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 2389 - struct amdgpu_bo *bo); 2390 - struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, 2391 - struct amdgpu_vm *vm, 2392 - struct amdgpu_bo *bo); 2393 - int amdgpu_vm_bo_map(struct amdgpu_device *adev, 2394 - struct amdgpu_bo_va *bo_va, 2395 - uint64_t addr, uint64_t offset, 2396 - uint64_t size, uint32_t flags); 2397 - int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 2398 - struct amdgpu_bo_va *bo_va, 2399 - uint64_t addr); 2400 - void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 2401 - struct amdgpu_bo_va *bo_va); 2402 - int amdgpu_vm_free_job(struct amdgpu_job *job); 2403 2321 /* 2404 2322 * functions used by amdgpu_encoder.c 2405 2323 */
+72 -105
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 127 127 return 0; 128 128 } 129 129 130 - struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, 131 - struct drm_file *filp, 132 - struct amdgpu_ctx *ctx, 133 - struct amdgpu_ib *ibs, 134 - uint32_t num_ibs) 135 - { 136 - struct amdgpu_cs_parser *parser; 137 - int i; 138 - 139 - parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL); 140 - if (!parser) 141 - return NULL; 142 - 143 - parser->adev = adev; 144 - parser->filp = filp; 145 - parser->ctx = ctx; 146 - parser->ibs = ibs; 147 - parser->num_ibs = num_ibs; 148 - for (i = 0; i < num_ibs; i++) 149 - ibs[i].ctx = ctx; 150 - 151 - return parser; 152 - } 153 - 154 130 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) 155 131 { 156 132 union drm_amdgpu_cs *cs = data; ··· 439 463 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; 440 464 } 441 465 442 - static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) 466 + /** 467 + * cs_parser_fini() - clean parser states 468 + * @parser: parser structure holding parsing context. 469 + * @error: error number 470 + * 471 + * If error is set than unvalidate buffer, otherwise just free memory 472 + * used by parsing context. 473 + **/ 474 + static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) 443 475 { 476 + unsigned i; 477 + 444 478 if (!error) { 445 479 /* Sort the buffer list from the smallest to largest buffer, 446 480 * which affects the order of buffers in the LRU list. 
··· 465 479 list_sort(NULL, &parser->validated, cmp_size_smaller_first); 466 480 467 481 ttm_eu_fence_buffer_objects(&parser->ticket, 468 - &parser->validated, 469 - &parser->ibs[parser->num_ibs-1].fence->base); 482 + &parser->validated, 483 + parser->fence); 470 484 } else if (backoff) { 471 485 ttm_eu_backoff_reservation(&parser->ticket, 472 486 &parser->validated); 473 487 } 474 - } 488 + fence_put(parser->fence); 475 489 476 - static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) 477 - { 478 - unsigned i; 479 490 if (parser->ctx) 480 491 amdgpu_ctx_put(parser->ctx); 481 492 if (parser->bo_list) ··· 482 499 for (i = 0; i < parser->nchunks; i++) 483 500 drm_free_large(parser->chunks[i].kdata); 484 501 kfree(parser->chunks); 485 - if (!amdgpu_enable_scheduler) 486 - { 487 - if (parser->ibs) 488 - for (i = 0; i < parser->num_ibs; i++) 489 - amdgpu_ib_free(parser->adev, &parser->ibs[i]); 490 - kfree(parser->ibs); 491 - if (parser->uf.bo) 492 - drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); 493 - } 494 - 495 - kfree(parser); 496 - } 497 - 498 - /** 499 - * cs_parser_fini() - clean parser states 500 - * @parser: parser structure holding parsing context. 501 - * @error: error number 502 - * 503 - * If error is set than unvalidate buffer, otherwise just free memory 504 - * used by parsing context. 
505 - **/ 506 - static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) 507 - { 508 - amdgpu_cs_parser_fini_early(parser, error, backoff); 509 - amdgpu_cs_parser_fini_late(parser); 502 + if (parser->ibs) 503 + for (i = 0; i < parser->num_ibs; i++) 504 + amdgpu_ib_free(parser->adev, &parser->ibs[i]); 505 + kfree(parser->ibs); 506 + if (parser->uf.bo) 507 + drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); 510 508 } 511 509 512 510 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, ··· 574 610 } 575 611 576 612 r = amdgpu_bo_vm_update_pte(parser, vm); 577 - if (r) { 578 - goto out; 579 - } 580 - amdgpu_cs_sync_rings(parser); 581 - if (!amdgpu_enable_scheduler) 582 - r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, 583 - parser->filp); 613 + if (!r) 614 + amdgpu_cs_sync_rings(parser); 584 615 585 - out: 586 616 return r; 587 617 } 588 618 ··· 786 828 union drm_amdgpu_cs *cs = data; 787 829 struct amdgpu_fpriv *fpriv = filp->driver_priv; 788 830 struct amdgpu_vm *vm = &fpriv->vm; 789 - struct amdgpu_cs_parser *parser; 831 + struct amdgpu_cs_parser parser = {}; 790 832 bool reserved_buffers = false; 791 833 int i, r; 792 834 793 835 if (!adev->accel_working) 794 836 return -EBUSY; 795 837 796 - parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); 797 - if (!parser) 798 - return -ENOMEM; 799 - r = amdgpu_cs_parser_init(parser, data); 838 + parser.adev = adev; 839 + parser.filp = filp; 840 + 841 + r = amdgpu_cs_parser_init(&parser, data); 800 842 if (r) { 801 843 DRM_ERROR("Failed to initialize parser !\n"); 802 - amdgpu_cs_parser_fini(parser, r, false); 844 + amdgpu_cs_parser_fini(&parser, r, false); 803 845 r = amdgpu_cs_handle_lockup(adev, r); 804 846 return r; 805 847 } 806 848 mutex_lock(&vm->mutex); 807 - r = amdgpu_cs_parser_relocs(parser); 849 + r = amdgpu_cs_parser_relocs(&parser); 808 850 if (r == -ENOMEM) 809 851 DRM_ERROR("Not enough memory for command submission!\n"); 810 852 else 
if (r && r != -ERESTARTSYS) 811 853 DRM_ERROR("Failed to process the buffer list %d!\n", r); 812 854 else if (!r) { 813 855 reserved_buffers = true; 814 - r = amdgpu_cs_ib_fill(adev, parser); 856 + r = amdgpu_cs_ib_fill(adev, &parser); 815 857 } 816 858 817 859 if (!r) { 818 - r = amdgpu_cs_dependencies(adev, parser); 860 + r = amdgpu_cs_dependencies(adev, &parser); 819 861 if (r) 820 862 DRM_ERROR("Failed in the dependencies handling %d!\n", r); 821 863 } ··· 823 865 if (r) 824 866 goto out; 825 867 826 - for (i = 0; i < parser->num_ibs; i++) 827 - trace_amdgpu_cs(parser, i); 868 + for (i = 0; i < parser.num_ibs; i++) 869 + trace_amdgpu_cs(&parser, i); 828 870 829 - r = amdgpu_cs_ib_vm_chunk(adev, parser); 871 + r = amdgpu_cs_ib_vm_chunk(adev, &parser); 830 872 if (r) 831 873 goto out; 832 874 833 - if (amdgpu_enable_scheduler && parser->num_ibs) { 875 + if (amdgpu_enable_scheduler && parser.num_ibs) { 876 + struct amdgpu_ring * ring = parser.ibs->ring; 877 + struct amd_sched_fence *fence; 834 878 struct amdgpu_job *job; 835 - struct amdgpu_ring * ring = parser->ibs->ring; 879 + 836 880 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 837 881 if (!job) { 838 882 r = -ENOMEM; 839 883 goto out; 840 884 } 885 + 841 886 job->base.sched = &ring->sched; 842 - job->base.s_entity = &parser->ctx->rings[ring->idx].entity; 843 - job->adev = parser->adev; 844 - job->ibs = parser->ibs; 845 - job->num_ibs = parser->num_ibs; 846 - job->base.owner = parser->filp; 847 - mutex_init(&job->job_lock); 887 + job->base.s_entity = &parser.ctx->rings[ring->idx].entity; 888 + job->adev = parser.adev; 889 + job->owner = parser.filp; 890 + job->free_job = amdgpu_cs_free_job; 891 + 892 + job->ibs = parser.ibs; 893 + job->num_ibs = parser.num_ibs; 894 + parser.ibs = NULL; 895 + parser.num_ibs = 0; 896 + 848 897 if (job->ibs[job->num_ibs - 1].user) { 849 - memcpy(&job->uf, &parser->uf, 850 - sizeof(struct amdgpu_user_fence)); 898 + job->uf = parser.uf; 851 899 job->ibs[job->num_ibs - 
1].user = &job->uf; 900 + parser.uf.bo = NULL; 852 901 } 853 902 854 - job->free_job = amdgpu_cs_free_job; 855 - mutex_lock(&job->job_lock); 856 - r = amd_sched_entity_push_job(&job->base); 857 - if (r) { 858 - mutex_unlock(&job->job_lock); 903 + fence = amd_sched_fence_create(job->base.s_entity, 904 + parser.filp); 905 + if (!fence) { 906 + r = -ENOMEM; 859 907 amdgpu_cs_free_job(job); 860 908 kfree(job); 861 909 goto out; 862 910 } 863 - cs->out.handle = 864 - amdgpu_ctx_add_fence(parser->ctx, ring, 865 - &job->base.s_fence->base); 866 - parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle; 911 + job->base.s_fence = fence; 912 + parser.fence = fence_get(&fence->base); 867 913 868 - list_sort(NULL, &parser->validated, cmp_size_smaller_first); 869 - ttm_eu_fence_buffer_objects(&parser->ticket, 870 - &parser->validated, 871 - &job->base.s_fence->base); 914 + cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, 915 + &fence->base); 916 + job->ibs[job->num_ibs - 1].sequence = cs->out.handle; 872 917 873 - mutex_unlock(&job->job_lock); 874 - amdgpu_cs_parser_fini_late(parser); 875 - mutex_unlock(&vm->mutex); 876 - return 0; 918 + trace_amdgpu_cs_ioctl(job); 919 + amd_sched_entity_push_job(&job->base); 920 + 921 + } else { 922 + struct amdgpu_fence *fence; 923 + 924 + r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, 925 + parser.filp); 926 + fence = parser.ibs[parser.num_ibs - 1].fence; 927 + parser.fence = fence_get(&fence->base); 928 + cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; 877 929 } 878 930 879 - cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; 880 931 out: 881 - amdgpu_cs_parser_fini(parser, r, reserved_buffers); 932 + amdgpu_cs_parser_fini(&parser, r, reserved_buffers); 882 933 mutex_unlock(&vm->mutex); 883 934 r = amdgpu_cs_handle_lockup(adev, r); 884 935 return r;
+55 -48
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 47 47 * that the the relevant GPU caches have been flushed. 48 48 */ 49 49 50 + static struct kmem_cache *amdgpu_fence_slab; 51 + static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0); 52 + 50 53 /** 51 54 * amdgpu_fence_write - write a fence value 52 55 * ··· 88 85 } 89 86 90 87 /** 91 - * amdgpu_fence_schedule_check - schedule lockup check 92 - * 93 - * @ring: pointer to struct amdgpu_ring 94 - * 95 - * Queues a delayed work item to check for lockups. 96 - */ 97 - static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring) 98 - { 99 - /* 100 - * Do not reset the timer here with mod_delayed_work, 101 - * this can livelock in an interaction with TTM delayed destroy. 102 - */ 103 - queue_delayed_work(system_power_efficient_wq, 104 - &ring->fence_drv.lockup_work, 105 - AMDGPU_FENCE_JIFFIES_TIMEOUT); 106 - } 107 - 108 - /** 109 88 * amdgpu_fence_emit - emit a fence on the requested ring 110 89 * 111 90 * @ring: ring the fence is associated with ··· 103 118 struct amdgpu_device *adev = ring->adev; 104 119 105 120 /* we are protected by the ring emission mutex */ 106 - *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); 121 + *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); 107 122 if ((*fence) == NULL) { 108 123 return -ENOMEM; 109 124 } ··· 117 132 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, 118 133 (*fence)->seq, 119 134 AMDGPU_FENCE_FLAG_INT); 120 - trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq); 121 135 return 0; 136 + } 137 + 138 + /** 139 + * amdgpu_fence_schedule_fallback - schedule fallback check 140 + * 141 + * @ring: pointer to struct amdgpu_ring 142 + * 143 + * Start a timer as fallback to our interrupts. 
144 + */ 145 + static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) 146 + { 147 + mod_timer(&ring->fence_drv.fallback_timer, 148 + jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT); 122 149 } 123 150 124 151 /** ··· 199 202 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); 200 203 201 204 if (seq < last_emitted) 202 - amdgpu_fence_schedule_check(ring); 205 + amdgpu_fence_schedule_fallback(ring); 203 206 204 207 return wake; 205 - } 206 - 207 - /** 208 - * amdgpu_fence_check_lockup - check for hardware lockup 209 - * 210 - * @work: delayed work item 211 - * 212 - * Checks for fence activity and if there is none probe 213 - * the hardware if a lockup occured. 214 - */ 215 - static void amdgpu_fence_check_lockup(struct work_struct *work) 216 - { 217 - struct amdgpu_fence_driver *fence_drv; 218 - struct amdgpu_ring *ring; 219 - 220 - fence_drv = container_of(work, struct amdgpu_fence_driver, 221 - lockup_work.work); 222 - ring = fence_drv->ring; 223 - 224 - if (amdgpu_fence_activity(ring)) 225 - wake_up_all(&ring->fence_drv.fence_queue); 226 208 } 227 209 228 210 /** ··· 217 241 { 218 242 if (amdgpu_fence_activity(ring)) 219 243 wake_up_all(&ring->fence_drv.fence_queue); 244 + } 245 + 246 + /** 247 + * amdgpu_fence_fallback - fallback for hardware interrupts 248 + * 249 + * @work: delayed work item 250 + * 251 + * Checks for fence activity. 
252 + */ 253 + static void amdgpu_fence_fallback(unsigned long arg) 254 + { 255 + struct amdgpu_ring *ring = (void *)arg; 256 + 257 + amdgpu_fence_process(ring); 220 258 } 221 259 222 260 /** ··· 280 290 if (atomic64_read(&ring->fence_drv.last_seq) >= seq) 281 291 return 0; 282 292 283 - amdgpu_fence_schedule_check(ring); 293 + amdgpu_fence_schedule_fallback(ring); 284 294 wait_event(ring->fence_drv.fence_queue, ( 285 295 (signaled = amdgpu_fence_seq_signaled(ring, seq)))); 286 296 ··· 481 491 atomic64_set(&ring->fence_drv.last_seq, 0); 482 492 ring->fence_drv.initialized = false; 483 493 484 - INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, 485 - amdgpu_fence_check_lockup); 486 - ring->fence_drv.ring = ring; 494 + setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 495 + (unsigned long)ring); 487 496 488 497 init_waitqueue_head(&ring->fence_drv.fence_queue); 489 498 ··· 525 536 */ 526 537 int amdgpu_fence_driver_init(struct amdgpu_device *adev) 527 538 { 539 + if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) { 540 + amdgpu_fence_slab = kmem_cache_create( 541 + "amdgpu_fence", sizeof(struct amdgpu_fence), 0, 542 + SLAB_HWCACHE_ALIGN, NULL); 543 + if (!amdgpu_fence_slab) 544 + return -ENOMEM; 545 + } 528 546 if (amdgpu_debugfs_fence_init(adev)) 529 547 dev_err(adev->dev, "fence debugfs file creation failed\n"); 530 548 ··· 550 554 { 551 555 int i, r; 552 556 557 + if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) 558 + kmem_cache_destroy(amdgpu_fence_slab); 553 559 mutex_lock(&adev->ring_lock); 554 560 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 555 561 struct amdgpu_ring *ring = adev->rings[i]; 562 + 556 563 if (!ring || !ring->fence_drv.initialized) 557 564 continue; 558 565 r = amdgpu_fence_wait_empty(ring); ··· 567 568 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 568 569 ring->fence_drv.irq_type); 569 570 amd_sched_fini(&ring->sched); 571 + del_timer_sync(&ring->fence_drv.fallback_timer); 570 572 ring->fence_drv.initialized = false; 571 573 } 572 
574 mutex_unlock(&adev->ring_lock); ··· 751 751 fence->fence_wake.func = amdgpu_fence_check_signaled; 752 752 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); 753 753 fence_get(f); 754 - amdgpu_fence_schedule_check(ring); 754 + if (!timer_pending(&ring->fence_drv.fallback_timer)) 755 + amdgpu_fence_schedule_fallback(ring); 755 756 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); 756 757 return true; 758 + } 759 + 760 + static void amdgpu_fence_release(struct fence *f) 761 + { 762 + struct amdgpu_fence *fence = to_amdgpu_fence(f); 763 + kmem_cache_free(amdgpu_fence_slab, fence); 757 764 } 758 765 759 766 const struct fence_ops amdgpu_fence_ops = { ··· 769 762 .enable_signaling = amdgpu_fence_enable_signaling, 770 763 .signaled = amdgpu_fence_is_signaled, 771 764 .wait = fence_default_wait, 772 - .release = NULL, 765 + .release = amdgpu_fence_release, 773 766 }; 774 767 775 768 /*
+21 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 483 483 if (domain == AMDGPU_GEM_DOMAIN_CPU) 484 484 goto error_unreserve; 485 485 } 486 + r = amdgpu_vm_update_page_directory(adev, bo_va->vm); 487 + if (r) 488 + goto error_unreserve; 486 489 487 490 r = amdgpu_vm_clear_freed(adev, bo_va->vm); 488 491 if (r) ··· 515 512 struct amdgpu_fpriv *fpriv = filp->driver_priv; 516 513 struct amdgpu_bo *rbo; 517 514 struct amdgpu_bo_va *bo_va; 515 + struct ttm_validate_buffer tv, tv_pd; 516 + struct ww_acquire_ctx ticket; 517 + struct list_head list, duplicates; 518 518 uint32_t invalid_flags, va_flags = 0; 519 519 int r = 0; 520 520 ··· 555 549 return -ENOENT; 556 550 mutex_lock(&fpriv->vm.mutex); 557 551 rbo = gem_to_amdgpu_bo(gobj); 558 - r = amdgpu_bo_reserve(rbo, false); 552 + INIT_LIST_HEAD(&list); 553 + INIT_LIST_HEAD(&duplicates); 554 + tv.bo = &rbo->tbo; 555 + tv.shared = true; 556 + list_add(&tv.head, &list); 557 + 558 + if (args->operation == AMDGPU_VA_OP_MAP) { 559 + tv_pd.bo = &fpriv->vm.page_directory->tbo; 560 + tv_pd.shared = true; 561 + list_add(&tv_pd.head, &list); 562 + } 563 + r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); 559 564 if (r) { 560 565 mutex_unlock(&fpriv->vm.mutex); 561 566 drm_gem_object_unreference_unlocked(gobj); ··· 575 558 576 559 bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); 577 560 if (!bo_va) { 578 - amdgpu_bo_unreserve(rbo); 561 + ttm_eu_backoff_reservation(&ticket, &list); 562 + drm_gem_object_unreference_unlocked(gobj); 579 563 mutex_unlock(&fpriv->vm.mutex); 580 564 return -ENOENT; 581 565 } ··· 599 581 default: 600 582 break; 601 583 } 602 - 584 + ttm_eu_backoff_reservation(&ticket, &list); 603 585 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 604 586 amdgpu_gem_va_update_vm(adev, bo_va, args->operation); 605 587 mutex_unlock(&fpriv->vm.mutex);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 62 62 int r; 63 63 64 64 if (size) { 65 - r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, 65 + r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, 66 66 &ib->sa_bo, size, 256); 67 67 if (r) { 68 68 dev_err(adev->dev, "failed to get a new IB (%d)\n", r); ··· 216 216 } 217 217 218 218 if (ib->vm) 219 - amdgpu_vm_fence(adev, ib->vm, ib->fence); 219 + amdgpu_vm_fence(adev, ib->vm, &ib->fence->base); 220 220 221 221 amdgpu_ring_unlock_commit(ring); 222 222 return 0;
+3 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 189 189 struct amdgpu_sa_manager *sa_manager); 190 190 int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, 191 191 struct amdgpu_sa_manager *sa_manager); 192 - int amdgpu_sa_bo_new(struct amdgpu_device *adev, 193 - struct amdgpu_sa_manager *sa_manager, 194 - struct amdgpu_sa_bo **sa_bo, 195 - unsigned size, unsigned align); 192 + int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, 193 + struct amdgpu_sa_bo **sa_bo, 194 + unsigned size, unsigned align); 196 195 void amdgpu_sa_bo_free(struct amdgpu_device *adev, 197 196 struct amdgpu_sa_bo **sa_bo, 198 197 struct fence *fence);
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
··· 311 311 return false; 312 312 } 313 313 314 - int amdgpu_sa_bo_new(struct amdgpu_device *adev, 315 - struct amdgpu_sa_manager *sa_manager, 314 + int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, 316 315 struct amdgpu_sa_bo **sa_bo, 317 316 unsigned size, unsigned align) 318 317 {
+12 -18
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
··· 26 26 #include <linux/sched.h> 27 27 #include <drm/drmP.h> 28 28 #include "amdgpu.h" 29 + #include "amdgpu_trace.h" 29 30 30 31 static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) 31 32 { ··· 45 44 return NULL; 46 45 } 47 46 job = to_amdgpu_job(sched_job); 48 - mutex_lock(&job->job_lock); 49 - r = amdgpu_ib_schedule(job->adev, 50 - job->num_ibs, 51 - job->ibs, 52 - job->base.owner); 47 + trace_amdgpu_sched_run_job(job); 48 + r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); 53 49 if (r) { 54 50 DRM_ERROR("Error scheduling IBs (%d)\n", r); 55 51 goto err; ··· 59 61 if (job->free_job) 60 62 job->free_job(job); 61 63 62 - mutex_unlock(&job->job_lock); 63 - fence_put(&job->base.s_fence->base); 64 64 kfree(job); 65 65 return fence ? &fence->base : NULL; 66 66 } ··· 84 88 return -ENOMEM; 85 89 job->base.sched = &ring->sched; 86 90 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; 91 + job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); 92 + if (!job->base.s_fence) { 93 + kfree(job); 94 + return -ENOMEM; 95 + } 96 + *f = fence_get(&job->base.s_fence->base); 97 + 87 98 job->adev = adev; 88 99 job->ibs = ibs; 89 100 job->num_ibs = num_ibs; 90 - job->base.owner = owner; 91 - mutex_init(&job->job_lock); 101 + job->owner = owner; 92 102 job->free_job = free_job; 93 - mutex_lock(&job->job_lock); 94 - r = amd_sched_entity_push_job(&job->base); 95 - if (r) { 96 - mutex_unlock(&job->job_lock); 97 - kfree(job); 98 - return r; 99 - } 100 - *f = fence_get(&job->base.s_fence->base); 101 - mutex_unlock(&job->job_lock); 103 + amd_sched_entity_push_job(&job->base); 102 104 } else { 103 105 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); 104 106 if (r)
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
··· 40 40 if (*semaphore == NULL) { 41 41 return -ENOMEM; 42 42 } 43 - r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, 43 + r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, 44 44 &(*semaphore)->sa_bo, 8, 8); 45 45 if (r) { 46 46 kfree(*semaphore);
+8 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
··· 302 302 return -EINVAL; 303 303 } 304 304 305 - if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || 306 - (count >= AMDGPU_NUM_SYNCS)) { 305 + if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { 306 + r = fence_wait(&fence->base, true); 307 + if (r) 308 + return r; 309 + continue; 310 + } 311 + 312 + if (count >= AMDGPU_NUM_SYNCS) { 307 313 /* not enough room, wait manually */ 308 314 r = fence_wait(&fence->base, false); 309 315 if (r)
+51 -43
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
··· 48 48 __entry->fences) 49 49 ); 50 50 51 + TRACE_EVENT(amdgpu_cs_ioctl, 52 + TP_PROTO(struct amdgpu_job *job), 53 + TP_ARGS(job), 54 + TP_STRUCT__entry( 55 + __field(struct amdgpu_device *, adev) 56 + __field(struct amd_sched_job *, sched_job) 57 + __field(struct amdgpu_ib *, ib) 58 + __field(struct fence *, fence) 59 + __field(char *, ring_name) 60 + __field(u32, num_ibs) 61 + ), 62 + 63 + TP_fast_assign( 64 + __entry->adev = job->adev; 65 + __entry->sched_job = &job->base; 66 + __entry->ib = job->ibs; 67 + __entry->fence = &job->base.s_fence->base; 68 + __entry->ring_name = job->ibs[0].ring->name; 69 + __entry->num_ibs = job->num_ibs; 70 + ), 71 + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", 72 + __entry->adev, __entry->sched_job, __entry->ib, 73 + __entry->fence, __entry->ring_name, __entry->num_ibs) 74 + ); 75 + 76 + TRACE_EVENT(amdgpu_sched_run_job, 77 + TP_PROTO(struct amdgpu_job *job), 78 + TP_ARGS(job), 79 + TP_STRUCT__entry( 80 + __field(struct amdgpu_device *, adev) 81 + __field(struct amd_sched_job *, sched_job) 82 + __field(struct amdgpu_ib *, ib) 83 + __field(struct fence *, fence) 84 + __field(char *, ring_name) 85 + __field(u32, num_ibs) 86 + ), 87 + 88 + TP_fast_assign( 89 + __entry->adev = job->adev; 90 + __entry->sched_job = &job->base; 91 + __entry->ib = job->ibs; 92 + __entry->fence = &job->base.s_fence->base; 93 + __entry->ring_name = job->ibs[0].ring->name; 94 + __entry->num_ibs = job->num_ibs; 95 + ), 96 + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", 97 + __entry->adev, __entry->sched_job, __entry->ib, 98 + __entry->fence, __entry->ring_name, __entry->num_ibs) 99 + ); 100 + 101 + 51 102 TRACE_EVENT(amdgpu_vm_grab_id, 52 103 TP_PROTO(unsigned vmid, int ring), 53 104 TP_ARGS(vmid, ring), ··· 245 194 __entry->bo = bo; 246 195 ), 247 196 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) 248 - ); 249 - 250 - DECLARE_EVENT_CLASS(amdgpu_fence_request, 
251 - 252 - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), 253 - 254 - TP_ARGS(dev, ring, seqno), 255 - 256 - TP_STRUCT__entry( 257 - __field(u32, dev) 258 - __field(int, ring) 259 - __field(u32, seqno) 260 - ), 261 - 262 - TP_fast_assign( 263 - __entry->dev = dev->primary->index; 264 - __entry->ring = ring; 265 - __entry->seqno = seqno; 266 - ), 267 - 268 - TP_printk("dev=%u, ring=%d, seqno=%u", 269 - __entry->dev, __entry->ring, __entry->seqno) 270 - ); 271 - 272 - DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit, 273 - 274 - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), 275 - 276 - TP_ARGS(dev, ring, seqno) 277 - ); 278 - 279 - DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin, 280 - 281 - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), 282 - 283 - TP_ARGS(dev, ring, seqno) 284 - ); 285 - 286 - DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end, 287 - 288 - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), 289 - 290 - TP_ARGS(dev, ring, seqno) 291 197 ); 292 198 293 199 DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1073 1073 ret = drm_mm_dump_table(m, mm); 1074 1074 spin_unlock(&glob->lru_lock); 1075 1075 if (ttm_pl == TTM_PL_VRAM) 1076 - seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n", 1076 + seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", 1077 1077 adev->mman.bdev.man[ttm_pl].size, 1078 - atomic64_read(&adev->vram_usage) >> 20, 1079 - atomic64_read(&adev->vram_vis_usage) >> 20); 1078 + (u64)atomic64_read(&adev->vram_usage) >> 20, 1079 + (u64)atomic64_read(&adev->vram_vis_usage) >> 20); 1080 1080 return ret; 1081 1081 } 1082 1082
+77 -61
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 143 143 unsigned i; 144 144 145 145 /* check if the id is still valid */ 146 - if (vm_id->id && vm_id->last_id_use && 147 - vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { 148 - trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); 149 - return 0; 146 + if (vm_id->id) { 147 + unsigned id = vm_id->id; 148 + long owner; 149 + 150 + owner = atomic_long_read(&adev->vm_manager.ids[id].owner); 151 + if (owner == (long)vm) { 152 + trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); 153 + return 0; 154 + } 150 155 } 151 156 152 157 /* we definately need to flush */ ··· 159 154 160 155 /* skip over VMID 0, since it is the system VM */ 161 156 for (i = 1; i < adev->vm_manager.nvm; ++i) { 162 - struct fence *fence = adev->vm_manager.active[i]; 157 + struct fence *fence = adev->vm_manager.ids[i].active; 163 158 struct amdgpu_ring *fring; 164 159 165 160 if (fence == NULL) { ··· 181 176 if (choices[i]) { 182 177 struct fence *fence; 183 178 184 - fence = adev->vm_manager.active[choices[i]]; 179 + fence = adev->vm_manager.ids[choices[i]].active; 185 180 vm_id->id = choices[i]; 186 181 187 182 trace_amdgpu_vm_grab_id(choices[i], ring->idx); ··· 212 207 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); 213 208 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 214 209 struct fence *flushed_updates = vm_id->flushed_updates; 215 - bool is_earlier = false; 210 + bool is_later; 216 211 217 - if (flushed_updates && updates) { 218 - BUG_ON(flushed_updates->context != updates->context); 219 - is_earlier = (updates->seqno - flushed_updates->seqno <= 220 - INT_MAX) ? 
true : false; 221 - } 212 + if (!flushed_updates) 213 + is_later = true; 214 + else if (!updates) 215 + is_later = false; 216 + else 217 + is_later = fence_is_later(updates, flushed_updates); 222 218 223 - if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || 224 - is_earlier) { 225 - 219 + if (pd_addr != vm_id->pd_gpu_addr || is_later) { 226 220 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); 227 - if (is_earlier) { 221 + if (is_later) { 228 222 vm_id->flushed_updates = fence_get(updates); 229 223 fence_put(flushed_updates); 230 224 } 231 - if (!flushed_updates) 232 - vm_id->flushed_updates = fence_get(updates); 233 225 vm_id->pd_gpu_addr = pd_addr; 234 226 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); 235 227 } ··· 246 244 */ 247 245 void amdgpu_vm_fence(struct amdgpu_device *adev, 248 246 struct amdgpu_vm *vm, 249 - struct amdgpu_fence *fence) 247 + struct fence *fence) 250 248 { 251 - unsigned ridx = fence->ring->idx; 252 - unsigned vm_id = vm->ids[ridx].id; 249 + struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); 250 + unsigned vm_id = vm->ids[ring->idx].id; 253 251 254 - fence_put(adev->vm_manager.active[vm_id]); 255 - adev->vm_manager.active[vm_id] = fence_get(&fence->base); 256 - 257 - fence_put(vm->ids[ridx].last_id_use); 258 - vm->ids[ridx].last_id_use = fence_get(&fence->base); 252 + fence_put(adev->vm_manager.ids[vm_id].active); 253 + adev->vm_manager.ids[vm_id].active = fence_get(fence); 254 + atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); 259 255 } 260 256 261 257 /** ··· 332 332 * 333 333 * @adev: amdgpu_device pointer 334 334 * @bo: bo to clear 335 + * 336 + * need to reserve bo first before calling it. 
335 337 */ 336 338 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, 337 339 struct amdgpu_bo *bo) ··· 345 343 uint64_t addr; 346 344 int r; 347 345 348 - r = amdgpu_bo_reserve(bo, false); 349 - if (r) 350 - return r; 351 - 352 346 r = reservation_object_reserve_shared(bo->tbo.resv); 353 347 if (r) 354 348 return r; 355 349 356 350 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 357 351 if (r) 358 - goto error_unreserve; 352 + goto error; 359 353 360 354 addr = amdgpu_bo_gpu_offset(bo); 361 355 entries = amdgpu_bo_size(bo) / 8; 362 356 363 357 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 364 358 if (!ib) 365 - goto error_unreserve; 359 + goto error; 366 360 367 361 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); 368 362 if (r) ··· 376 378 if (!r) 377 379 amdgpu_bo_fence(bo, fence, true); 378 380 fence_put(fence); 379 - if (amdgpu_enable_scheduler) { 380 - amdgpu_bo_unreserve(bo); 381 + if (amdgpu_enable_scheduler) 381 382 return 0; 382 - } 383 + 383 384 error_free: 384 385 amdgpu_ib_free(adev, ib); 385 386 kfree(ib); 386 387 387 - error_unreserve: 388 - amdgpu_bo_unreserve(bo); 388 + error: 389 389 return r; 390 390 } 391 391 ··· 985 989 * Add a mapping of the BO at the specefied addr into the VM. 986 990 * Returns 0 for success, error for failure. 987 991 * 988 - * Object has to be reserved and gets unreserved by this function! 992 + * Object has to be reserved and unreserved outside! 
989 993 */ 990 994 int amdgpu_vm_bo_map(struct amdgpu_device *adev, 991 995 struct amdgpu_bo_va *bo_va, ··· 1001 1005 1002 1006 /* validate the parameters */ 1003 1007 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || 1004 - size == 0 || size & AMDGPU_GPU_PAGE_MASK) { 1005 - amdgpu_bo_unreserve(bo_va->bo); 1008 + size == 0 || size & AMDGPU_GPU_PAGE_MASK) 1006 1009 return -EINVAL; 1007 - } 1008 1010 1009 1011 /* make sure object fit at this offset */ 1010 1012 eaddr = saddr + size; 1011 - if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { 1012 - amdgpu_bo_unreserve(bo_va->bo); 1013 + if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) 1013 1014 return -EINVAL; 1014 - } 1015 1015 1016 1016 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; 1017 1017 if (last_pfn > adev->vm_manager.max_pfn) { 1018 1018 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", 1019 1019 last_pfn, adev->vm_manager.max_pfn); 1020 - amdgpu_bo_unreserve(bo_va->bo); 1021 1020 return -EINVAL; 1022 1021 } 1023 1022 1024 1023 saddr /= AMDGPU_GPU_PAGE_SIZE; 1025 1024 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1026 1025 1026 + spin_lock(&vm->it_lock); 1027 1027 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); 1028 + spin_unlock(&vm->it_lock); 1028 1029 if (it) { 1029 1030 struct amdgpu_bo_va_mapping *tmp; 1030 1031 tmp = container_of(it, struct amdgpu_bo_va_mapping, it); ··· 1029 1036 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " 1030 1037 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, 1031 1038 tmp->it.start, tmp->it.last + 1); 1032 - amdgpu_bo_unreserve(bo_va->bo); 1033 1039 r = -EINVAL; 1034 1040 goto error; 1035 1041 } 1036 1042 1037 1043 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 1038 1044 if (!mapping) { 1039 - amdgpu_bo_unreserve(bo_va->bo); 1040 1045 r = -ENOMEM; 1041 1046 goto error; 1042 1047 } ··· 1046 1055 mapping->flags = flags; 1047 1056 1048 1057 list_add(&mapping->list, &bo_va->invalids); 1058 + 
spin_lock(&vm->it_lock); 1049 1059 interval_tree_insert(&mapping->it, &vm->va); 1060 + spin_unlock(&vm->it_lock); 1050 1061 trace_amdgpu_vm_bo_map(bo_va, mapping); 1051 1062 1052 1063 /* Make sure the page tables are allocated */ ··· 1060 1067 if (eaddr > vm->max_pde_used) 1061 1068 vm->max_pde_used = eaddr; 1062 1069 1063 - amdgpu_bo_unreserve(bo_va->bo); 1064 - 1065 1070 /* walk over the address space and allocate the page tables */ 1066 1071 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1067 1072 struct reservation_object *resv = vm->page_directory->tbo.resv; ··· 1068 1077 if (vm->page_tables[pt_idx].bo) 1069 1078 continue; 1070 1079 1071 - ww_mutex_lock(&resv->lock, NULL); 1072 1080 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1073 1081 AMDGPU_GPU_PAGE_SIZE, true, 1074 1082 AMDGPU_GEM_DOMAIN_VRAM, 1075 1083 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1076 1084 NULL, resv, &pt); 1077 - ww_mutex_unlock(&resv->lock); 1078 1085 if (r) 1079 1086 goto error_free; 1080 1087 ··· 1090 1101 1091 1102 error_free: 1092 1103 list_del(&mapping->list); 1104 + spin_lock(&vm->it_lock); 1093 1105 interval_tree_remove(&mapping->it, &vm->va); 1106 + spin_unlock(&vm->it_lock); 1094 1107 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1095 1108 kfree(mapping); 1096 1109 ··· 1110 1119 * Remove a mapping of the BO at the specefied addr from the VM. 1111 1120 * Returns 0 for success, error for failure. 1112 1121 * 1113 - * Object has to be reserved and gets unreserved by this function! 1122 + * Object has to be reserved and unreserved outside! 
1114 1123 */ 1115 1124 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 1116 1125 struct amdgpu_bo_va *bo_va, ··· 1135 1144 break; 1136 1145 } 1137 1146 1138 - if (&mapping->list == &bo_va->invalids) { 1139 - amdgpu_bo_unreserve(bo_va->bo); 1147 + if (&mapping->list == &bo_va->invalids) 1140 1148 return -ENOENT; 1141 - } 1142 1149 } 1143 1150 1144 1151 list_del(&mapping->list); 1152 + spin_lock(&vm->it_lock); 1145 1153 interval_tree_remove(&mapping->it, &vm->va); 1154 + spin_unlock(&vm->it_lock); 1146 1155 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1147 1156 1148 1157 if (valid) 1149 1158 list_add(&mapping->list, &vm->freed); 1150 1159 else 1151 1160 kfree(mapping); 1152 - amdgpu_bo_unreserve(bo_va->bo); 1153 1161 1154 1162 return 0; 1155 1163 } ··· 1177 1187 1178 1188 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 1179 1189 list_del(&mapping->list); 1190 + spin_lock(&vm->it_lock); 1180 1191 interval_tree_remove(&mapping->it, &vm->va); 1192 + spin_unlock(&vm->it_lock); 1181 1193 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1182 1194 list_add(&mapping->list, &vm->freed); 1183 1195 } 1184 1196 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { 1185 1197 list_del(&mapping->list); 1198 + spin_lock(&vm->it_lock); 1186 1199 interval_tree_remove(&mapping->it, &vm->va); 1200 + spin_unlock(&vm->it_lock); 1187 1201 kfree(mapping); 1188 1202 } 1189 1203 ··· 1235 1241 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1236 1242 vm->ids[i].id = 0; 1237 1243 vm->ids[i].flushed_updates = NULL; 1238 - vm->ids[i].last_id_use = NULL; 1239 1244 } 1240 1245 mutex_init(&vm->mutex); 1241 1246 vm->va = RB_ROOT; ··· 1242 1249 INIT_LIST_HEAD(&vm->invalidated); 1243 1250 INIT_LIST_HEAD(&vm->cleared); 1244 1251 INIT_LIST_HEAD(&vm->freed); 1245 - 1252 + spin_lock_init(&vm->it_lock); 1246 1253 pd_size = amdgpu_vm_directory_size(adev); 1247 1254 pd_entries = amdgpu_vm_num_pdes(adev); 1248 1255 ··· 1262 1269 NULL, NULL, &vm->page_directory); 1263 1270 if (r) 1264 1271 return 
r; 1265 - 1272 + r = amdgpu_bo_reserve(vm->page_directory, false); 1273 + if (r) { 1274 + amdgpu_bo_unref(&vm->page_directory); 1275 + vm->page_directory = NULL; 1276 + return r; 1277 + } 1266 1278 r = amdgpu_vm_clear_bo(adev, vm->page_directory); 1279 + amdgpu_bo_unreserve(vm->page_directory); 1267 1280 if (r) { 1268 1281 amdgpu_bo_unref(&vm->page_directory); 1269 1282 vm->page_directory = NULL; ··· 1312 1313 1313 1314 amdgpu_bo_unref(&vm->page_directory); 1314 1315 fence_put(vm->page_directory_fence); 1315 - 1316 1316 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1317 + unsigned id = vm->ids[i].id; 1318 + 1319 + atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, 1320 + (long)vm, 0); 1317 1321 fence_put(vm->ids[i].flushed_updates); 1318 - fence_put(vm->ids[i].last_id_use); 1319 1322 } 1320 1323 1321 1324 mutex_destroy(&vm->mutex); 1325 + } 1326 + 1327 + /** 1328 + * amdgpu_vm_manager_fini - cleanup VM manager 1329 + * 1330 + * @adev: amdgpu_device pointer 1331 + * 1332 + * Cleanup the VM manager and free resources. 1333 + */ 1334 + void amdgpu_vm_manager_fini(struct amdgpu_device *adev) 1335 + { 1336 + unsigned i; 1337 + 1338 + for (i = 0; i < AMDGPU_NUM_VM; ++i) 1339 + fence_put(adev->vm_manager.ids[i].active); 1322 1340 }
+4 -4
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
··· 6569 6569 switch (state) { 6570 6570 case AMDGPU_IRQ_STATE_DISABLE: 6571 6571 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6572 - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6572 + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6573 6573 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6574 6574 break; 6575 6575 case AMDGPU_IRQ_STATE_ENABLE: 6576 6576 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6577 - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6577 + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6578 6578 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6579 6579 break; 6580 6580 default: ··· 6586 6586 switch (state) { 6587 6587 case AMDGPU_IRQ_STATE_DISABLE: 6588 6588 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6589 - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6589 + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6590 6590 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6591 6591 break; 6592 6592 case AMDGPU_IRQ_STATE_ENABLE: 6593 6593 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6594 - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6594 + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6595 6595 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6596 6596 break; 6597 6597 default:
+295 -7
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 268 268 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, 269 269 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, 270 270 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, 271 - mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, 272 271 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, 273 272 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, 274 273 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, ··· 295 296 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, 296 297 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, 297 298 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, 298 - mmPCIE_INDEX, 0xffffffff, 0x0140001c, 299 - mmPCIE_DATA, 0x000f0000, 0x00000000, 300 - mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, 301 - mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, 302 299 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 303 300 }; 304 301 ··· 995 1000 adev->gfx.config.max_cu_per_sh = 16; 996 1001 adev->gfx.config.max_sh_per_se = 1; 997 1002 adev->gfx.config.max_backends_per_se = 4; 998 - adev->gfx.config.max_texture_channel_caches = 8; 1003 + adev->gfx.config.max_texture_channel_caches = 16; 999 1004 adev->gfx.config.max_gprs = 256; 1000 1005 adev->gfx.config.max_gs_threads = 32; 1001 1006 adev->gfx.config.max_hw_contexts = 8; ··· 1608 1613 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1609 1614 } 1610 1615 case CHIP_FIJI: 1616 + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1617 + switch (reg_offset) { 1618 + case 0: 1619 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1620 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1621 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1622 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1623 + break; 1624 + case 1: 1625 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1626 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1627 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1628 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1629 + break; 1630 + case 2: 1631 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1632 + 
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1633 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1634 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1635 + break; 1636 + case 3: 1637 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1638 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1639 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1640 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1641 + break; 1642 + case 4: 1643 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1644 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1645 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1646 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1647 + break; 1648 + case 5: 1649 + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1650 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1651 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1652 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1653 + break; 1654 + case 6: 1655 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1656 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1657 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1658 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1659 + break; 1660 + case 7: 1661 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1662 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1663 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 1664 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1665 + break; 1666 + case 8: 1667 + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1668 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); 1669 + break; 1670 + case 9: 1671 + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1672 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1673 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1674 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1675 + break; 1676 + case 10: 1677 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1678 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1679 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1680 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1681 + break; 1682 + 
case 11: 1683 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1684 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1685 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1686 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1687 + break; 1688 + case 12: 1689 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1690 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1691 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1692 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1693 + break; 1694 + case 13: 1695 + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1696 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1697 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1698 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1699 + break; 1700 + case 14: 1701 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1702 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1703 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1704 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1705 + break; 1706 + case 15: 1707 + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1708 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1709 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1710 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1711 + break; 1712 + case 16: 1713 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1714 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1715 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1716 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1717 + break; 1718 + case 17: 1719 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1720 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1721 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1722 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1723 + break; 1724 + case 18: 1725 + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1726 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1727 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1728 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1729 + break; 1730 + case 19: 1731 + gb_tile_moden = 
(ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1732 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1733 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1734 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1735 + break; 1736 + case 20: 1737 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1738 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1739 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1740 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1741 + break; 1742 + case 21: 1743 + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1744 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1745 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1746 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1747 + break; 1748 + case 22: 1749 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1750 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1751 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1752 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1753 + break; 1754 + case 23: 1755 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1756 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1757 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1758 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1759 + break; 1760 + case 24: 1761 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1762 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1763 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1764 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1765 + break; 1766 + case 25: 1767 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1768 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1769 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1770 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1771 + break; 1772 + case 26: 1773 + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1774 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1775 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1776 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1777 + break; 1778 + case 27: 1779 + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1780 + 
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1781 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1782 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1783 + break; 1784 + case 28: 1785 + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1786 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1787 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1788 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1789 + break; 1790 + case 29: 1791 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1792 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1793 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1794 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1795 + break; 1796 + case 30: 1797 + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1798 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1799 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1800 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1801 + break; 1802 + default: 1803 + gb_tile_moden = 0; 1804 + break; 1805 + } 1806 + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; 1807 + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); 1808 + } 1809 + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { 1810 + switch (reg_offset) { 1811 + case 0: 1812 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1813 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1814 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1815 + NUM_BANKS(ADDR_SURF_8_BANK)); 1816 + break; 1817 + case 1: 1818 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1819 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1820 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1821 + NUM_BANKS(ADDR_SURF_8_BANK)); 1822 + break; 1823 + case 2: 1824 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1825 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1826 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1827 + NUM_BANKS(ADDR_SURF_8_BANK)); 1828 + break; 1829 + case 3: 1830 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1831 + 
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1832 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1833 + NUM_BANKS(ADDR_SURF_8_BANK)); 1834 + break; 1835 + case 4: 1836 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1837 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1838 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1839 + NUM_BANKS(ADDR_SURF_8_BANK)); 1840 + break; 1841 + case 5: 1842 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1843 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1844 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1845 + NUM_BANKS(ADDR_SURF_8_BANK)); 1846 + break; 1847 + case 6: 1848 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1849 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1850 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1851 + NUM_BANKS(ADDR_SURF_8_BANK)); 1852 + break; 1853 + case 8: 1854 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1855 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1856 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1857 + NUM_BANKS(ADDR_SURF_8_BANK)); 1858 + break; 1859 + case 9: 1860 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1861 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1862 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1863 + NUM_BANKS(ADDR_SURF_8_BANK)); 1864 + break; 1865 + case 10: 1866 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1867 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1868 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1869 + NUM_BANKS(ADDR_SURF_8_BANK)); 1870 + break; 1871 + case 11: 1872 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1873 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1874 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1875 + NUM_BANKS(ADDR_SURF_8_BANK)); 1876 + break; 1877 + case 12: 1878 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1879 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1880 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1881 + NUM_BANKS(ADDR_SURF_8_BANK)); 1882 + break; 1883 + case 13: 1884 + gb_tile_moden = 
(BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1885 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1886 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1887 + NUM_BANKS(ADDR_SURF_8_BANK)); 1888 + break; 1889 + case 14: 1890 + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1891 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1892 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1893 + NUM_BANKS(ADDR_SURF_4_BANK)); 1894 + break; 1895 + case 7: 1896 + /* unused idx */ 1897 + continue; 1898 + default: 1899 + gb_tile_moden = 0; 1900 + break; 1901 + } 1902 + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; 1903 + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1904 + } 1905 + break; 1611 1906 case CHIP_TONGA: 1612 1907 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1613 1908 switch (reg_offset) { ··· 3256 2971 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); 3257 2972 switch (adev->asic_type) { 3258 2973 case CHIP_TONGA: 3259 - case CHIP_FIJI: 3260 2974 amdgpu_ring_write(ring, 0x16000012); 3261 2975 amdgpu_ring_write(ring, 0x0000002A); 2976 + break; 2977 + case CHIP_FIJI: 2978 + amdgpu_ring_write(ring, 0x3a00161a); 2979 + amdgpu_ring_write(ring, 0x0000002e); 3262 2980 break; 3263 2981 case CHIP_TOPAZ: 3264 2982 case CHIP_CARRIZO:
+4 -7
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 40 40 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); 41 41 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 42 42 43 - MODULE_FIRMWARE("radeon/boniare_mc.bin"); 43 + MODULE_FIRMWARE("radeon/bonaire_mc.bin"); 44 44 MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 45 45 46 46 /** ··· 501 501 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); 502 502 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); 503 503 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); 504 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); 504 505 WREG32(mmVM_L2_CNTL, tmp); 505 506 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); 506 507 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); ··· 961 960 962 961 static int gmc_v7_0_sw_fini(void *handle) 963 962 { 964 - int i; 965 963 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 966 964 967 965 if (adev->vm_manager.enabled) { 968 - for (i = 0; i < AMDGPU_NUM_VM; ++i) 969 - fence_put(adev->vm_manager.active[i]); 966 + amdgpu_vm_manager_fini(adev); 970 967 gmc_v7_0_vm_fini(adev); 971 968 adev->vm_manager.enabled = false; 972 969 } ··· 1009 1010 1010 1011 static int gmc_v7_0_suspend(void *handle) 1011 1012 { 1012 - int i; 1013 1013 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1014 1014 1015 1015 if (adev->vm_manager.enabled) { 1016 - for (i = 0; i < AMDGPU_NUM_VM; ++i) 1017 - fence_put(adev->vm_manager.active[i]); 1016 + amdgpu_vm_manager_fini(adev); 1018 1017 gmc_v7_0_vm_fini(adev); 1019 1018 adev->vm_manager.enabled = false; 1020 1019 }
+3 -6
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 629 629 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); 630 630 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); 631 631 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); 632 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); 632 633 WREG32(mmVM_L2_CNTL, tmp); 633 634 tmp = RREG32(mmVM_L2_CNTL2); 634 635 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); ··· 980 979 981 980 static int gmc_v8_0_sw_fini(void *handle) 982 981 { 983 - int i; 984 982 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 985 983 986 984 if (adev->vm_manager.enabled) { 987 - for (i = 0; i < AMDGPU_NUM_VM; ++i) 988 - fence_put(adev->vm_manager.active[i]); 985 + amdgpu_vm_manager_fini(adev); 989 986 gmc_v8_0_vm_fini(adev); 990 987 adev->vm_manager.enabled = false; 991 988 } ··· 1030 1031 1031 1032 static int gmc_v8_0_suspend(void *handle) 1032 1033 { 1033 - int i; 1034 1034 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1035 1035 1036 1036 if (adev->vm_manager.enabled) { 1037 - for (i = 0; i < AMDGPU_NUM_VM; ++i) 1038 - fence_put(adev->vm_manager.active[i]); 1037 + amdgpu_vm_manager_fini(adev); 1039 1038 gmc_v8_0_vm_fini(adev); 1040 1039 adev->vm_manager.enabled = false; 1041 1040 }
+21 -3
drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
··· 16 16 TP_ARGS(sched_job), 17 17 TP_STRUCT__entry( 18 18 __field(struct amd_sched_entity *, entity) 19 + __field(struct amd_sched_job *, sched_job) 20 + __field(struct fence *, fence) 19 21 __field(const char *, name) 20 22 __field(u32, job_count) 21 23 __field(int, hw_job_count) ··· 25 23 26 24 TP_fast_assign( 27 25 __entry->entity = sched_job->s_entity; 26 + __entry->sched_job = sched_job; 27 + __entry->fence = &sched_job->s_fence->base; 28 28 __entry->name = sched_job->sched->name; 29 29 __entry->job_count = kfifo_len( 30 30 &sched_job->s_entity->job_queue) / sizeof(sched_job); 31 31 __entry->hw_job_count = atomic_read( 32 32 &sched_job->sched->hw_rq_count); 33 33 ), 34 - TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", 35 - __entry->entity, __entry->name, __entry->job_count, 36 - __entry->hw_job_count) 34 + TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d", 35 + __entry->entity, __entry->sched_job, __entry->fence, __entry->name, 36 + __entry->job_count, __entry->hw_job_count) 37 37 ); 38 + 39 + TRACE_EVENT(amd_sched_process_job, 40 + TP_PROTO(struct amd_sched_fence *fence), 41 + TP_ARGS(fence), 42 + TP_STRUCT__entry( 43 + __field(struct fence *, fence) 44 + ), 45 + 46 + TP_fast_assign( 47 + __entry->fence = &fence->base; 48 + ), 49 + TP_printk("fence=%p signaled", __entry->fence) 50 + ); 51 + 38 52 #endif 39 53 40 54 /* This part must be outside protection */
+14 -10
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
··· 34 34 amd_sched_entity_pop_job(struct amd_sched_entity *entity); 35 35 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 36 36 37 + struct kmem_cache *sched_fence_slab; 38 + atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); 39 + 37 40 /* Initialize a given run queue struct */ 38 41 static void amd_sched_rq_init(struct amd_sched_rq *rq) 39 42 { ··· 276 273 * 277 274 * Returns 0 for success, negative error code otherwise. 278 275 */ 279 - int amd_sched_entity_push_job(struct amd_sched_job *sched_job) 276 + void amd_sched_entity_push_job(struct amd_sched_job *sched_job) 280 277 { 281 278 struct amd_sched_entity *entity = sched_job->s_entity; 282 - struct amd_sched_fence *fence = amd_sched_fence_create( 283 - entity, sched_job->owner); 284 - 285 - if (!fence) 286 - return -ENOMEM; 287 - 288 - fence_get(&fence->base); 289 - sched_job->s_fence = fence; 290 279 291 280 wait_event(entity->sched->job_scheduled, 292 281 amd_sched_entity_in(sched_job)); 293 282 trace_amd_sched_job(sched_job); 294 - return 0; 295 283 } 296 284 297 285 /** ··· 337 343 list_del_init(&s_fence->list); 338 344 spin_unlock_irqrestore(&sched->fence_list_lock, flags); 339 345 } 346 + trace_amd_sched_process_job(s_fence); 340 347 fence_put(&s_fence->base); 341 348 wake_up_interruptible(&sched->wake_up_worker); 342 349 } ··· 445 450 init_waitqueue_head(&sched->wake_up_worker); 446 451 init_waitqueue_head(&sched->job_scheduled); 447 452 atomic_set(&sched->hw_rq_count, 0); 453 + if (atomic_inc_return(&sched_fence_slab_ref) == 1) { 454 + sched_fence_slab = kmem_cache_create( 455 + "amd_sched_fence", sizeof(struct amd_sched_fence), 0, 456 + SLAB_HWCACHE_ALIGN, NULL); 457 + if (!sched_fence_slab) 458 + return -ENOMEM; 459 + } 448 460 449 461 /* Each scheduler will run on a seperate kernel thread */ 450 462 sched->thread = kthread_run(amd_sched_main, sched, sched->name); ··· 472 470 { 473 471 if (sched->thread) 474 472 kthread_stop(sched->thread); 473 + if 
(atomic_dec_and_test(&sched_fence_slab_ref)) 474 + kmem_cache_destroy(sched_fence_slab); 475 475 }
+4 -2
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
··· 30 30 struct amd_gpu_scheduler; 31 31 struct amd_sched_rq; 32 32 33 + extern struct kmem_cache *sched_fence_slab; 34 + extern atomic_t sched_fence_slab_ref; 35 + 33 36 /** 34 37 * A scheduler entity is a wrapper around a job queue or a group 35 38 * of other entities. Entities take turns emitting jobs from their ··· 79 76 struct amd_gpu_scheduler *sched; 80 77 struct amd_sched_entity *s_entity; 81 78 struct amd_sched_fence *s_fence; 82 - void *owner; 83 79 }; 84 80 85 81 extern const struct fence_ops amd_sched_fence_ops; ··· 130 128 uint32_t jobs); 131 129 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 132 130 struct amd_sched_entity *entity); 133 - int amd_sched_entity_push_job(struct amd_sched_job *sched_job); 131 + void amd_sched_entity_push_job(struct amd_sched_job *sched_job); 134 132 135 133 struct amd_sched_fence *amd_sched_fence_create( 136 134 struct amd_sched_entity *s_entity, void *owner);
+8 -2
drivers/gpu/drm/amd/scheduler/sched_fence.c
··· 32 32 struct amd_sched_fence *fence = NULL; 33 33 unsigned seq; 34 34 35 - fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL); 35 + fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); 36 36 if (fence == NULL) 37 37 return NULL; 38 38 fence->owner = owner; ··· 71 71 return true; 72 72 } 73 73 74 + static void amd_sched_fence_release(struct fence *f) 75 + { 76 + struct amd_sched_fence *fence = to_amd_sched_fence(f); 77 + kmem_cache_free(sched_fence_slab, fence); 78 + } 79 + 74 80 const struct fence_ops amd_sched_fence_ops = { 75 81 .get_driver_name = amd_sched_fence_get_driver_name, 76 82 .get_timeline_name = amd_sched_fence_get_timeline_name, 77 83 .enable_signaling = amd_sched_fence_enable_signaling, 78 84 .signaled = NULL, 79 85 .wait = fence_default_wait, 80 - .release = NULL, 86 + .release = amd_sched_fence_release, 81 87 };
+42 -19
drivers/gpu/drm/drm_atomic.c
··· 1432 1432 return ret; 1433 1433 } 1434 1434 1435 + /** 1436 + * drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers. 1437 + * 1438 + * @dev: drm device to check. 1439 + * @plane_mask: plane mask for planes that were updated. 1440 + * @ret: return value, can be -EDEADLK for a retry. 1441 + * 1442 + * Before doing an update plane->old_fb is set to plane->fb, 1443 + * but before dropping the locks old_fb needs to be set to NULL 1444 + * and plane->fb updated. This is a common operation for each 1445 + * atomic update, so this call is split off as a helper. 1446 + */ 1447 + void drm_atomic_clean_old_fb(struct drm_device *dev, 1448 + unsigned plane_mask, 1449 + int ret) 1450 + { 1451 + struct drm_plane *plane; 1452 + 1453 + /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping 1454 + * locks (ie. while it is still safe to deref plane->state). We 1455 + * need to do this here because the driver entry points cannot 1456 + * distinguish between legacy and atomic ioctls. 
1457 + */ 1458 + drm_for_each_plane_mask(plane, dev, plane_mask) { 1459 + if (ret == 0) { 1460 + struct drm_framebuffer *new_fb = plane->state->fb; 1461 + if (new_fb) 1462 + drm_framebuffer_reference(new_fb); 1463 + plane->fb = new_fb; 1464 + plane->crtc = plane->state->crtc; 1465 + 1466 + if (plane->old_fb) 1467 + drm_framebuffer_unreference(plane->old_fb); 1468 + } 1469 + plane->old_fb = NULL; 1470 + } 1471 + } 1472 + EXPORT_SYMBOL(drm_atomic_clean_old_fb); 1473 + 1435 1474 int drm_mode_atomic_ioctl(struct drm_device *dev, 1436 1475 void *data, struct drm_file *file_priv) 1437 1476 { ··· 1485 1446 struct drm_plane *plane; 1486 1447 struct drm_crtc *crtc; 1487 1448 struct drm_crtc_state *crtc_state; 1488 - unsigned plane_mask = 0; 1449 + unsigned plane_mask; 1489 1450 int ret = 0; 1490 1451 unsigned int i, j; 1491 1452 ··· 1525 1486 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 1526 1487 1527 1488 retry: 1489 + plane_mask = 0; 1528 1490 copied_objs = 0; 1529 1491 copied_props = 0; 1530 1492 ··· 1616 1576 } 1617 1577 1618 1578 out: 1619 - /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping 1620 - * locks (ie. while it is still safe to deref plane->state). We 1621 - * need to do this here because the driver entry points cannot 1622 - * distinguish between legacy and atomic ioctls. 1623 - */ 1624 - drm_for_each_plane_mask(plane, dev, plane_mask) { 1625 - if (ret == 0) { 1626 - struct drm_framebuffer *new_fb = plane->state->fb; 1627 - if (new_fb) 1628 - drm_framebuffer_reference(new_fb); 1629 - plane->fb = new_fb; 1630 - plane->crtc = plane->state->crtc; 1631 - 1632 - if (plane->old_fb) 1633 - drm_framebuffer_unreference(plane->old_fb); 1634 - } 1635 - plane->old_fb = NULL; 1636 - } 1579 + drm_atomic_clean_old_fb(dev, plane_mask, ret); 1637 1580 1638 1581 if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 1639 1582 /*
+20 -9
drivers/gpu/drm/drm_atomic_helper.c
··· 210 210 return -EINVAL; 211 211 } 212 212 213 + if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) { 214 + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n", 215 + new_encoder->base.id, 216 + new_encoder->name, 217 + connector_state->crtc->base.id); 218 + return -EINVAL; 219 + } 220 + 213 221 if (new_encoder == connector_state->best_encoder) { 214 222 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", 215 223 connector->base.id, ··· 1561 1553 goto fail; 1562 1554 } 1563 1555 1556 + if (plane_state->crtc && (plane == plane->crtc->cursor)) 1557 + plane_state->state->legacy_cursor_update = true; 1558 + 1564 1559 ret = __drm_atomic_helper_disable_plane(plane, plane_state); 1565 1560 if (ret != 0) 1566 1561 goto fail; ··· 1615 1604 plane_state->src_y = 0; 1616 1605 plane_state->src_h = 0; 1617 1606 plane_state->src_w = 0; 1618 - 1619 - if (plane->crtc && (plane == plane->crtc->cursor)) 1620 - plane_state->state->legacy_cursor_update = true; 1621 1607 1622 1608 return 0; 1623 1609 } ··· 1749 1741 struct drm_crtc_state *crtc_state; 1750 1742 struct drm_plane_state *primary_state; 1751 1743 struct drm_crtc *crtc = set->crtc; 1744 + int hdisplay, vdisplay; 1752 1745 int ret; 1753 1746 1754 1747 crtc_state = drm_atomic_get_crtc_state(state, crtc); ··· 1792 1783 if (ret != 0) 1793 1784 return ret; 1794 1785 1786 + drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay); 1787 + 1795 1788 drm_atomic_set_fb_for_plane(primary_state, set->fb); 1796 1789 primary_state->crtc_x = 0; 1797 1790 primary_state->crtc_y = 0; 1798 - primary_state->crtc_h = set->mode->vdisplay; 1799 - primary_state->crtc_w = set->mode->hdisplay; 1791 + primary_state->crtc_h = vdisplay; 1792 + primary_state->crtc_w = hdisplay; 1800 1793 primary_state->src_x = set->x << 16; 1801 1794 primary_state->src_y = set->y << 16; 1802 1795 if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { 1803 - primary_state->src_h = 
set->mode->hdisplay << 16; 1804 - primary_state->src_w = set->mode->vdisplay << 16; 1796 + primary_state->src_h = hdisplay << 16; 1797 + primary_state->src_w = vdisplay << 16; 1805 1798 } else { 1806 - primary_state->src_h = set->mode->vdisplay << 16; 1807 - primary_state->src_w = set->mode->hdisplay << 16; 1799 + primary_state->src_h = vdisplay << 16; 1800 + primary_state->src_w = hdisplay << 16; 1808 1801 } 1809 1802 1810 1803 commit:
+14 -37
drivers/gpu/drm/drm_fb_helper.c
··· 342 342 struct drm_plane *plane; 343 343 struct drm_atomic_state *state; 344 344 int i, ret; 345 + unsigned plane_mask; 345 346 346 347 state = drm_atomic_state_alloc(dev); 347 348 if (!state) ··· 350 349 351 350 state->acquire_ctx = dev->mode_config.acquire_ctx; 352 351 retry: 352 + plane_mask = 0; 353 353 drm_for_each_plane(plane, dev) { 354 354 struct drm_plane_state *plane_state; 355 - 356 - plane->old_fb = plane->fb; 357 355 358 356 plane_state = drm_atomic_get_plane_state(state, plane); 359 357 if (IS_ERR(plane_state)) { ··· 361 361 } 362 362 363 363 plane_state->rotation = BIT(DRM_ROTATE_0); 364 + 365 + plane->old_fb = plane->fb; 366 + plane_mask |= 1 << drm_plane_index(plane); 364 367 365 368 /* disable non-primary: */ 366 369 if (plane->type == DRM_PLANE_TYPE_PRIMARY) ··· 385 382 ret = drm_atomic_commit(state); 386 383 387 384 fail: 388 - drm_for_each_plane(plane, dev) { 389 - if (ret == 0) { 390 - struct drm_framebuffer *new_fb = plane->state->fb; 391 - if (new_fb) 392 - drm_framebuffer_reference(new_fb); 393 - plane->fb = new_fb; 394 - plane->crtc = plane->state->crtc; 395 - 396 - if (plane->old_fb) 397 - drm_framebuffer_unreference(plane->old_fb); 398 - } 399 - plane->old_fb = NULL; 400 - } 385 + drm_atomic_clean_old_fb(dev, plane_mask, ret); 401 386 402 387 if (ret == -EDEADLK) 403 388 goto backoff; ··· 1227 1236 struct drm_fb_helper *fb_helper = info->par; 1228 1237 struct drm_device *dev = fb_helper->dev; 1229 1238 struct drm_atomic_state *state; 1239 + struct drm_plane *plane; 1230 1240 int i, ret; 1241 + unsigned plane_mask; 1231 1242 1232 1243 state = drm_atomic_state_alloc(dev); 1233 1244 if (!state) ··· 1237 1244 1238 1245 state->acquire_ctx = dev->mode_config.acquire_ctx; 1239 1246 retry: 1247 + plane_mask = 0; 1240 1248 for(i = 0; i < fb_helper->crtc_count; i++) { 1241 1249 struct drm_mode_set *mode_set; 1242 1250 1243 1251 mode_set = &fb_helper->crtc_info[i].mode_set; 1244 - 1245 - mode_set->crtc->primary->old_fb = 
mode_set->crtc->primary->fb; 1246 1252 1247 1253 mode_set->x = var->xoffset; 1248 1254 mode_set->y = var->yoffset; ··· 1249 1257 ret = __drm_atomic_helper_set_config(mode_set, state); 1250 1258 if (ret != 0) 1251 1259 goto fail; 1260 + 1261 + plane = mode_set->crtc->primary; 1262 + plane_mask |= drm_plane_index(plane); 1263 + plane->old_fb = plane->fb; 1252 1264 } 1253 1265 1254 1266 ret = drm_atomic_commit(state); ··· 1264 1268 1265 1269 1266 1270 fail: 1267 - for(i = 0; i < fb_helper->crtc_count; i++) { 1268 - struct drm_mode_set *mode_set; 1269 - struct drm_plane *plane; 1270 - 1271 - mode_set = &fb_helper->crtc_info[i].mode_set; 1272 - plane = mode_set->crtc->primary; 1273 - 1274 - if (ret == 0) { 1275 - struct drm_framebuffer *new_fb = plane->state->fb; 1276 - 1277 - if (new_fb) 1278 - drm_framebuffer_reference(new_fb); 1279 - plane->fb = new_fb; 1280 - plane->crtc = plane->state->crtc; 1281 - 1282 - if (plane->old_fb) 1283 - drm_framebuffer_unreference(plane->old_fb); 1284 - } 1285 - plane->old_fb = NULL; 1286 - } 1271 + drm_atomic_clean_old_fb(dev, plane_mask, ret); 1287 1272 1288 1273 if (ret == -EDEADLK) 1289 1274 goto backoff;
+4
drivers/gpu/drm/i915/i915_drv.h
··· 351 351 /* hsw/bdw */ 352 352 DPLL_ID_WRPLL1 = 0, 353 353 DPLL_ID_WRPLL2 = 1, 354 + DPLL_ID_SPLL = 2, 355 + 354 356 /* skl */ 355 357 DPLL_ID_SKL_DPLL1 = 0, 356 358 DPLL_ID_SKL_DPLL2 = 1, ··· 369 367 370 368 /* hsw, bdw */ 371 369 uint32_t wrpll; 370 + uint32_t spll; 372 371 373 372 /* skl */ 374 373 /* ··· 2651 2648 int enable_cmd_parser; 2652 2649 /* leave bools at the end to not create holes */ 2653 2650 bool enable_hangcheck; 2651 + bool fastboot; 2654 2652 bool prefault_disable; 2655 2653 bool load_detect_test; 2656 2654 bool reset;
+7 -1
drivers/gpu/drm/i915/i915_gem.c
··· 3809 3809 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3810 3810 struct drm_file *file) 3811 3811 { 3812 + struct drm_i915_private *dev_priv = dev->dev_private; 3812 3813 struct drm_i915_gem_caching *args = data; 3813 3814 struct drm_i915_gem_object *obj; 3814 3815 enum i915_cache_level level; ··· 3838 3837 return -EINVAL; 3839 3838 } 3840 3839 3840 + intel_runtime_pm_get(dev_priv); 3841 + 3841 3842 ret = i915_mutex_lock_interruptible(dev); 3842 3843 if (ret) 3843 - return ret; 3844 + goto rpm_put; 3844 3845 3845 3846 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3846 3847 if (&obj->base == NULL) { ··· 3855 3852 drm_gem_object_unreference(&obj->base); 3856 3853 unlock: 3857 3854 mutex_unlock(&dev->struct_mutex); 3855 + rpm_put: 3856 + intel_runtime_pm_put(dev_priv); 3857 + 3858 3858 return ret; 3859 3859 } 3860 3860
+5
drivers/gpu/drm/i915/i915_params.c
··· 40 40 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), 41 41 .disable_power_well = -1, 42 42 .enable_ips = 1, 43 + .fastboot = 0, 43 44 .prefault_disable = 0, 44 45 .load_detect_test = 0, 45 46 .reset = true, ··· 133 132 134 133 module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); 135 134 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); 135 + 136 + module_param_named(fastboot, i915.fastboot, bool, 0600); 137 + MODULE_PARM_DESC(fastboot, 138 + "Try to skip unnecessary mode sets at boot time (default: false)"); 136 139 137 140 module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); 138 141 MODULE_PARM_DESC(prefault_disable,
+4 -27
drivers/gpu/drm/i915/intel_crt.c
··· 138 138 pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); 139 139 } 140 140 141 - static void hsw_crt_pre_enable(struct intel_encoder *encoder) 142 - { 143 - struct drm_device *dev = encoder->base.dev; 144 - struct drm_i915_private *dev_priv = dev->dev_private; 145 - 146 - WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n"); 147 - I915_WRITE(SPLL_CTL, 148 - SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC); 149 - POSTING_READ(SPLL_CTL); 150 - udelay(20); 151 - } 152 - 153 141 /* Note: The caller is required to filter out dpms modes not supported by the 154 142 * platform. */ 155 143 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) ··· 204 216 intel_disable_crt(encoder); 205 217 } 206 218 207 - static void hsw_crt_post_disable(struct intel_encoder *encoder) 208 - { 209 - struct drm_device *dev = encoder->base.dev; 210 - struct drm_i915_private *dev_priv = dev->dev_private; 211 - uint32_t val; 212 - 213 - DRM_DEBUG_KMS("Disabling SPLL\n"); 214 - val = I915_READ(SPLL_CTL); 215 - WARN_ON(!(val & SPLL_PLL_ENABLE)); 216 - I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); 217 - POSTING_READ(SPLL_CTL); 218 - } 219 - 220 219 static void intel_enable_crt(struct intel_encoder *encoder) 221 220 { 222 221 struct intel_crt *crt = intel_encoder_to_crt(encoder); ··· 255 280 if (HAS_DDI(dev)) { 256 281 pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; 257 282 pipe_config->port_clock = 135000 * 2; 283 + 284 + pipe_config->dpll_hw_state.wrpll = 0; 285 + pipe_config->dpll_hw_state.spll = 286 + SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; 258 287 } 259 288 260 289 return true; ··· 839 860 if (HAS_DDI(dev)) { 840 861 crt->base.get_config = hsw_crt_get_config; 841 862 crt->base.get_hw_state = intel_ddi_get_hw_state; 842 - crt->base.pre_enable = hsw_crt_pre_enable; 843 - crt->base.post_disable = hsw_crt_post_disable; 844 863 } else { 845 864 crt->base.get_config = intel_crt_get_config; 846 865 
crt->base.get_hw_state = intel_crt_get_hw_state;
+65 -10
drivers/gpu/drm/i915/intel_ddi.c
··· 1286 1286 } 1287 1287 1288 1288 crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); 1289 + } else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) { 1290 + struct drm_atomic_state *state = crtc_state->base.state; 1291 + struct intel_shared_dpll_config *spll = 1292 + &intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL]; 1293 + 1294 + if (spll->crtc_mask && 1295 + WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll)) 1296 + return false; 1297 + 1298 + crtc_state->shared_dpll = DPLL_ID_SPLL; 1299 + spll->hw_state.spll = crtc_state->dpll_hw_state.spll; 1300 + spll->crtc_mask |= 1 << intel_crtc->pipe; 1289 1301 } 1290 1302 1291 1303 return true; ··· 2449 2437 } 2450 2438 } 2451 2439 2452 - static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, 2440 + static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, 2453 2441 struct intel_shared_dpll *pll) 2454 2442 { 2455 2443 I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); ··· 2457 2445 udelay(20); 2458 2446 } 2459 2447 2460 - static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, 2448 + static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, 2461 2449 struct intel_shared_dpll *pll) 2450 + { 2451 + I915_WRITE(SPLL_CTL, pll->config.hw_state.spll); 2452 + POSTING_READ(SPLL_CTL); 2453 + udelay(20); 2454 + } 2455 + 2456 + static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, 2457 + struct intel_shared_dpll *pll) 2462 2458 { 2463 2459 uint32_t val; 2464 2460 ··· 2475 2455 POSTING_READ(WRPLL_CTL(pll->id)); 2476 2456 } 2477 2457 2478 - static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, 2479 - struct intel_shared_dpll *pll, 2480 - struct intel_dpll_hw_state *hw_state) 2458 + static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, 2459 + struct intel_shared_dpll *pll) 2460 + { 2461 + uint32_t val; 2462 + 2463 + val = I915_READ(SPLL_CTL); 2464 + I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); 2465 + 
POSTING_READ(SPLL_CTL); 2466 + } 2467 + 2468 + static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, 2469 + struct intel_shared_dpll *pll, 2470 + struct intel_dpll_hw_state *hw_state) 2481 2471 { 2482 2472 uint32_t val; 2483 2473 ··· 2500 2470 return val & WRPLL_PLL_ENABLE; 2501 2471 } 2502 2472 2473 + static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, 2474 + struct intel_shared_dpll *pll, 2475 + struct intel_dpll_hw_state *hw_state) 2476 + { 2477 + uint32_t val; 2478 + 2479 + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2480 + return false; 2481 + 2482 + val = I915_READ(SPLL_CTL); 2483 + hw_state->spll = val; 2484 + 2485 + return val & SPLL_PLL_ENABLE; 2486 + } 2487 + 2488 + 2503 2489 static const char * const hsw_ddi_pll_names[] = { 2504 2490 "WRPLL 1", 2505 2491 "WRPLL 2", 2492 + "SPLL" 2506 2493 }; 2507 2494 2508 2495 static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) 2509 2496 { 2510 2497 int i; 2511 2498 2512 - dev_priv->num_shared_dpll = 2; 2499 + dev_priv->num_shared_dpll = 3; 2513 2500 2514 - for (i = 0; i < dev_priv->num_shared_dpll; i++) { 2501 + for (i = 0; i < 2; i++) { 2515 2502 dev_priv->shared_dplls[i].id = i; 2516 2503 dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; 2517 - dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable; 2518 - dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable; 2504 + dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable; 2505 + dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable; 2519 2506 dev_priv->shared_dplls[i].get_hw_state = 2520 - hsw_ddi_pll_get_hw_state; 2507 + hsw_ddi_wrpll_get_hw_state; 2521 2508 } 2509 + 2510 + /* SPLL is special, but needs to be initialized anyway.. 
*/ 2511 + dev_priv->shared_dplls[i].id = i; 2512 + dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; 2513 + dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable; 2514 + dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable; 2515 + dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state; 2516 + 2522 2517 } 2523 2518 2524 2519 static const char * const skl_ddi_pll_names[] = {
+27 -10
drivers/gpu/drm/i915/intel_display.c
··· 2646 2646 return; 2647 2647 2648 2648 valid_fb: 2649 - plane_state->src_x = plane_state->src_y = 0; 2649 + plane_state->src_x = 0; 2650 + plane_state->src_y = 0; 2650 2651 plane_state->src_w = fb->width << 16; 2651 2652 plane_state->src_h = fb->height << 16; 2652 2653 2653 - plane_state->crtc_x = plane_state->src_y = 0; 2654 + plane_state->crtc_x = 0; 2655 + plane_state->crtc_y = 0; 2654 2656 plane_state->crtc_w = fb->width; 2655 2657 plane_state->crtc_h = fb->height; 2656 2658 ··· 4239 4237 struct intel_shared_dpll *pll; 4240 4238 struct intel_shared_dpll_config *shared_dpll; 4241 4239 enum intel_dpll_id i; 4240 + int max = dev_priv->num_shared_dpll; 4242 4241 4243 4242 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); 4244 4243 ··· 4274 4271 WARN_ON(shared_dpll[i].crtc_mask); 4275 4272 4276 4273 goto found; 4277 - } 4274 + } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv)) 4275 + /* Do not consider SPLL */ 4276 + max = 2; 4278 4277 4279 - for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4278 + for (i = 0; i < max; i++) { 4280 4279 pll = &dev_priv->shared_dplls[i]; 4281 4280 4282 4281 /* Only want to check enabled timings first */ ··· 9728 9723 case PORT_CLK_SEL_WRPLL2: 9729 9724 pipe_config->shared_dpll = DPLL_ID_WRPLL2; 9730 9725 break; 9726 + case PORT_CLK_SEL_SPLL: 9727 + pipe_config->shared_dpll = DPLL_ID_SPLL; 9731 9728 } 9732 9729 } 9733 9730 ··· 12010 12003 pipe_config->dpll_hw_state.cfgcr1, 12011 12004 pipe_config->dpll_hw_state.cfgcr2); 12012 12005 } else if (HAS_DDI(dev)) { 12013 - DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n", 12006 + DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", 12014 12007 pipe_config->ddi_pll_sel, 12015 - pipe_config->dpll_hw_state.wrpll); 12008 + pipe_config->dpll_hw_state.wrpll, 12009 + pipe_config->dpll_hw_state.spll); 12016 12010 } else { 12017 12011 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " 12018 12012 "fp0: 0x%x, fp1: 0x%x\n", 
··· 12536 12528 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12537 12529 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12538 12530 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12531 + PIPE_CONF_CHECK_X(dpll_hw_state.spll); 12539 12532 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12540 12533 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12541 12534 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); ··· 13041 13032 struct intel_crtc_state *pipe_config = 13042 13033 to_intel_crtc_state(crtc_state); 13043 13034 13035 + memset(&to_intel_crtc(crtc)->atomic, 0, 13036 + sizeof(struct intel_crtc_atomic_commit)); 13037 + 13044 13038 /* Catch I915_MODE_FLAG_INHERITED */ 13045 13039 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) 13046 13040 crtc_state->mode_changed = true; ··· 13068 13056 if (ret) 13069 13057 return ret; 13070 13058 13071 - if (intel_pipe_config_compare(state->dev, 13059 + if (i915.fastboot && 13060 + intel_pipe_config_compare(state->dev, 13072 13061 to_intel_crtc_state(crtc->state), 13073 13062 pipe_config, true)) { 13074 13063 crtc_state->mode_changed = false; ··· 14377 14364 static struct drm_framebuffer * 14378 14365 intel_user_framebuffer_create(struct drm_device *dev, 14379 14366 struct drm_file *filp, 14380 - struct drm_mode_fb_cmd2 *mode_cmd) 14367 + struct drm_mode_fb_cmd2 *user_mode_cmd) 14381 14368 { 14382 14369 struct drm_i915_gem_object *obj; 14370 + struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 14383 14371 14384 14372 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 14385 - mode_cmd->handles[0])); 14373 + mode_cmd.handles[0])); 14386 14374 if (&obj->base == NULL) 14387 14375 return ERR_PTR(-ENOENT); 14388 14376 14389 - return intel_framebuffer_create(dev, mode_cmd, obj); 14377 + return intel_framebuffer_create(dev, &mode_cmd, obj); 14390 14378 } 14391 14379 14392 14380 #ifndef CONFIG_DRM_FBDEV_EMULATION ··· 14718 14704 14719 14705 /* Apple Macbook 2,1 (Core 2 T7400) */ 14720 14706 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, 14707 + 14708 + /* 
Apple Macbook 4,1 */ 14709 + { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, 14721 14710 14722 14711 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 14723 14712 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+6 -4
drivers/gpu/drm/i915/intel_pm.c
··· 4449 4449 POSTING_READ(GEN6_RPNSWREQ); 4450 4450 4451 4451 dev_priv->rps.cur_freq = val; 4452 - trace_intel_gpu_freq_change(val * 50); 4452 + trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4453 4453 } 4454 4454 4455 4455 static void valleyview_set_rps(struct drm_device *dev, u8 val) ··· 7255 7255 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) 7256 7256 { 7257 7257 if (IS_GEN9(dev_priv->dev)) 7258 - return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER; 7258 + return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, 7259 + GEN9_FREQ_SCALER); 7259 7260 else if (IS_CHERRYVIEW(dev_priv->dev)) 7260 7261 return chv_gpu_freq(dev_priv, val); 7261 7262 else if (IS_VALLEYVIEW(dev_priv->dev)) ··· 7268 7267 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) 7269 7268 { 7270 7269 if (IS_GEN9(dev_priv->dev)) 7271 - return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER; 7270 + return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, 7271 + GT_FREQUENCY_MULTIPLIER); 7272 7272 else if (IS_CHERRYVIEW(dev_priv->dev)) 7273 7273 return chv_freq_opcode(dev_priv, val); 7274 7274 else if (IS_VALLEYVIEW(dev_priv->dev)) 7275 7275 return byt_freq_opcode(dev_priv, val); 7276 7276 else 7277 - return val / GT_FREQUENCY_MULTIPLIER; 7277 + return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); 7278 7278 } 7279 7279 7280 7280 struct request_boost {
+5 -6
drivers/gpu/drm/mgag200/mgag200_cursor.c
··· 70 70 BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); 71 71 BUG_ON(pixels_current == pixels_prev); 72 72 73 + if (!handle || !file_priv) { 74 + mga_hide_cursor(mdev); 75 + return 0; 76 + } 77 + 73 78 obj = drm_gem_object_lookup(dev, file_priv, handle); 74 79 if (!obj) 75 80 return -ENOENT; ··· 91 86 WREG8(MGA_CURPOSXH, 0); 92 87 mgag200_bo_unreserve(pixels_1); 93 88 goto out_unreserve1; 94 - } 95 - 96 - if (!handle) { 97 - mga_hide_cursor(mdev); 98 - ret = 0; 99 - goto out1; 100 89 } 101 90 102 91 /* Move cursor buffers into VRAM if they aren't already */
+11 -4
drivers/gpu/drm/radeon/radeon_object.c
··· 221 221 if (!(rdev->flags & RADEON_IS_PCIE)) 222 222 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 223 223 224 + /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx 225 + * See https://bugs.freedesktop.org/show_bug.cgi?id=91268 226 + */ 227 + if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635) 228 + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 229 + 224 230 #ifdef CONFIG_X86_32 225 231 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit 226 232 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 227 233 */ 228 - bo->flags &= ~RADEON_GEM_GTT_WC; 234 + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 229 235 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) 230 236 /* Don't try to enable write-combining when it can't work, or things 231 237 * may be slow ··· 241 235 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ 242 236 thanks to write-combining 243 237 244 - DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " 245 - "better performance thanks to write-combining\n"); 246 - bo->flags &= ~RADEON_GEM_GTT_WC; 238 + if (bo->flags & RADEON_GEM_GTT_WC) 239 + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " 240 + "better performance thanks to write-combining\n"); 241 + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 247 242 #endif 248 243 249 244 radeon_ttm_placement_from_domain(bo, domain);
+1 -2
drivers/gpu/drm/radeon/radeon_pm.c
··· 1542 1542 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1543 1543 if (ret) 1544 1544 DRM_ERROR("failed to create device file for power method\n"); 1545 - if (!ret) 1546 - rdev->pm.sysfs_initialized = true; 1545 + rdev->pm.sysfs_initialized = true; 1547 1546 } 1548 1547 1549 1548 mutex_lock(&rdev->pm.mutex);
+1 -1
drivers/gpu/drm/radeon/si_dpm.c
··· 2927 2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2928 2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 2929 2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, 2930 - { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, 2930 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, 2931 2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 2932 2932 { 0, 0, 0, 0 }, 2933 2933 };
+5 -4
drivers/gpu/drm/vc4/vc4_crtc.c
··· 168 168 struct drm_connector *connector; 169 169 170 170 drm_for_each_connector(connector, crtc->dev) { 171 - if (connector && connector->state->crtc == crtc) { 171 + if (connector->state->crtc == crtc) { 172 172 struct drm_encoder *encoder = connector->encoder; 173 173 struct vc4_encoder *vc4_encoder = 174 174 to_vc4_encoder(encoder); ··· 401 401 dlist_next++; 402 402 403 403 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 404 - (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist); 404 + (u32 __iomem *)vc4_crtc->dlist - 405 + (u32 __iomem *)vc4->hvs->dlist); 405 406 406 407 /* Make the next display list start after ours. */ 407 408 vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); ··· 592 591 * that will take too much. 593 592 */ 594 593 primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); 595 - if (!primary_plane) { 594 + if (IS_ERR(primary_plane)) { 596 595 dev_err(dev, "failed to construct primary plane\n"); 597 596 ret = PTR_ERR(primary_plane); 598 597 goto err; 599 598 } 600 599 601 600 cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); 602 - if (!cursor_plane) { 601 + if (IS_ERR(cursor_plane)) { 603 602 dev_err(dev, "failed to construct cursor plane\n"); 604 603 ret = PTR_ERR(cursor_plane); 605 604 goto err_primary;
-1
drivers/gpu/drm/vc4/vc4_drv.c
··· 259 259 .remove = vc4_platform_drm_remove, 260 260 .driver = { 261 261 .name = "vc4-drm", 262 - .owner = THIS_MODULE, 263 262 .of_match_table = vc4_of_match, 264 263 }, 265 264 };
+4 -4
drivers/gpu/drm/vc4/vc4_hvs.c
··· 75 75 for (i = 0; i < 64; i += 4) { 76 76 DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", 77 77 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D", 78 - ((uint32_t *)vc4->hvs->dlist)[i + 0], 79 - ((uint32_t *)vc4->hvs->dlist)[i + 1], 80 - ((uint32_t *)vc4->hvs->dlist)[i + 2], 81 - ((uint32_t *)vc4->hvs->dlist)[i + 3]); 78 + readl((u32 __iomem *)vc4->hvs->dlist + i + 0), 79 + readl((u32 __iomem *)vc4->hvs->dlist + i + 1), 80 + readl((u32 __iomem *)vc4->hvs->dlist + i + 2), 81 + readl((u32 __iomem *)vc4->hvs->dlist + i + 3)); 82 82 } 83 83 } 84 84
+14 -4
drivers/gpu/drm/vc4/vc4_plane.c
··· 70 70 return state->fb && state->crtc; 71 71 } 72 72 73 - struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) 73 + static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) 74 74 { 75 75 struct vc4_plane_state *vc4_state; 76 76 ··· 97 97 return &vc4_state->base; 98 98 } 99 99 100 - void vc4_plane_destroy_state(struct drm_plane *plane, 101 - struct drm_plane_state *state) 100 + static void vc4_plane_destroy_state(struct drm_plane *plane, 101 + struct drm_plane_state *state) 102 102 { 103 103 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); 104 104 ··· 108 108 } 109 109 110 110 /* Called during init to allocate the plane's atomic state. */ 111 - void vc4_plane_reset(struct drm_plane *plane) 111 + static void vc4_plane_reset(struct drm_plane *plane) 112 112 { 113 113 struct vc4_plane_state *vc4_state; 114 114 ··· 156 156 int crtc_y = state->crtc_y; 157 157 int crtc_w = state->crtc_w; 158 158 int crtc_h = state->crtc_h; 159 + 160 + if (state->crtc_w << 16 != state->src_w || 161 + state->crtc_h << 16 != state->src_h) { 162 + /* We don't support scaling yet, which involves 163 + * allocating the LBM memory for scaling temporary 164 + * storage, and putting filter kernels in the HVS 165 + * context. 166 + */ 167 + return -EINVAL; 168 + } 159 169 160 170 if (crtc_x < 0) { 161 171 offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
+1
drivers/i2c/busses/Kconfig
··· 126 126 Sunrise Point-LP (PCH) 127 127 DNV (SOC) 128 128 Broxton (SOC) 129 + Lewisburg (PCH) 129 130 130 131 This driver can also be built as a module. If so, the module 131 132 will be called i2c-i801.
+6
drivers/i2c/busses/i2c-i801.c
··· 62 62 * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes 63 63 * DNV (SOC) 0x19df 32 hard yes yes yes 64 64 * Broxton (SOC) 0x5ad4 32 hard yes yes yes 65 + * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes 66 + * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes 65 67 * 66 68 * Features supported by this driver: 67 69 * Software PEC no ··· 208 206 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 209 207 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df 210 208 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 209 + #define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3 210 + #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 211 211 212 212 struct i801_mux_config { 213 213 char *gpio_chip; ··· 873 869 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, 874 870 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, 875 871 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, 872 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, 873 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) }, 876 874 { 0, } 877 875 }; 878 876
+1
drivers/i2c/busses/i2c-imx.c
··· 50 50 #include <linux/of_device.h> 51 51 #include <linux/of_dma.h> 52 52 #include <linux/of_gpio.h> 53 + #include <linux/pinctrl/consumer.h> 53 54 #include <linux/platform_data/i2c-imx.h> 54 55 #include <linux/platform_device.h> 55 56 #include <linux/sched.h>
+3 -1
drivers/i2c/busses/i2c-xiic.c
··· 662 662 663 663 static void xiic_start_xfer(struct xiic_i2c *i2c) 664 664 { 665 - 665 + spin_lock(&i2c->lock); 666 + xiic_reinit(i2c); 666 667 __xiic_start_xfer(i2c); 668 + spin_unlock(&i2c->lock); 667 669 } 668 670 669 671 static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+1 -1
drivers/i2c/i2c-core.c
··· 715 715 if (wakeirq > 0 && wakeirq != client->irq) 716 716 status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); 717 717 else if (client->irq > 0) 718 - status = dev_pm_set_wake_irq(dev, wakeirq); 718 + status = dev_pm_set_wake_irq(dev, client->irq); 719 719 else 720 720 status = 0; 721 721
+1 -1
drivers/iio/adc/ad7793.c
··· 101 101 #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ 102 102 103 103 /* ID Register Bit Designations (AD7793_REG_ID) */ 104 - #define AD7785_ID 0xB 104 + #define AD7785_ID 0x3 105 105 #define AD7792_ID 0xA 106 106 #define AD7793_ID 0xB 107 107 #define AD7794_ID 0xF
+16 -6
drivers/iio/adc/vf610_adc.c
··· 106 106 107 107 #define DEFAULT_SAMPLE_TIME 1000 108 108 109 + /* V at 25°C of 696 mV */ 110 + #define VF610_VTEMP25_3V0 950 111 + /* V at 25°C of 699 mV */ 112 + #define VF610_VTEMP25_3V3 867 113 + /* Typical sensor slope coefficient at all temperatures */ 114 + #define VF610_TEMP_SLOPE_COEFF 1840 115 + 109 116 enum clk_sel { 110 117 VF610_ADCIOC_BUSCLK_SET, 111 118 VF610_ADCIOC_ALTCLK_SET, ··· 204 197 adc_feature->clk_div = 8; 205 198 } 206 199 200 + adck_rate = ipg_rate / adc_feature->clk_div; 201 + 207 202 /* 208 203 * Determine the long sample time adder value to be used based 209 204 * on the default minimum sample time provided. ··· 230 221 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode 231 222 * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles 232 223 */ 233 - adck_rate = ipg_rate / info->adc_feature.clk_div; 234 224 for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) 235 225 info->sample_freq_avail[i] = 236 226 adck_rate / (6 + vf610_hw_avgs[i] * ··· 671 663 break; 672 664 case IIO_TEMP: 673 665 /* 674 - * Calculate in degree Celsius times 1000 675 - * Using sensor slope of 1.84 mV/°C and 676 - * V at 25°C of 696 mV 677 - */ 678 - *val = 25000 - ((int)info->value - 864) * 1000000 / 1840; 666 + * Calculate in degree Celsius times 1000 667 + * Using the typical sensor slope of 1.84 mV/°C 668 + * and VREFH_ADC at 3.3V, V at 25°C of 699 mV 669 + */ 670 + *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) * 671 + 1000000 / VF610_TEMP_SLOPE_COEFF; 672 + 679 673 break; 680 674 default: 681 675 mutex_unlock(&indio_dev->mlock);
+1
drivers/iio/adc/xilinx-xadc-core.c
··· 841 841 case XADC_REG_VCCINT: 842 842 case XADC_REG_VCCAUX: 843 843 case XADC_REG_VREFP: 844 + case XADC_REG_VREFN: 844 845 case XADC_REG_VCCBRAM: 845 846 case XADC_REG_VCCPINT: 846 847 case XADC_REG_VCCPAUX:
+64 -27
drivers/iio/dac/ad5064.c
··· 113 113 ID_AD5065, 114 114 ID_AD5628_1, 115 115 ID_AD5628_2, 116 + ID_AD5629_1, 117 + ID_AD5629_2, 116 118 ID_AD5648_1, 117 119 ID_AD5648_2, 118 120 ID_AD5666_1, 119 121 ID_AD5666_2, 120 122 ID_AD5668_1, 121 123 ID_AD5668_2, 124 + ID_AD5669_1, 125 + ID_AD5669_2, 122 126 }; 123 127 124 128 static int ad5064_write(struct ad5064_state *st, unsigned int cmd, ··· 295 291 { }, 296 292 }; 297 293 298 - #define AD5064_CHANNEL(chan, addr, bits) { \ 294 + #define AD5064_CHANNEL(chan, addr, bits, _shift) { \ 299 295 .type = IIO_VOLTAGE, \ 300 296 .indexed = 1, \ 301 297 .output = 1, \ ··· 307 303 .sign = 'u', \ 308 304 .realbits = (bits), \ 309 305 .storagebits = 16, \ 310 - .shift = 20 - bits, \ 306 + .shift = (_shift), \ 311 307 }, \ 312 308 .ext_info = ad5064_ext_info, \ 313 309 } 314 310 315 - #define DECLARE_AD5064_CHANNELS(name, bits) \ 311 + #define DECLARE_AD5064_CHANNELS(name, bits, shift) \ 316 312 const struct iio_chan_spec name[] = { \ 317 - AD5064_CHANNEL(0, 0, bits), \ 318 - AD5064_CHANNEL(1, 1, bits), \ 319 - AD5064_CHANNEL(2, 2, bits), \ 320 - AD5064_CHANNEL(3, 3, bits), \ 321 - AD5064_CHANNEL(4, 4, bits), \ 322 - AD5064_CHANNEL(5, 5, bits), \ 323 - AD5064_CHANNEL(6, 6, bits), \ 324 - AD5064_CHANNEL(7, 7, bits), \ 313 + AD5064_CHANNEL(0, 0, bits, shift), \ 314 + AD5064_CHANNEL(1, 1, bits, shift), \ 315 + AD5064_CHANNEL(2, 2, bits, shift), \ 316 + AD5064_CHANNEL(3, 3, bits, shift), \ 317 + AD5064_CHANNEL(4, 4, bits, shift), \ 318 + AD5064_CHANNEL(5, 5, bits, shift), \ 319 + AD5064_CHANNEL(6, 6, bits, shift), \ 320 + AD5064_CHANNEL(7, 7, bits, shift), \ 325 321 } 326 322 327 - #define DECLARE_AD5065_CHANNELS(name, bits) \ 323 + #define DECLARE_AD5065_CHANNELS(name, bits, shift) \ 328 324 const struct iio_chan_spec name[] = { \ 329 - AD5064_CHANNEL(0, 0, bits), \ 330 - AD5064_CHANNEL(1, 3, bits), \ 325 + AD5064_CHANNEL(0, 0, bits, shift), \ 326 + AD5064_CHANNEL(1, 3, bits, shift), \ 331 327 } 332 328 333 - static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); 
334 - static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); 335 - static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); 329 + static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8); 330 + static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6); 331 + static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4); 336 332 337 - static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); 338 - static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); 339 - static DECLARE_AD5065_CHANNELS(ad5065_channels, 16); 333 + static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8); 334 + static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6); 335 + static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4); 336 + 337 + static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4); 338 + static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0); 340 339 341 340 static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { 342 341 [ID_AD5024] = { ··· 389 382 .channels = ad5024_channels, 390 383 .num_channels = 8, 391 384 }, 385 + [ID_AD5629_1] = { 386 + .shared_vref = true, 387 + .internal_vref = 2500000, 388 + .channels = ad5629_channels, 389 + .num_channels = 8, 390 + }, 391 + [ID_AD5629_2] = { 392 + .shared_vref = true, 393 + .internal_vref = 5000000, 394 + .channels = ad5629_channels, 395 + .num_channels = 8, 396 + }, 392 397 [ID_AD5648_1] = { 393 398 .shared_vref = true, 394 399 .internal_vref = 2500000, ··· 435 416 .shared_vref = true, 436 417 .internal_vref = 5000000, 437 418 .channels = ad5064_channels, 419 + .num_channels = 8, 420 + }, 421 + [ID_AD5669_1] = { 422 + .shared_vref = true, 423 + .internal_vref = 2500000, 424 + .channels = ad5669_channels, 425 + .num_channels = 8, 426 + }, 427 + [ID_AD5669_2] = { 428 + .shared_vref = true, 429 + .internal_vref = 5000000, 430 + .channels = ad5669_channels, 438 431 .num_channels = 8, 439 432 }, 440 433 }; ··· 628 597 unsigned int addr, unsigned int val) 629 598 { 630 599 struct i2c_client *i2c = to_i2c_client(st->dev); 600 + int ret; 631 601 632 602 
st->data.i2c[0] = (cmd << 4) | addr; 633 603 put_unaligned_be16(val, &st->data.i2c[1]); 634 - return i2c_master_send(i2c, st->data.i2c, 3); 604 + 605 + ret = i2c_master_send(i2c, st->data.i2c, 3); 606 + if (ret < 0) 607 + return ret; 608 + 609 + return 0; 635 610 } 636 611 637 612 static int ad5064_i2c_probe(struct i2c_client *i2c, ··· 653 616 } 654 617 655 618 static const struct i2c_device_id ad5064_i2c_ids[] = { 656 - {"ad5629-1", ID_AD5628_1}, 657 - {"ad5629-2", ID_AD5628_2}, 658 - {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */ 659 - {"ad5669-1", ID_AD5668_1}, 660 - {"ad5669-2", ID_AD5668_2}, 661 - {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */ 619 + {"ad5629-1", ID_AD5629_1}, 620 + {"ad5629-2", ID_AD5629_2}, 621 + {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */ 622 + {"ad5669-1", ID_AD5669_1}, 623 + {"ad5669-2", ID_AD5669_2}, 624 + {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */ 662 625 {} 663 626 }; 664 627 MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
+4 -4
drivers/iio/humidity/si7020.c
··· 50 50 51 51 switch (mask) { 52 52 case IIO_CHAN_INFO_RAW: 53 - ret = i2c_smbus_read_word_data(*client, 54 - chan->type == IIO_TEMP ? 55 - SI7020CMD_TEMP_HOLD : 56 - SI7020CMD_RH_HOLD); 53 + ret = i2c_smbus_read_word_swapped(*client, 54 + chan->type == IIO_TEMP ? 55 + SI7020CMD_TEMP_HOLD : 56 + SI7020CMD_RH_HOLD); 57 57 if (ret < 0) 58 58 return ret; 59 59 *val = ret >> 2;
+9 -4
drivers/irqchip/irq-gic-common.c
··· 84 84 writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); 85 85 86 86 /* 87 - * Disable all interrupts. Leave the PPI and SGIs alone 88 - * as they are enabled by redistributor registers. 87 + * Deactivate and disable all SPIs. Leave the PPI and SGIs 88 + * alone as they are in the redistributor registers on GICv3. 89 89 */ 90 - for (i = 32; i < gic_irqs; i += 32) 90 + for (i = 32; i < gic_irqs; i += 32) { 91 91 writel_relaxed(GICD_INT_EN_CLR_X32, 92 - base + GIC_DIST_ENABLE_CLEAR + i / 8); 92 + base + GIC_DIST_ACTIVE_CLEAR + i / 8); 93 + writel_relaxed(GICD_INT_EN_CLR_X32, 94 + base + GIC_DIST_ENABLE_CLEAR + i / 8); 95 + } 93 96 94 97 if (sync_access) 95 98 sync_access(); ··· 105 102 /* 106 103 * Deal with the banked PPI and SGI interrupts - disable all 107 104 * PPI interrupts, ensure all SGI interrupts are enabled. 105 + * Make sure everything is deactivated. 108 106 */ 107 + writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR); 109 108 writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); 110 109 writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); 111 110
+36 -2
drivers/irqchip/irq-gic.c
··· 73 73 union gic_base cpu_base; 74 74 #ifdef CONFIG_CPU_PM 75 75 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; 76 + u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; 76 77 u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; 77 78 u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; 78 79 u32 __percpu *saved_ppi_enable; 80 + u32 __percpu *saved_ppi_active; 79 81 u32 __percpu *saved_ppi_conf; 80 82 #endif 81 83 struct irq_domain *domain; ··· 568 566 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 569 567 gic_data[gic_nr].saved_spi_enable[i] = 570 568 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 569 + 570 + for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 571 + gic_data[gic_nr].saved_spi_active[i] = 572 + readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); 571 573 } 572 574 573 575 /* ··· 610 604 writel_relaxed(gic_data[gic_nr].saved_spi_target[i], 611 605 dist_base + GIC_DIST_TARGET + i * 4); 612 606 613 - for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 607 + for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { 608 + writel_relaxed(GICD_INT_EN_CLR_X32, 609 + dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); 614 610 writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], 615 611 dist_base + GIC_DIST_ENABLE_SET + i * 4); 612 + } 613 + 614 + for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { 615 + writel_relaxed(GICD_INT_EN_CLR_X32, 616 + dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); 617 + writel_relaxed(gic_data[gic_nr].saved_spi_active[i], 618 + dist_base + GIC_DIST_ACTIVE_SET + i * 4); 619 + } 616 620 617 621 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); 618 622 } ··· 647 631 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 648 632 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 649 633 634 + ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); 635 + for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 636 + ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); 637 + 650 638 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 651 639 for (i = 0; i < 
DIV_ROUND_UP(32, 16); i++) 652 640 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); ··· 674 654 return; 675 655 676 656 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 677 - for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 657 + for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { 658 + writel_relaxed(GICD_INT_EN_CLR_X32, 659 + dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); 678 660 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); 661 + } 662 + 663 + ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); 664 + for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { 665 + writel_relaxed(GICD_INT_EN_CLR_X32, 666 + dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); 667 + writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4); 668 + } 679 669 680 670 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 681 671 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) ··· 739 709 gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, 740 710 sizeof(u32)); 741 711 BUG_ON(!gic->saved_ppi_enable); 712 + 713 + gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, 714 + sizeof(u32)); 715 + BUG_ON(!gic->saved_ppi_active); 742 716 743 717 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, 744 718 sizeof(u32));
+27 -24
drivers/lightnvm/core.c
··· 160 160 } 161 161 EXPORT_SYMBOL(nvm_erase_blk); 162 162 163 - static void nvm_core_free(struct nvm_dev *dev) 164 - { 165 - kfree(dev); 166 - } 167 - 168 163 static int nvm_core_init(struct nvm_dev *dev) 169 164 { 170 165 struct nvm_id *id = &dev->identity; ··· 174 179 dev->sec_size = grp->csecs; 175 180 dev->oob_size = grp->sos; 176 181 dev->sec_per_pg = grp->fpg_sz / grp->csecs; 177 - dev->addr_mode = id->ppat; 178 - dev->addr_format = id->ppaf; 182 + memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); 179 183 180 184 dev->plane_mode = NVM_PLANE_SINGLE; 181 185 dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; 186 + 187 + if (grp->mtype != 0) { 188 + pr_err("nvm: memory type not supported\n"); 189 + return -EINVAL; 190 + } 191 + 192 + if (grp->fmtype != 0 && grp->fmtype != 1) { 193 + pr_err("nvm: flash type not supported\n"); 194 + return -EINVAL; 195 + } 182 196 183 197 if (grp->mpos & 0x020202) 184 198 dev->plane_mode = NVM_PLANE_DOUBLE; ··· 217 213 218 214 if (dev->mt) 219 215 dev->mt->unregister_mgr(dev); 220 - 221 - nvm_core_free(dev); 222 216 } 223 217 224 218 static int nvm_init(struct nvm_dev *dev) 225 219 { 226 220 struct nvmm_type *mt; 227 - int ret = 0; 221 + int ret = -EINVAL; 228 222 229 223 if (!dev->q || !dev->ops) 230 - return -EINVAL; 224 + return ret; 231 225 232 226 if (dev->ops->identity(dev->q, &dev->identity)) { 233 227 pr_err("nvm: device could not be identified\n"); 234 - ret = -EINVAL; 235 228 goto err; 236 229 } 237 230 ··· 274 273 dev->nr_chnls); 275 274 return 0; 276 275 err: 277 - nvm_free(dev); 278 276 pr_err("nvm: failed to initialize nvm\n"); 279 277 return ret; 280 278 } ··· 308 308 if (ret) 309 309 goto err_init; 310 310 311 - down_write(&nvm_lock); 312 - list_add(&dev->devices, &nvm_devices); 313 - up_write(&nvm_lock); 314 - 315 311 if (dev->ops->max_phys_sect > 1) { 316 312 dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, 317 313 "ppalist"); 318 314 if (!dev->ppalist_pool) { 319 315 pr_err("nvm: 
could not create ppa pool\n"); 320 - return -ENOMEM; 316 + ret = -ENOMEM; 317 + goto err_init; 321 318 } 322 319 } else if (dev->ops->max_phys_sect > 256) { 323 320 pr_info("nvm: max sectors supported is 256.\n"); 324 - return -EINVAL; 321 + ret = -EINVAL; 322 + goto err_init; 325 323 } 324 + 325 + down_write(&nvm_lock); 326 + list_add(&dev->devices, &nvm_devices); 327 + up_write(&nvm_lock); 326 328 327 329 return 0; 328 330 err_init: ··· 343 341 return; 344 342 } 345 343 346 - nvm_exit(dev); 347 - 348 344 down_write(&nvm_lock); 349 345 list_del(&dev->devices); 350 346 up_write(&nvm_lock); 347 + 348 + nvm_exit(dev); 349 + kfree(dev); 351 350 } 352 351 EXPORT_SYMBOL(nvm_unregister); 353 352 ··· 460 457 lockdep_assert_held(&nvm_lock); 461 458 462 459 del_gendisk(tdisk); 460 + blk_cleanup_queue(q); 461 + 463 462 if (tt->exit) 464 463 tt->exit(tdisk->private_data); 465 - 466 - blk_cleanup_queue(q); 467 464 468 465 put_disk(tdisk); 469 466 ··· 544 541 if (!dev->mt) 545 542 return 0; 546 543 547 - dev->mt->free_blocks_print(dev); 544 + dev->mt->lun_info_print(dev); 548 545 549 546 return 0; 550 547 }
+53 -20
drivers/lightnvm/gennvm.c
··· 60 60 lun->vlun.lun_id = i % dev->luns_per_chnl; 61 61 lun->vlun.chnl_id = i / dev->luns_per_chnl; 62 62 lun->vlun.nr_free_blocks = dev->blks_per_lun; 63 + lun->vlun.nr_inuse_blocks = 0; 64 + lun->vlun.nr_bad_blocks = 0; 63 65 } 64 66 return 0; 65 67 } 66 68 67 - static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, 69 + static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, 68 70 void *private) 69 71 { 70 72 struct gen_nvm *gn = private; 71 - struct gen_lun *lun = &gn->luns[lun_id]; 73 + struct nvm_dev *dev = gn->dev; 74 + struct gen_lun *lun; 72 75 struct nvm_block *blk; 73 76 int i; 74 77 75 - if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) 76 - return 0; 78 + ppa = dev_to_generic_addr(gn->dev, ppa); 79 + lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; 77 80 78 - i = -1; 79 - while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) { 81 + for (i = 0; i < nr_blocks; i++) { 82 + if (blks[i] == 0) 83 + continue; 84 + 80 85 blk = &lun->vlun.blocks[i]; 81 86 if (!blk) { 82 87 pr_err("gennvm: BB data is out of bounds.\n"); ··· 89 84 } 90 85 91 86 list_move_tail(&blk->list, &lun->bb_list); 87 + lun->vlun.nr_bad_blocks++; 92 88 } 93 89 94 90 return 0; ··· 142 136 list_move_tail(&blk->list, &lun->used_list); 143 137 blk->type = 1; 144 138 lun->vlun.nr_free_blocks--; 139 + lun->vlun.nr_inuse_blocks++; 145 140 } 146 141 } 147 142 ··· 171 164 block->id = cur_block_id++; 172 165 173 166 /* First block is reserved for device */ 174 - if (unlikely(lun_iter == 0 && blk_iter == 0)) 167 + if (unlikely(lun_iter == 0 && blk_iter == 0)) { 168 + lun->vlun.nr_free_blocks--; 175 169 continue; 170 + } 176 171 177 172 list_add_tail(&block->list, &lun->free_list); 178 173 } 179 174 180 175 if (dev->ops->get_bb_tbl) { 181 - ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, 182 - dev->blks_per_lun, gennvm_block_bb, gn); 176 + struct ppa_addr ppa; 177 + 178 + ppa.ppa = 0; 179 + ppa.g.ch = lun->vlun.chnl_id; 180 + ppa.g.lun 
= lun->vlun.id; 181 + ppa = generic_to_dev_addr(dev, ppa); 182 + 183 + ret = dev->ops->get_bb_tbl(dev->q, ppa, 184 + dev->blks_per_lun, 185 + gennvm_block_bb, gn); 183 186 if (ret) 184 187 pr_err("gennvm: could not read BB table\n"); 185 188 } ··· 216 199 if (!gn) 217 200 return -ENOMEM; 218 201 202 + gn->dev = dev; 219 203 gn->nr_luns = dev->nr_luns; 220 204 dev->mp = gn; 221 205 ··· 272 254 blk->type = 1; 273 255 274 256 lun->vlun.nr_free_blocks--; 257 + lun->vlun.nr_inuse_blocks++; 275 258 276 259 spin_unlock(&vlun->lock); 277 260 out: ··· 290 271 case 1: 291 272 list_move_tail(&blk->list, &lun->free_list); 292 273 lun->vlun.nr_free_blocks++; 274 + lun->vlun.nr_inuse_blocks--; 293 275 blk->type = 0; 294 276 break; 295 277 case 2: 296 278 list_move_tail(&blk->list, &lun->bb_list); 279 + lun->vlun.nr_bad_blocks++; 280 + lun->vlun.nr_inuse_blocks--; 297 281 break; 298 282 default: 299 283 WARN_ON_ONCE(1); 300 284 pr_err("gennvm: erroneous block type (%lu -> %u)\n", 301 285 blk->id, blk->type); 302 286 list_move_tail(&blk->list, &lun->bb_list); 287 + lun->vlun.nr_bad_blocks++; 288 + lun->vlun.nr_inuse_blocks--; 303 289 } 304 290 305 291 spin_unlock(&vlun->lock); ··· 316 292 317 293 if (rqd->nr_pages > 1) { 318 294 for (i = 0; i < rqd->nr_pages; i++) 319 - rqd->ppa_list[i] = addr_to_generic_mode(dev, 295 + rqd->ppa_list[i] = dev_to_generic_addr(dev, 320 296 rqd->ppa_list[i]); 321 297 } else { 322 - rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); 298 + rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr); 323 299 } 324 300 } 325 301 ··· 329 305 330 306 if (rqd->nr_pages > 1) { 331 307 for (i = 0; i < rqd->nr_pages; i++) 332 - rqd->ppa_list[i] = generic_to_addr_mode(dev, 308 + rqd->ppa_list[i] = generic_to_dev_addr(dev, 333 309 rqd->ppa_list[i]); 334 310 } else { 335 - rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); 311 + rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); 336 312 } 337 313 } 338 314 ··· 378 354 { 379 355 int i; 380 356 381 
- if (!dev->ops->set_bb) 357 + if (!dev->ops->set_bb_tbl) 382 358 return; 383 359 384 - if (dev->ops->set_bb(dev->q, rqd, 1)) 360 + if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) 385 361 return; 386 362 387 363 gennvm_addr_to_generic_mode(dev, rqd); ··· 464 440 return &gn->luns[lunid].vlun; 465 441 } 466 442 467 - static void gennvm_free_blocks_print(struct nvm_dev *dev) 443 + static void gennvm_lun_info_print(struct nvm_dev *dev) 468 444 { 469 445 struct gen_nvm *gn = dev->mp; 470 446 struct gen_lun *lun; 471 447 unsigned int i; 472 448 473 - gennvm_for_each_lun(gn, lun, i) 474 - pr_info("%s: lun%8u\t%u\n", 475 - dev->name, i, lun->vlun.nr_free_blocks); 449 + 450 + gennvm_for_each_lun(gn, lun, i) { 451 + spin_lock(&lun->vlun.lock); 452 + 453 + pr_info("%s: lun%8u\t%u\t%u\t%u\n", 454 + dev->name, i, 455 + lun->vlun.nr_free_blocks, 456 + lun->vlun.nr_inuse_blocks, 457 + lun->vlun.nr_bad_blocks); 458 + 459 + spin_unlock(&lun->vlun.lock); 460 + } 476 461 } 477 462 478 463 static struct nvmm_type gennvm = { ··· 499 466 .erase_blk = gennvm_erase_blk, 500 467 501 468 .get_lun = gennvm_get_lun, 502 - .free_blocks_print = gennvm_free_blocks_print, 469 + .lun_info_print = gennvm_lun_info_print, 503 470 }; 504 471 505 472 static int __init gennvm_module_init(void)
+2
drivers/lightnvm/gennvm.h
··· 35 35 }; 36 36 37 37 struct gen_nvm { 38 + struct nvm_dev *dev; 39 + 38 40 int nr_luns; 39 41 struct gen_lun *luns; 40 42 };
+31 -1
drivers/lightnvm/rrpc.c
··· 123 123 return blk->id * rrpc->dev->pgs_per_blk; 124 124 } 125 125 126 + static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev, 127 + struct ppa_addr r) 128 + { 129 + struct ppa_addr l; 130 + int secs, pgs, blks, luns; 131 + sector_t ppa = r.ppa; 132 + 133 + l.ppa = 0; 134 + 135 + div_u64_rem(ppa, dev->sec_per_pg, &secs); 136 + l.g.sec = secs; 137 + 138 + sector_div(ppa, dev->sec_per_pg); 139 + div_u64_rem(ppa, dev->sec_per_blk, &pgs); 140 + l.g.pg = pgs; 141 + 142 + sector_div(ppa, dev->pgs_per_blk); 143 + div_u64_rem(ppa, dev->blks_per_lun, &blks); 144 + l.g.blk = blks; 145 + 146 + sector_div(ppa, dev->blks_per_lun); 147 + div_u64_rem(ppa, dev->luns_per_chnl, &luns); 148 + l.g.lun = luns; 149 + 150 + sector_div(ppa, dev->luns_per_chnl); 151 + l.g.ch = ppa; 152 + 153 + return l; 154 + } 155 + 126 156 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) 127 157 { 128 158 struct ppa_addr paddr; 129 159 130 160 paddr.ppa = addr; 131 - return __linear_to_generic_addr(dev, paddr); 161 + return linear_to_generic_addr(dev, paddr); 132 162 } 133 163 134 164 /* requires lun->lock taken */
+13 -9
drivers/md/dm-crypt.c
··· 112 112 * and encrypts / decrypts at the same time. 113 113 */ 114 114 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, 115 - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; 115 + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, 116 + DM_CRYPT_EXIT_THREAD}; 116 117 117 118 /* 118 119 * The fields in here must be read only after initialization. ··· 1204 1203 if (!RB_EMPTY_ROOT(&cc->write_tree)) 1205 1204 goto pop_from_list; 1206 1205 1206 + if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { 1207 + spin_unlock_irq(&cc->write_thread_wait.lock); 1208 + break; 1209 + } 1210 + 1207 1211 __set_current_state(TASK_INTERRUPTIBLE); 1208 1212 __add_wait_queue(&cc->write_thread_wait, &wait); 1209 1213 1210 1214 spin_unlock_irq(&cc->write_thread_wait.lock); 1211 1215 1212 - if (unlikely(kthread_should_stop())) { 1213 - set_task_state(current, TASK_RUNNING); 1214 - remove_wait_queue(&cc->write_thread_wait, &wait); 1215 - break; 1216 - } 1217 - 1218 1216 schedule(); 1219 1217 1220 - set_task_state(current, TASK_RUNNING); 1221 1218 spin_lock_irq(&cc->write_thread_wait.lock); 1222 1219 __remove_wait_queue(&cc->write_thread_wait, &wait); 1223 1220 goto continue_locked; ··· 1530 1531 if (!cc) 1531 1532 return; 1532 1533 1533 - if (cc->write_thread) 1534 + if (cc->write_thread) { 1535 + spin_lock_irq(&cc->write_thread_wait.lock); 1536 + set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); 1537 + wake_up_locked(&cc->write_thread_wait); 1538 + spin_unlock_irq(&cc->write_thread_wait.lock); 1534 1539 kthread_stop(cc->write_thread); 1540 + } 1535 1541 1536 1542 if (cc->io_queue) 1537 1543 destroy_workqueue(cc->io_queue);
+16 -14
drivers/md/dm-mpath.c
··· 1537 1537 struct block_device **bdev, fmode_t *mode) 1538 1538 { 1539 1539 struct multipath *m = ti->private; 1540 - struct pgpath *pgpath; 1541 1540 unsigned long flags; 1542 1541 int r; 1543 - 1544 - r = 0; 1545 1542 1546 1543 spin_lock_irqsave(&m->lock, flags); 1547 1544 1548 1545 if (!m->current_pgpath) 1549 1546 __choose_pgpath(m, 0); 1550 1547 1551 - pgpath = m->current_pgpath; 1552 - 1553 - if (pgpath) { 1554 - *bdev = pgpath->path.dev->bdev; 1555 - *mode = pgpath->path.dev->mode; 1548 + if (m->current_pgpath) { 1549 + if (!m->queue_io) { 1550 + *bdev = m->current_pgpath->path.dev->bdev; 1551 + *mode = m->current_pgpath->path.dev->mode; 1552 + r = 0; 1553 + } else { 1554 + /* pg_init has not started or completed */ 1555 + r = -ENOTCONN; 1556 + } 1557 + } else { 1558 + /* No path is available */ 1559 + if (m->queue_if_no_path) 1560 + r = -ENOTCONN; 1561 + else 1562 + r = -EIO; 1556 1563 } 1557 - 1558 - if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) 1559 - r = -ENOTCONN; 1560 - else if (!*bdev) 1561 - r = -EIO; 1562 1564 1563 1565 spin_unlock_irqrestore(&m->lock, flags); 1564 1566 1565 - if (r == -ENOTCONN && !fatal_signal_pending(current)) { 1567 + if (r == -ENOTCONN) { 1566 1568 spin_lock_irqsave(&m->lock, flags); 1567 1569 if (!m->current_pg) { 1568 1570 /* Path status changed, redo selection */
+3 -3
drivers/md/dm-thin.c
··· 2432 2432 case PM_WRITE: 2433 2433 if (old_mode != new_mode) 2434 2434 notify_of_pool_mode_change(pool, "write"); 2435 + pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; 2435 2436 dm_pool_metadata_read_write(pool->pmd); 2436 2437 pool->process_bio = process_bio; 2437 2438 pool->process_discard = process_discard_bio; ··· 4250 4249 { 4251 4250 struct thin_c *tc = ti->private; 4252 4251 struct pool *pool = tc->pool; 4253 - struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md); 4254 4252 4255 - if (!pool_limits->discard_granularity) 4256 - return; /* pool's discard support is disabled */ 4253 + if (!pool->pf.discard_enabled) 4254 + return; 4257 4255 4258 4256 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 4259 4257 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
+4 -3
drivers/md/dm.c
··· 591 591 592 592 out: 593 593 dm_put_live_table(md, *srcu_idx); 594 - if (r == -ENOTCONN) { 594 + if (r == -ENOTCONN && !fatal_signal_pending(current)) { 595 595 msleep(10); 596 596 goto retry; 597 597 } ··· 603 603 { 604 604 struct mapped_device *md = bdev->bd_disk->private_data; 605 605 struct dm_target *tgt; 606 + struct block_device *tgt_bdev = NULL; 606 607 int srcu_idx, r; 607 608 608 - r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); 609 + r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx); 609 610 if (r < 0) 610 611 return r; 611 612 ··· 621 620 goto out; 622 621 } 623 622 624 - r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 623 + r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg); 625 624 out: 626 625 dm_put_live_table(md, srcu_idx); 627 626 return r;
+2 -2
drivers/media/pci/cx23885/cx23885-core.c
··· 1992 1992 (unsigned long long)pci_resource_start(pci_dev, 0)); 1993 1993 1994 1994 pci_set_master(pci_dev); 1995 - if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1995 + err = pci_set_dma_mask(pci_dev, 0xffffffff); 1996 + if (err) { 1996 1997 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1997 - err = -EIO; 1998 1998 goto fail_context; 1999 1999 } 2000 2000
+2 -1
drivers/media/pci/cx25821/cx25821-core.c
··· 1319 1319 dev->pci_lat, (unsigned long long)dev->base_io_addr); 1320 1320 1321 1321 pci_set_master(pci_dev); 1322 - if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1322 + err = pci_set_dma_mask(pci_dev, 0xffffffff); 1323 + if (err) { 1323 1324 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1324 1325 err = -EIO; 1325 1326 goto fail_irq;
+2 -2
drivers/media/pci/cx88/cx88-alsa.c
··· 890 890 return err; 891 891 } 892 892 893 - if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) { 893 + err = pci_set_dma_mask(pci,DMA_BIT_MASK(32)); 894 + if (err) { 894 895 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); 895 - err = -EIO; 896 896 cx88_core_put(core, pci); 897 897 return err; 898 898 }
+2 -1
drivers/media/pci/cx88/cx88-mpeg.c
··· 393 393 if (pci_enable_device(dev->pci)) 394 394 return -EIO; 395 395 pci_set_master(dev->pci); 396 - if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) { 396 + err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32)); 397 + if (err) { 397 398 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); 398 399 return -EIO; 399 400 }
+2 -2
drivers/media/pci/cx88/cx88-video.c
··· 1314 1314 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 1315 1315 1316 1316 pci_set_master(pci_dev); 1317 - if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) { 1317 + err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32)); 1318 + if (err) { 1318 1319 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); 1319 - err = -EIO; 1320 1320 goto fail_core; 1321 1321 } 1322 1322 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+1 -1
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
··· 810 810 "%s(): board vendor 0x%x, revision 0x%x\n", 811 811 __func__, board_vendor, board_revision); 812 812 pci_set_master(pci_dev); 813 - if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 813 + if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) { 814 814 dev_err(&pci_dev->dev, 815 815 "%s(): 32bit PCI DMA is not supported\n", __func__); 816 816 goto pci_detect_err;
+2 -2
drivers/media/pci/saa7134/saa7134-core.c
··· 951 951 pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 952 952 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 953 953 pci_set_master(pci_dev); 954 - if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { 954 + err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); 955 + if (err) { 955 956 pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); 956 - err = -EIO; 957 957 goto fail1; 958 958 } 959 959
+2 -2
drivers/media/pci/saa7164/saa7164-core.c
··· 1264 1264 1265 1265 pci_set_master(pci_dev); 1266 1266 /* TODO */ 1267 - if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1267 + err = pci_set_dma_mask(pci_dev, 0xffffffff); 1268 + if (err) { 1268 1269 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1269 - err = -EIO; 1270 1270 goto fail_irq; 1271 1271 } 1272 1272
+2 -2
drivers/media/pci/tw68/tw68-core.c
··· 257 257 dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 258 258 dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); 259 259 pci_set_master(pci_dev); 260 - if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { 260 + err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); 261 + if (err) { 261 262 pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); 262 - err = -EIO; 263 263 goto fail1; 264 264 } 265 265
+1
drivers/mtd/nand/jz4740_nand.c
··· 25 25 26 26 #include <linux/gpio.h> 27 27 28 + #include <asm/mach-jz4740/gpio.h> 28 29 #include <asm/mach-jz4740/jz4740_nand.h> 29 30 30 31 #define JZ_REG_NAND_CTRL 0x50
+1 -1
drivers/mtd/nand/nand_base.c
··· 3110 3110 */ 3111 3111 static void nand_shutdown(struct mtd_info *mtd) 3112 3112 { 3113 - nand_get_device(mtd, FL_SHUTDOWN); 3113 + nand_get_device(mtd, FL_PM_SUSPENDED); 3114 3114 } 3115 3115 3116 3116 /* Set default functions */
+3 -2
drivers/net/ethernet/amd/pcnet32.c
··· 1500 1500 return -ENODEV; 1501 1501 } 1502 1502 1503 - if (!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) { 1503 + err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); 1504 + if (err) { 1504 1505 if (pcnet32_debug & NETIF_MSG_PROBE) 1505 1506 pr_err("architecture does not support 32bit PCI busmaster DMA\n"); 1506 - return -ENODEV; 1507 + return err; 1507 1508 } 1508 1509 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { 1509 1510 if (pcnet32_debug & NETIF_MSG_PROBE)
+107 -32
drivers/nvme/host/lightnvm.c
··· 93 93 __le16 cdw14[6]; 94 94 }; 95 95 96 - struct nvme_nvm_bbtbl { 96 + struct nvme_nvm_getbbtbl { 97 97 __u8 opcode; 98 98 __u8 flags; 99 99 __u16 command_id; ··· 101 101 __u64 rsvd[2]; 102 102 __le64 prp1; 103 103 __le64 prp2; 104 - __le32 prp1_len; 105 - __le32 prp2_len; 106 - __le32 lbb; 107 - __u32 rsvd11[3]; 104 + __le64 spba; 105 + __u32 rsvd4[4]; 106 + }; 107 + 108 + struct nvme_nvm_setbbtbl { 109 + __u8 opcode; 110 + __u8 flags; 111 + __u16 command_id; 112 + __le32 nsid; 113 + __le64 rsvd[2]; 114 + __le64 prp1; 115 + __le64 prp2; 116 + __le64 spba; 117 + __le16 nlb; 118 + __u8 value; 119 + __u8 rsvd3; 120 + __u32 rsvd4[3]; 108 121 }; 109 122 110 123 struct nvme_nvm_erase_blk { ··· 142 129 struct nvme_nvm_hb_rw hb_rw; 143 130 struct nvme_nvm_ph_rw ph_rw; 144 131 struct nvme_nvm_l2ptbl l2p; 145 - struct nvme_nvm_bbtbl get_bb; 146 - struct nvme_nvm_bbtbl set_bb; 132 + struct nvme_nvm_getbbtbl get_bb; 133 + struct nvme_nvm_setbbtbl set_bb; 147 134 struct nvme_nvm_erase_blk erase; 148 135 }; 149 136 }; ··· 155 142 __u8 num_ch; 156 143 __u8 num_lun; 157 144 __u8 num_pln; 145 + __u8 rsvd1; 158 146 __le16 num_blk; 159 147 __le16 num_pg; 160 148 __le16 fpg_sz; 161 149 __le16 csecs; 162 150 __le16 sos; 151 + __le16 rsvd2; 163 152 __le32 trdt; 164 153 __le32 trdm; 165 154 __le32 tprt; ··· 169 154 __le32 tbet; 170 155 __le32 tbem; 171 156 __le32 mpos; 157 + __le32 mccap; 172 158 __le16 cpar; 173 - __u8 reserved[913]; 159 + __u8 reserved[906]; 174 160 } __packed; 175 161 176 162 struct nvme_nvm_addr_format { ··· 194 178 __u8 ver_id; 195 179 __u8 vmnt; 196 180 __u8 cgrps; 197 - __u8 res[5]; 181 + __u8 res; 198 182 __le32 cap; 199 183 __le32 dom; 200 184 struct nvme_nvm_addr_format ppaf; 201 - __u8 ppat; 202 - __u8 resv[223]; 185 + __u8 resv[228]; 203 186 struct nvme_nvm_id_group groups[4]; 204 187 } __packed; 188 + 189 + struct nvme_nvm_bb_tbl { 190 + __u8 tblid[4]; 191 + __le16 verid; 192 + __le16 revid; 193 + __le32 rvsd1; 194 + __le32 tblks; 195 + __le32 tfact; 
196 + __le32 tgrown; 197 + __le32 tdresv; 198 + __le32 thresv; 199 + __le32 rsvd2[8]; 200 + __u8 blk[0]; 201 + }; 205 202 206 203 /* 207 204 * Check we didn't inadvertently grow the command struct ··· 224 195 BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); 225 196 BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); 226 197 BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); 227 - BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); 198 + BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64); 199 + BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64); 228 200 BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); 229 201 BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); 230 202 BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); 231 203 BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); 232 204 BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); 205 + BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512); 233 206 } 234 207 235 208 static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) ··· 265 234 dst->tbet = le32_to_cpu(src->tbet); 266 235 dst->tbem = le32_to_cpu(src->tbem); 267 236 dst->mpos = le32_to_cpu(src->mpos); 237 + dst->mccap = le32_to_cpu(src->mccap); 268 238 269 239 dst->cpar = le16_to_cpu(src->cpar); 270 240 } ··· 276 244 static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) 277 245 { 278 246 struct nvme_ns *ns = q->queuedata; 247 + struct nvme_dev *dev = ns->dev; 279 248 struct nvme_nvm_id *nvme_nvm_id; 280 249 struct nvme_nvm_command c = {}; 281 250 int ret; ··· 289 256 if (!nvme_nvm_id) 290 257 return -ENOMEM; 291 258 292 - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, 293 - sizeof(struct nvme_nvm_id)); 259 + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, 260 + nvme_nvm_id, sizeof(struct nvme_nvm_id)); 294 261 if (ret) { 295 262 ret = -EIO; 296 263 goto out; ··· 301 268 nvm_id->cgrps = nvme_nvm_id->cgrps; 302 269 nvm_id->cap = 
le32_to_cpu(nvme_nvm_id->cap); 303 270 nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); 271 + memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, 272 + sizeof(struct nvme_nvm_addr_format)); 304 273 305 274 ret = init_grps(nvm_id, nvme_nvm_id); 306 275 out: ··· 316 281 struct nvme_ns *ns = q->queuedata; 317 282 struct nvme_dev *dev = ns->dev; 318 283 struct nvme_nvm_command c = {}; 319 - u32 len = queue_max_hw_sectors(q) << 9; 284 + u32 len = queue_max_hw_sectors(dev->admin_q) << 9; 320 285 u32 nlb_pr_rq = len / sizeof(u64); 321 286 u64 cmd_slba = slba; 322 287 void *entries; ··· 334 299 c.l2p.slba = cpu_to_le64(cmd_slba); 335 300 c.l2p.nlb = cpu_to_le32(cmd_nlb); 336 301 337 - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, 338 - entries, len); 302 + ret = nvme_submit_sync_cmd(dev->admin_q, 303 + (struct nvme_command *)&c, entries, len); 339 304 if (ret) { 340 305 dev_err(dev->dev, "L2P table transfer failed (%d)\n", 341 306 ret); ··· 357 322 return ret; 358 323 } 359 324 360 - static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, 361 - unsigned int nr_blocks, 362 - nvm_bb_update_fn *update_bbtbl, void *priv) 325 + static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa, 326 + int nr_blocks, nvm_bb_update_fn *update_bbtbl, 327 + void *priv) 363 328 { 364 329 struct nvme_ns *ns = q->queuedata; 365 330 struct nvme_dev *dev = ns->dev; 366 331 struct nvme_nvm_command c = {}; 367 - void *bb_bitmap; 368 - u16 bb_bitmap_size; 332 + struct nvme_nvm_bb_tbl *bb_tbl; 333 + int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks; 369 334 int ret = 0; 370 335 371 336 c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; 372 337 c.get_bb.nsid = cpu_to_le32(ns->ns_id); 373 - c.get_bb.lbb = cpu_to_le32(lunid); 374 - bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE; 375 - bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL); 376 - if (!bb_bitmap) 338 + c.get_bb.spba = cpu_to_le64(ppa.ppa); 339 + 340 + bb_tbl = kzalloc(tblsz, GFP_KERNEL); 341 + if (!bb_tbl) 377 342 
return -ENOMEM; 378 343 379 - bitmap_zero(bb_bitmap, nr_blocks); 380 - 381 - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, 382 - bb_bitmap_size); 344 + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, 345 + bb_tbl, tblsz); 383 346 if (ret) { 384 347 dev_err(dev->dev, "get bad block table failed (%d)\n", ret); 385 348 ret = -EIO; 386 349 goto out; 387 350 } 388 351 389 - ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); 352 + if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' || 353 + bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') { 354 + dev_err(dev->dev, "bbt format mismatch\n"); 355 + ret = -EINVAL; 356 + goto out; 357 + } 358 + 359 + if (le16_to_cpu(bb_tbl->verid) != 1) { 360 + ret = -EINVAL; 361 + dev_err(dev->dev, "bbt version not supported\n"); 362 + goto out; 363 + } 364 + 365 + if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) { 366 + ret = -EINVAL; 367 + dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)", 368 + le32_to_cpu(bb_tbl->tblks), nr_blocks); 369 + goto out; 370 + } 371 + 372 + ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); 390 373 if (ret) { 391 374 ret = -EINTR; 392 375 goto out; 393 376 } 394 377 395 378 out: 396 - kfree(bb_bitmap); 379 + kfree(bb_tbl); 380 + return ret; 381 + } 382 + 383 + static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, 384 + int type) 385 + { 386 + struct nvme_ns *ns = q->queuedata; 387 + struct nvme_dev *dev = ns->dev; 388 + struct nvme_nvm_command c = {}; 389 + int ret = 0; 390 + 391 + c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl; 392 + c.set_bb.nsid = cpu_to_le32(ns->ns_id); 393 + c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa); 394 + c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1); 395 + c.set_bb.value = type; 396 + 397 + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, 398 + NULL, 0); 399 + if (ret) 400 + dev_err(dev->dev, "set bad block table failed (%d)\n", ret); 397 401 return ret; 398 402 } 399 403 ··· 548 
474 .get_l2p_tbl = nvme_nvm_get_l2p_tbl, 549 475 550 476 .get_bb_tbl = nvme_nvm_get_bb_tbl, 477 + .set_bb_tbl = nvme_nvm_set_bb_tbl, 551 478 552 479 .submit_io = nvme_nvm_submit_io, 553 480 .erase_block = nvme_nvm_erase_block,
+25 -14
drivers/nvme/host/pci.c
··· 896 896 goto retry_cmd; 897 897 } 898 898 if (blk_integrity_rq(req)) { 899 - if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) 899 + if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) { 900 + dma_unmap_sg(dev->dev, iod->sg, iod->nents, 901 + dma_dir); 900 902 goto error_cmd; 903 + } 901 904 902 905 sg_init_table(iod->meta_sg, 1); 903 906 if (blk_rq_map_integrity_sg( 904 - req->q, req->bio, iod->meta_sg) != 1) 907 + req->q, req->bio, iod->meta_sg) != 1) { 908 + dma_unmap_sg(dev->dev, iod->sg, iod->nents, 909 + dma_dir); 905 910 goto error_cmd; 911 + } 906 912 907 913 if (rq_data_dir(req)) 908 914 nvme_dif_remap(req, nvme_dif_prep); 909 915 910 - if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) 916 + if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) { 917 + dma_unmap_sg(dev->dev, iod->sg, iod->nents, 918 + dma_dir); 911 919 goto error_cmd; 920 + } 912 921 } 913 922 } 914 923 ··· 977 968 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) 978 969 return; 979 970 980 - writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 971 + if (likely(nvmeq->cq_vector >= 0)) 972 + writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 981 973 nvmeq->cq_head = head; 982 974 nvmeq->cq_phase = phase; 983 975 ··· 1737 1727 u32 aqa; 1738 1728 u64 cap = lo_hi_readq(&dev->bar->cap); 1739 1729 struct nvme_queue *nvmeq; 1740 - unsigned page_shift = PAGE_SHIFT; 1730 + /* 1731 + * default to a 4K page size, with the intention to update this 1732 + * path in the future to accomodate architectures with differing 1733 + * kernel and IO page sizes. 
1734 + */ 1735 + unsigned page_shift = 12; 1741 1736 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; 1742 - unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12; 1743 1737 1744 1738 if (page_shift < dev_page_min) { 1745 1739 dev_err(dev->dev, ··· 1751 1737 "host (%u)\n", 1 << dev_page_min, 1752 1738 1 << page_shift); 1753 1739 return -ENODEV; 1754 - } 1755 - if (page_shift > dev_page_max) { 1756 - dev_info(dev->dev, 1757 - "Device maximum page size (%u) smaller than " 1758 - "host (%u); enabling work-around\n", 1759 - 1 << dev_page_max, 1 << page_shift); 1760 - page_shift = dev_page_max; 1761 1740 } 1762 1741 1763 1742 dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? ··· 2275 2268 if (dev->max_hw_sectors) { 2276 2269 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 2277 2270 blk_queue_max_segments(ns->queue, 2278 - ((dev->max_hw_sectors << 9) / dev->page_size) + 1); 2271 + (dev->max_hw_sectors / (dev->page_size >> 9)) + 1); 2279 2272 } 2280 2273 if (dev->stripe_size) 2281 2274 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); ··· 2794 2787 { 2795 2788 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; 2796 2789 nvme_put_dq(dq); 2790 + 2791 + spin_lock_irq(&nvmeq->q_lock); 2792 + nvme_process_cq(nvmeq); 2793 + spin_unlock_irq(&nvmeq->q_lock); 2797 2794 } 2798 2795 2799 2796 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+2 -2
drivers/pci/probe.c
··· 1685 1685 { 1686 1686 struct device *bridge = pci_get_host_bridge_device(dev); 1687 1687 1688 - if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) { 1689 - if (bridge->parent) 1688 + if (IS_ENABLED(CONFIG_OF) && 1689 + bridge->parent && bridge->parent->of_node) { 1690 1690 of_dma_configure(&dev->dev, bridge->parent->of_node); 1691 1691 } else if (has_acpi_companion(bridge)) { 1692 1692 struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
+1 -1
drivers/sh/pm_runtime.c
··· 34 34 35 35 static int __init sh_pm_runtime_init(void) 36 36 { 37 - if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { 37 + if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { 38 38 if (!of_find_compatible_node(NULL, NULL, 39 39 "renesas,cpg-mstp-clocks")) 40 40 return 0;
+2 -1
drivers/staging/iio/Kconfig
··· 18 18 source "drivers/staging/iio/trigger/Kconfig" 19 19 20 20 config IIO_DUMMY_EVGEN 21 - tristate 21 + tristate 22 + select IRQ_WORK 22 23 23 24 config IIO_SIMPLE_DUMMY 24 25 tristate "An example driver with no hardware requirements"
+2 -2
drivers/staging/iio/adc/lpc32xx_adc.c
··· 76 76 77 77 if (mask == IIO_CHAN_INFO_RAW) { 78 78 mutex_lock(&indio_dev->mlock); 79 - clk_enable(info->clk); 79 + clk_prepare_enable(info->clk); 80 80 /* Measurement setup */ 81 81 __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, 82 82 LPC32XX_ADC_SELECT(info->adc_base)); ··· 84 84 __raw_writel(AD_PDN_CTRL | AD_STROBE, 85 85 LPC32XX_ADC_CTRL(info->adc_base)); 86 86 wait_for_completion(&info->completion); /* set by ISR */ 87 - clk_disable(info->clk); 87 + clk_disable_unprepare(info->clk); 88 88 *val = info->value; 89 89 mutex_unlock(&indio_dev->mlock); 90 90
+25 -23
drivers/staging/wilc1000/coreconfigurator.c
··· 13 13 #include "wilc_wlan.h" 14 14 #include <linux/errno.h> 15 15 #include <linux/slab.h> 16 - #include <linux/etherdevice.h> 17 16 #define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ 18 17 BEACON_INTERVAL_LEN + CAP_INFO_LEN) 19 - #define ADDR1 4 20 - #define ADDR2 10 21 - #define ADDR3 16 22 18 23 19 /* Basic Frame Type Codes (2-bit) */ 24 20 enum basic_frame_type { ··· 171 175 return ((header[1] & 0x02) >> 1); 172 176 } 173 177 178 + /* This function extracts the MAC Address in 'address1' field of the MAC */ 179 + /* header and updates the MAC Address in the allocated 'addr' variable. */ 180 + static inline void get_address1(u8 *pu8msa, u8 *addr) 181 + { 182 + memcpy(addr, pu8msa + 4, 6); 183 + } 184 + 185 + /* This function extracts the MAC Address in 'address2' field of the MAC */ 186 + /* header and updates the MAC Address in the allocated 'addr' variable. */ 187 + static inline void get_address2(u8 *pu8msa, u8 *addr) 188 + { 189 + memcpy(addr, pu8msa + 10, 6); 190 + } 191 + 192 + /* This function extracts the MAC Address in 'address3' field of the MAC */ 193 + /* header and updates the MAC Address in the allocated 'addr' variable. */ 194 + static inline void get_address3(u8 *pu8msa, u8 *addr) 195 + { 196 + memcpy(addr, pu8msa + 16, 6); 197 + } 198 + 174 199 /* This function extracts the BSSID from the incoming WLAN packet based on */ 175 - /* the 'from ds' bit, and updates the MAC Address in the allocated 'data' */ 200 + /* the 'from ds' bit, and updates the MAC Address in the allocated 'addr' */ 176 201 /* variable. */ 177 202 static inline void get_BSSID(u8 *data, u8 *bssid) 178 203 { 179 204 if (get_from_ds(data) == 1) 180 - /* 181 - * Extract the MAC Address in 'address2' field of the MAC 182 - * header and update the MAC Address in the allocated 'data' 183 - * variable. 
184 - */ 185 - ether_addr_copy(data, bssid + ADDR2); 205 + get_address2(data, bssid); 186 206 else if (get_to_ds(data) == 1) 187 - /* 188 - * Extract the MAC Address in 'address1' field of the MAC 189 - * header and update the MAC Address in the allocated 'data' 190 - * variable. 191 - */ 192 - ether_addr_copy(data, bssid + ADDR1); 207 + get_address1(data, bssid); 193 208 else 194 - /* 195 - * Extract the MAC Address in 'address3' field of the MAC 196 - * header and update the MAC Address in the allocated 'data' 197 - * variable. 198 - */ 199 - ether_addr_copy(data, bssid + ADDR3); 209 + get_address3(data, bssid); 200 210 } 201 211 202 212 /* This function extracts the SSID from a beacon/probe response frame */
+1 -1
drivers/tty/n_tty.c
··· 169 169 { 170 170 struct n_tty_data *ldata = tty->disc_data; 171 171 172 - tty_audit_add_data(tty, to, n, ldata->icanon); 172 + tty_audit_add_data(tty, from, n, ldata->icanon); 173 173 return copy_to_user(to, from, n); 174 174 } 175 175
+1
drivers/tty/serial/8250/8250_fsl.c
··· 60 60 spin_unlock_irqrestore(&up->port.lock, flags); 61 61 return 1; 62 62 } 63 + EXPORT_SYMBOL_GPL(fsl8250_handle_irq);
+1
drivers/tty/serial/8250/Kconfig
··· 373 373 depends on SERIAL_8250 && PCI 374 374 select HSU_DMA if SERIAL_8250_DMA 375 375 select HSU_DMA_PCI if X86_INTEL_MID 376 + select RATIONAL 376 377 help 377 378 Selecting this option will enable handling of the extra features 378 379 present on the UART found on Intel Medfield SOC and various other
+1 -1
drivers/tty/serial/Kconfig
··· 1539 1539 tristate "Freescale lpuart serial port support" 1540 1540 depends on HAS_DMA 1541 1541 select SERIAL_CORE 1542 - select SERIAL_EARLYCON 1543 1542 help 1544 1543 Support for the on-chip lpuart on some Freescale SOCs. 1545 1544 ··· 1546 1547 bool "Console on Freescale lpuart serial port" 1547 1548 depends on SERIAL_FSL_LPUART=y 1548 1549 select SERIAL_CORE_CONSOLE 1550 + select SERIAL_EARLYCON 1549 1551 help 1550 1552 If you have enabled the lpuart serial port on the Freescale SoCs, 1551 1553 you can make it the console by answering Y to this option.
+1 -1
drivers/tty/serial/bcm63xx_uart.c
··· 474 474 475 475 /* register irq and enable rx interrupts */ 476 476 ret = request_irq(port->irq, bcm_uart_interrupt, 0, 477 - bcm_uart_type(port), port); 477 + dev_name(port->dev), port); 478 478 if (ret) 479 479 return ret; 480 480 bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
+1 -1
drivers/tty/serial/etraxfs-uart.c
··· 894 894 up->regi_ser = of_iomap(np, 0); 895 895 up->port.dev = &pdev->dev; 896 896 897 - up->gpios = mctrl_gpio_init(&pdev->dev, 0); 897 + up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0); 898 898 if (IS_ERR(up->gpios)) 899 899 return PTR_ERR(up->gpios); 900 900
+1 -1
drivers/tty/tty_audit.c
··· 265 265 * 266 266 * Audit @data of @size from @tty, if necessary. 267 267 */ 268 - void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, 268 + void tty_audit_add_data(struct tty_struct *tty, const void *data, 269 269 size_t size, unsigned icanon) 270 270 { 271 271 struct tty_audit_buf *buf;
+4
drivers/tty/tty_io.c
··· 1282 1282 int was_stopped = tty->stopped; 1283 1283 1284 1284 if (tty->ops->send_xchar) { 1285 + down_read(&tty->termios_rwsem); 1285 1286 tty->ops->send_xchar(tty, ch); 1287 + up_read(&tty->termios_rwsem); 1286 1288 return 0; 1287 1289 } 1288 1290 1289 1291 if (tty_write_lock(tty, 0) < 0) 1290 1292 return -ERESTARTSYS; 1291 1293 1294 + down_read(&tty->termios_rwsem); 1292 1295 if (was_stopped) 1293 1296 start_tty(tty); 1294 1297 tty->ops->write(tty, &ch, 1); 1295 1298 if (was_stopped) 1296 1299 stop_tty(tty); 1300 + up_read(&tty->termios_rwsem); 1297 1301 tty_write_unlock(tty); 1298 1302 return 0; 1299 1303 }
-4
drivers/tty/tty_ioctl.c
··· 1147 1147 spin_unlock_irq(&tty->flow_lock); 1148 1148 break; 1149 1149 case TCIOFF: 1150 - down_read(&tty->termios_rwsem); 1151 1150 if (STOP_CHAR(tty) != __DISABLED_CHAR) 1152 1151 retval = tty_send_xchar(tty, STOP_CHAR(tty)); 1153 - up_read(&tty->termios_rwsem); 1154 1152 break; 1155 1153 case TCION: 1156 - down_read(&tty->termios_rwsem); 1157 1154 if (START_CHAR(tty) != __DISABLED_CHAR) 1158 1155 retval = tty_send_xchar(tty, START_CHAR(tty)); 1159 - up_read(&tty->termios_rwsem); 1160 1156 break; 1161 1157 default: 1162 1158 return -EINVAL;
+1 -1
drivers/tty/tty_ldisc.c
··· 592 592 593 593 /* Restart the work queue in case no characters kick it off. Safe if 594 594 already running */ 595 - schedule_work(&tty->port->buf.work); 595 + tty_buffer_restart_work(tty->port); 596 596 597 597 tty_unlock(tty); 598 598 return retval;
+122 -22
drivers/usb/chipidea/ci_hdrc_imx.c
··· 84 84 struct imx_usbmisc_data *usbmisc_data; 85 85 bool supports_runtime_pm; 86 86 bool in_lpm; 87 + /* SoC before i.mx6 (except imx23/imx28) needs three clks */ 88 + bool need_three_clks; 89 + struct clk *clk_ipg; 90 + struct clk *clk_ahb; 91 + struct clk *clk_per; 92 + /* --------------------------------- */ 87 93 }; 88 94 89 95 /* Common functions shared by usbmisc drivers */ ··· 141 135 } 142 136 143 137 /* End of common functions shared by usbmisc drivers*/ 138 + static int imx_get_clks(struct device *dev) 139 + { 140 + struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); 141 + int ret = 0; 142 + 143 + data->clk_ipg = devm_clk_get(dev, "ipg"); 144 + if (IS_ERR(data->clk_ipg)) { 145 + /* If the platform only needs one clocks */ 146 + data->clk = devm_clk_get(dev, NULL); 147 + if (IS_ERR(data->clk)) { 148 + ret = PTR_ERR(data->clk); 149 + dev_err(dev, 150 + "Failed to get clks, err=%ld,%ld\n", 151 + PTR_ERR(data->clk), PTR_ERR(data->clk_ipg)); 152 + return ret; 153 + } 154 + return ret; 155 + } 156 + 157 + data->clk_ahb = devm_clk_get(dev, "ahb"); 158 + if (IS_ERR(data->clk_ahb)) { 159 + ret = PTR_ERR(data->clk_ahb); 160 + dev_err(dev, 161 + "Failed to get ahb clock, err=%d\n", ret); 162 + return ret; 163 + } 164 + 165 + data->clk_per = devm_clk_get(dev, "per"); 166 + if (IS_ERR(data->clk_per)) { 167 + ret = PTR_ERR(data->clk_per); 168 + dev_err(dev, 169 + "Failed to get per clock, err=%d\n", ret); 170 + return ret; 171 + } 172 + 173 + data->need_three_clks = true; 174 + return ret; 175 + } 176 + 177 + static int imx_prepare_enable_clks(struct device *dev) 178 + { 179 + struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); 180 + int ret = 0; 181 + 182 + if (data->need_three_clks) { 183 + ret = clk_prepare_enable(data->clk_ipg); 184 + if (ret) { 185 + dev_err(dev, 186 + "Failed to prepare/enable ipg clk, err=%d\n", 187 + ret); 188 + return ret; 189 + } 190 + 191 + ret = clk_prepare_enable(data->clk_ahb); 192 + if (ret) { 193 + dev_err(dev, 194 + "Failed to 
prepare/enable ahb clk, err=%d\n", 195 + ret); 196 + clk_disable_unprepare(data->clk_ipg); 197 + return ret; 198 + } 199 + 200 + ret = clk_prepare_enable(data->clk_per); 201 + if (ret) { 202 + dev_err(dev, 203 + "Failed to prepare/enable per clk, err=%d\n", 204 + ret); 205 + clk_disable_unprepare(data->clk_ahb); 206 + clk_disable_unprepare(data->clk_ipg); 207 + return ret; 208 + } 209 + } else { 210 + ret = clk_prepare_enable(data->clk); 211 + if (ret) { 212 + dev_err(dev, 213 + "Failed to prepare/enable clk, err=%d\n", 214 + ret); 215 + return ret; 216 + } 217 + } 218 + 219 + return ret; 220 + } 221 + 222 + static void imx_disable_unprepare_clks(struct device *dev) 223 + { 224 + struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); 225 + 226 + if (data->need_three_clks) { 227 + clk_disable_unprepare(data->clk_per); 228 + clk_disable_unprepare(data->clk_ahb); 229 + clk_disable_unprepare(data->clk_ipg); 230 + } else { 231 + clk_disable_unprepare(data->clk); 232 + } 233 + } 144 234 145 235 static int ci_hdrc_imx_probe(struct platform_device *pdev) 146 236 { ··· 247 145 .flags = CI_HDRC_SET_NON_ZERO_TTHA, 248 146 }; 249 147 int ret; 250 - const struct of_device_id *of_id = 251 - of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); 252 - const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data; 148 + const struct of_device_id *of_id; 149 + const struct ci_hdrc_imx_platform_flag *imx_platform_flag; 150 + 151 + of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); 152 + if (!of_id) 153 + return -ENODEV; 154 + 155 + imx_platform_flag = of_id->data; 253 156 254 157 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 255 158 if (!data) 256 159 return -ENOMEM; 257 160 161 + platform_set_drvdata(pdev, data); 258 162 data->usbmisc_data = usbmisc_get_init_data(&pdev->dev); 259 163 if (IS_ERR(data->usbmisc_data)) 260 164 return PTR_ERR(data->usbmisc_data); 261 165 262 - data->clk = devm_clk_get(&pdev->dev, NULL); 263 - if (IS_ERR(data->clk)) { 264 - 
dev_err(&pdev->dev, 265 - "Failed to get clock, err=%ld\n", PTR_ERR(data->clk)); 266 - return PTR_ERR(data->clk); 267 - } 268 - 269 - ret = clk_prepare_enable(data->clk); 270 - if (ret) { 271 - dev_err(&pdev->dev, 272 - "Failed to prepare or enable clock, err=%d\n", ret); 166 + ret = imx_get_clks(&pdev->dev); 167 + if (ret) 273 168 return ret; 274 - } 169 + 170 + ret = imx_prepare_enable_clks(&pdev->dev); 171 + if (ret) 172 + return ret; 275 173 276 174 data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0); 277 175 if (IS_ERR(data->phy)) { ··· 314 212 goto disable_device; 315 213 } 316 214 317 - platform_set_drvdata(pdev, data); 318 - 319 215 if (data->supports_runtime_pm) { 320 216 pm_runtime_set_active(&pdev->dev); 321 217 pm_runtime_enable(&pdev->dev); ··· 326 226 disable_device: 327 227 ci_hdrc_remove_device(data->ci_pdev); 328 228 err_clk: 329 - clk_disable_unprepare(data->clk); 229 + imx_disable_unprepare_clks(&pdev->dev); 330 230 return ret; 331 231 } 332 232 ··· 340 240 pm_runtime_put_noidle(&pdev->dev); 341 241 } 342 242 ci_hdrc_remove_device(data->ci_pdev); 343 - clk_disable_unprepare(data->clk); 243 + imx_disable_unprepare_clks(&pdev->dev); 344 244 345 245 return 0; 346 246 } ··· 352 252 353 253 dev_dbg(dev, "at %s\n", __func__); 354 254 355 - clk_disable_unprepare(data->clk); 255 + imx_disable_unprepare_clks(dev); 356 256 data->in_lpm = true; 357 257 358 258 return 0; ··· 370 270 return 0; 371 271 } 372 272 373 - ret = clk_prepare_enable(data->clk); 273 + ret = imx_prepare_enable_clks(dev); 374 274 if (ret) 375 275 return ret; 376 276 ··· 385 285 return 0; 386 286 387 287 clk_disable: 388 - clk_disable_unprepare(data->clk); 288 + imx_disable_unprepare_clks(dev); 389 289 return ret; 390 290 } 391 291
+2
drivers/usb/chipidea/debug.c
··· 322 322 return -EINVAL; 323 323 324 324 pm_runtime_get_sync(ci->dev); 325 + disable_irq(ci->irq); 325 326 ci_role_stop(ci); 326 327 ret = ci_role_start(ci, role); 328 + enable_irq(ci->irq); 327 329 pm_runtime_put_sync(ci->dev); 328 330 329 331 return ret ? ret : count;
+17
drivers/usb/chipidea/udc.c
··· 1751 1751 return retval; 1752 1752 } 1753 1753 1754 + static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci) 1755 + { 1756 + if (!ci_otg_is_fsm_mode(ci)) 1757 + return; 1758 + 1759 + mutex_lock(&ci->fsm.lock); 1760 + if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) { 1761 + ci->fsm.a_bidl_adis_tmout = 1; 1762 + ci_hdrc_otg_fsm_start(ci); 1763 + } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) { 1764 + ci->fsm.protocol = PROTO_UNDEF; 1765 + ci->fsm.otg->state = OTG_STATE_UNDEFINED; 1766 + } 1767 + mutex_unlock(&ci->fsm.lock); 1768 + } 1769 + 1754 1770 /** 1755 1771 * ci_udc_stop: unregister a gadget driver 1756 1772 */ ··· 1791 1775 ci->driver = NULL; 1792 1776 spin_unlock_irqrestore(&ci->lock, flags); 1793 1777 1778 + ci_udc_stop_for_otg_fsm(ci); 1794 1779 return 0; 1795 1780 } 1796 1781
+6 -4
drivers/usb/chipidea/usbmisc_imx.c
··· 500 500 { 501 501 struct resource *res; 502 502 struct imx_usbmisc *data; 503 - struct of_device_id *tmp_dev; 503 + const struct of_device_id *of_id; 504 + 505 + of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev); 506 + if (!of_id) 507 + return -ENODEV; 504 508 505 509 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 506 510 if (!data) ··· 517 513 if (IS_ERR(data->base)) 518 514 return PTR_ERR(data->base); 519 515 520 - tmp_dev = (struct of_device_id *) 521 - of_match_device(usbmisc_imx_dt_ids, &pdev->dev); 522 - data->ops = (const struct usbmisc_ops *)tmp_dev->data; 516 + data->ops = (const struct usbmisc_ops *)of_id->data; 523 517 platform_set_drvdata(pdev, data); 524 518 525 519 return 0;
+1 -1
drivers/usb/class/usblp.c
··· 884 884 885 885 add_wait_queue(&usblp->wwait, &waita); 886 886 for (;;) { 887 - set_current_state(TASK_INTERRUPTIBLE); 888 887 if (mutex_lock_interruptible(&usblp->mut)) { 889 888 rc = -EINTR; 890 889 break; 891 890 } 891 + set_current_state(TASK_INTERRUPTIBLE); 892 892 rc = usblp_wtest(usblp, nonblock); 893 893 mutex_unlock(&usblp->mut); 894 894 if (rc <= 0)
+1 -2
drivers/usb/core/Kconfig
··· 77 77 78 78 config USB_OTG_FSM 79 79 tristate "USB 2.0 OTG FSM implementation" 80 - depends on USB 81 - select USB_OTG 80 + depends on USB && USB_OTG 82 81 select USB_PHY 83 82 help 84 83 Implements OTG Finite State Machine as specified in On-The-Go
+5 -4
drivers/usb/dwc2/hcd.c
··· 324 324 */ 325 325 static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) 326 326 { 327 - if (hsotg->lx_state == DWC2_L2) { 327 + if (hsotg->bus_suspended) { 328 328 hsotg->flags.b.port_suspend_change = 1; 329 329 usb_hcd_resume_root_hub(hsotg->priv); 330 - } else { 331 - hsotg->flags.b.port_l1_change = 1; 332 330 } 331 + 332 + if (hsotg->lx_state == DWC2_L1) 333 + hsotg->flags.b.port_l1_change = 1; 333 334 } 334 335 335 336 /** ··· 1429 1428 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", 1430 1429 dwc2_readl(hsotg->regs + HPRT0)); 1431 1430 1432 - hsotg->bus_suspended = 0; 1433 1431 dwc2_hcd_rem_wakeup(hsotg); 1432 + hsotg->bus_suspended = 0; 1434 1433 1435 1434 /* Change to L0 state */ 1436 1435 hsotg->lx_state = DWC2_L0;
+2 -1
drivers/usb/dwc2/platform.c
··· 108 108 .host_ls_low_power_phy_clk = -1, 109 109 .ts_dline = -1, 110 110 .reload_ctl = -1, 111 - .ahbcfg = 0x7, /* INCR16 */ 111 + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << 112 + GAHBCFG_HBSTLEN_SHIFT, 112 113 .uframe_sched = -1, 113 114 .external_id_pin_ctl = -1, 114 115 .hibernation = -1,
+4
drivers/usb/dwc3/dwc3-pci.c
··· 34 34 #define PCI_DEVICE_ID_INTEL_BSW 0x22b7 35 35 #define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 36 36 #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 37 + #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa 38 + #define PCI_DEVICE_ID_INTEL_APL 0x5aaa 37 39 38 40 static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; 39 41 static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; ··· 212 210 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, 213 211 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, 214 212 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, 213 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, 214 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 215 215 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 216 216 { } /* Terminating Entry */ 217 217 };
+23 -1
drivers/usb/dwc3/gadget.c
··· 2744 2744 } 2745 2745 2746 2746 dwc->gadget.ops = &dwc3_gadget_ops; 2747 - dwc->gadget.max_speed = USB_SPEED_SUPER; 2748 2747 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2749 2748 dwc->gadget.sg_supported = true; 2750 2749 dwc->gadget.name = "dwc3-gadget"; 2750 + 2751 + /* 2752 + * FIXME We might be setting max_speed to <SUPER, however versions 2753 + * <2.20a of dwc3 have an issue with metastability (documented 2754 + * elsewhere in this driver) which tells us we can't set max speed to 2755 + * anything lower than SUPER. 2756 + * 2757 + * Because gadget.max_speed is only used by composite.c and function 2758 + * drivers (i.e. it won't go into dwc3's registers) we are allowing this 2759 + * to happen so we avoid sending SuperSpeed Capability descriptor 2760 + * together with our BOS descriptor as that could confuse host into 2761 + * thinking we can handle super speed. 2762 + * 2763 + * Note that, in fact, we won't even support GetBOS requests when speed 2764 + * is less than super speed because we don't have means, yet, to tell 2765 + * composite.c that we are USB 2.0 + LPM ECN. 2766 + */ 2767 + if (dwc->revision < DWC3_REVISION_220A) 2768 + dwc3_trace(trace_dwc3_gadget, 2769 + "Changing max_speed on rev %08x\n", 2770 + dwc->revision); 2771 + 2772 + dwc->gadget.max_speed = dwc->maximum_speed; 2751 2773 2752 2774 /* 2753 2775 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
+1 -1
drivers/usb/gadget/function/f_loopback.c
··· 329 329 for (i = 0; i < loop->qlen && result == 0; i++) { 330 330 result = -ENOMEM; 331 331 332 - in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL); 332 + in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC); 333 333 if (!in_req) 334 334 goto fail; 335 335
+1 -1
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 1633 1633 spin_lock(&udc->lock); 1634 1634 1635 1635 int_enb = usba_int_enb_get(udc); 1636 - status = usba_readl(udc, INT_STA) & int_enb; 1636 + status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED); 1637 1637 DBG(DBG_INT, "irq, status=%#08x\n", status); 1638 1638 1639 1639 if (status & USBA_DET_SUSPEND) {
+9 -6
drivers/usb/host/xhci-hub.c
··· 782 782 status |= USB_PORT_STAT_SUSPEND; 783 783 } 784 784 } 785 - if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 786 - && (raw_port_status & PORT_POWER) 787 - && (bus_state->suspended_ports & (1 << wIndex))) { 788 - bus_state->suspended_ports &= ~(1 << wIndex); 789 - if (hcd->speed < HCD_USB3) 790 - bus_state->port_c_suspend |= 1 << wIndex; 785 + if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 && 786 + (raw_port_status & PORT_POWER)) { 787 + if (bus_state->suspended_ports & (1 << wIndex)) { 788 + bus_state->suspended_ports &= ~(1 << wIndex); 789 + if (hcd->speed < HCD_USB3) 790 + bus_state->port_c_suspend |= 1 << wIndex; 791 + } 792 + bus_state->resume_done[wIndex] = 0; 793 + clear_bit(wIndex, &bus_state->resuming_ports); 791 794 } 792 795 if (raw_port_status & PORT_CONNECT) { 793 796 status |= USB_PORT_STAT_CONNECTION;
+6 -26
drivers/usb/host/xhci-ring.c
··· 3896 3896 return ret; 3897 3897 } 3898 3898 3899 - static int ep_ring_is_processing(struct xhci_hcd *xhci, 3900 - int slot_id, unsigned int ep_index) 3901 - { 3902 - struct xhci_virt_device *xdev; 3903 - struct xhci_ring *ep_ring; 3904 - struct xhci_ep_ctx *ep_ctx; 3905 - struct xhci_virt_ep *xep; 3906 - dma_addr_t hw_deq; 3907 - 3908 - xdev = xhci->devs[slot_id]; 3909 - xep = &xhci->devs[slot_id]->eps[ep_index]; 3910 - ep_ring = xep->ring; 3911 - ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 3912 - 3913 - if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING) 3914 - return 0; 3915 - 3916 - hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; 3917 - return (hw_deq != 3918 - xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue)); 3919 - } 3920 - 3921 3899 /* 3922 3900 * Check transfer ring to guarantee there is enough room for the urb. 3923 3901 * Update ISO URB start_frame and interval. ··· 3961 3983 } 3962 3984 3963 3985 /* Calculate the start frame and put it in urb->start_frame. */ 3964 - if (HCC_CFC(xhci->hcc_params) && 3965 - ep_ring_is_processing(xhci, slot_id, ep_index)) { 3966 - urb->start_frame = xep->next_frame_id; 3967 - goto skip_start_over; 3986 + if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { 3987 + if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == 3988 + EP_STATE_RUNNING) { 3989 + urb->start_frame = xep->next_frame_id; 3990 + goto skip_start_over; 3991 + } 3968 3992 } 3969 3993 3970 3994 start_frame = readl(&xhci->run_regs->microframe_index);
+10
drivers/usb/host/xhci.c
··· 175 175 command |= CMD_RESET; 176 176 writel(command, &xhci->op_regs->command); 177 177 178 + /* Existing Intel xHCI controllers require a delay of 1 mS, 179 + * after setting the CMD_RESET bit, and before accessing any 180 + * HC registers. This allows the HC to complete the 181 + * reset operation and be ready for HC register access. 182 + * Without this delay, the subsequent HC register access, 183 + * may result in a system hang very rarely. 184 + */ 185 + if (xhci->quirks & XHCI_INTEL_HOST) 186 + udelay(1000); 187 + 178 188 ret = xhci_handshake(&xhci->op_regs->command, 179 189 CMD_RESET, 0, 10 * 1000 * 1000); 180 190 if (ret)
+6 -6
drivers/usb/musb/musb_core.c
··· 132 132 /*-------------------------------------------------------------------------*/ 133 133 134 134 #ifndef CONFIG_BLACKFIN 135 - static int musb_ulpi_read(struct usb_phy *phy, u32 offset) 135 + static int musb_ulpi_read(struct usb_phy *phy, u32 reg) 136 136 { 137 137 void __iomem *addr = phy->io_priv; 138 138 int i = 0; ··· 151 151 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. 152 152 */ 153 153 154 - musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); 154 + musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg); 155 155 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, 156 156 MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); 157 157 ··· 176 176 return ret; 177 177 } 178 178 179 - static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) 179 + static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg) 180 180 { 181 181 void __iomem *addr = phy->io_priv; 182 182 int i = 0; ··· 191 191 power &= ~MUSB_POWER_SUSPENDM; 192 192 musb_writeb(addr, MUSB_POWER, power); 193 193 194 - musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); 195 - musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data); 194 + musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg); 195 + musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val); 196 196 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); 197 197 198 198 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) ··· 1668 1668 static bool use_dma = 1; 1669 1669 1670 1670 /* "modprobe ... use_dma=0" etc */ 1671 - module_param(use_dma, bool, 0); 1671 + module_param(use_dma, bool, 0644); 1672 1672 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); 1673 1673 1674 1674 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+16 -6
drivers/usb/musb/musb_host.c
··· 112 112 struct musb *musb = ep->musb; 113 113 void __iomem *epio = ep->regs; 114 114 u16 csr; 115 - u16 lastcsr = 0; 116 115 int retries = 1000; 117 116 118 117 csr = musb_readw(epio, MUSB_TXCSR); 119 118 while (csr & MUSB_TXCSR_FIFONOTEMPTY) { 120 - if (csr != lastcsr) 121 - dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); 122 - lastcsr = csr; 123 119 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; 124 120 musb_writew(epio, MUSB_TXCSR, csr); 125 121 csr = musb_readw(epio, MUSB_TXCSR); 126 - if (WARN(retries-- < 1, 122 + 123 + /* 124 + * FIXME: sometimes the tx fifo flush failed, it has been 125 + * observed during device disconnect on AM335x. 126 + * 127 + * To reproduce the issue, ensure tx urb(s) are queued when 128 + * unplug the usb device which is connected to AM335x usb 129 + * host port. 130 + * 131 + * I found using a usb-ethernet device and running iperf 132 + * (client on AM335x) has very high chance to trigger it. 133 + * 134 + * Better to turn on dev_dbg() in musb_cleanup_urb() with 135 + * CPPI enabled to see the issue when aborting the tx channel. 136 + */ 137 + if (dev_WARN_ONCE(musb->controller, retries-- < 1, 127 138 "Could not flush host TX%d fifo: csr: %04x\n", 128 139 ep->epnum, csr)) 129 140 return; 130 - mdelay(1); 131 141 } 132 142 } 133 143
+1 -3
drivers/usb/phy/Kconfig
··· 21 21 config FSL_USB2_OTG 22 22 bool "Freescale USB OTG Transceiver Driver" 23 23 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM 24 - select USB_OTG 25 24 select USB_PHY 26 25 help 27 26 Enable this to support Freescale USB OTG transceiver. ··· 167 168 168 169 config USB_MV_OTG 169 170 tristate "Marvell USB OTG support" 170 - depends on USB_EHCI_MV && USB_MV_UDC && PM 171 - select USB_OTG 171 + depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG 172 172 select USB_PHY 173 173 help 174 174 Say Y here if you want to build Marvell USB OTG transciever
+5 -2
drivers/usb/phy/phy-mxs-usb.c
··· 452 452 struct clk *clk; 453 453 struct mxs_phy *mxs_phy; 454 454 int ret; 455 - const struct of_device_id *of_id = 456 - of_match_device(mxs_phy_dt_ids, &pdev->dev); 455 + const struct of_device_id *of_id; 457 456 struct device_node *np = pdev->dev.of_node; 457 + 458 + of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev); 459 + if (!of_id) 460 + return -ENODEV; 458 461 459 462 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 460 463 base = devm_ioremap_resource(&pdev->dev, res);
+1 -1
drivers/usb/phy/phy-omap-otg.c
··· 105 105 extcon = extcon_get_extcon_dev(config->extcon); 106 106 if (!extcon) 107 107 return -EPROBE_DEFER; 108 - otg_dev->extcon = extcon; 109 108 110 109 otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); 111 110 if (!otg_dev) ··· 114 115 if (IS_ERR(otg_dev->base)) 115 116 return PTR_ERR(otg_dev->base); 116 117 118 + otg_dev->extcon = extcon; 117 119 otg_dev->id_nb.notifier_call = omap_otg_id_notifier; 118 120 otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; 119 121
+11
drivers/usb/serial/option.c
··· 161 161 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 162 162 #define NOVATELWIRELESS_PRODUCT_E362 0x9010 163 163 #define NOVATELWIRELESS_PRODUCT_E371 0x9011 164 + #define NOVATELWIRELESS_PRODUCT_U620L 0x9022 164 165 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 165 166 #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 166 167 ··· 355 354 /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * 356 355 * It seems to contain a Qualcomm QSC6240/6290 chipset */ 357 356 #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 357 + #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 358 358 359 359 /* iBall 3.5G connect wireless modem */ 360 360 #define IBALL_3_5G_CONNECT 0x9605 ··· 519 517 520 518 static const struct option_blacklist_info four_g_w14_blacklist = { 521 519 .sendsetup = BIT(0) | BIT(1), 520 + }; 521 + 522 + static const struct option_blacklist_info four_g_w100_blacklist = { 523 + .sendsetup = BIT(1) | BIT(2), 524 + .reserved = BIT(3), 522 525 }; 523 526 524 527 static const struct option_blacklist_info alcatel_x200_blacklist = { ··· 1059 1052 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, 1060 1053 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, 1061 1054 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, 1055 + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) }, 1062 1056 1063 1057 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, 1064 1058 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, ··· 1649 1641 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), 1650 1642 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist 1651 1643 }, 1644 + { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), 1645 + .driver_info = (kernel_ulong_t)&four_g_w100_blacklist 1646 + }, 1652 1647 { 
USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, 1653 1648 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, 1654 1649 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+74 -20
drivers/usb/serial/qcserial.c
··· 22 22 #define DRIVER_AUTHOR "Qualcomm Inc" 23 23 #define DRIVER_DESC "Qualcomm USB Serial driver" 24 24 25 + #define QUECTEL_EC20_PID 0x9215 26 + 25 27 /* standard device layouts supported by this driver */ 26 28 enum qcserial_layouts { 27 29 QCSERIAL_G2K = 0, /* Gobi 2000 */ ··· 173 171 }; 174 172 MODULE_DEVICE_TABLE(usb, id_table); 175 173 174 + static int handle_quectel_ec20(struct device *dev, int ifnum) 175 + { 176 + int altsetting = 0; 177 + 178 + /* 179 + * Quectel EC20 Mini PCIe LTE module layout: 180 + * 0: DM/DIAG (use libqcdm from ModemManager for communication) 181 + * 1: NMEA 182 + * 2: AT-capable modem port 183 + * 3: Modem interface 184 + * 4: NDIS 185 + */ 186 + switch (ifnum) { 187 + case 0: 188 + dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n"); 189 + break; 190 + case 1: 191 + dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n"); 192 + break; 193 + case 2: 194 + case 3: 195 + dev_dbg(dev, "Quectel EC20 Modem port found\n"); 196 + break; 197 + case 4: 198 + /* Don't claim the QMI/net interface */ 199 + altsetting = -1; 200 + break; 201 + } 202 + 203 + return altsetting; 204 + } 205 + 176 206 static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) 177 207 { 178 208 struct usb_host_interface *intf = serial->interface->cur_altsetting; ··· 214 180 __u8 ifnum; 215 181 int altsetting = -1; 216 182 bool sendsetup = false; 183 + 184 + /* we only support vendor specific functions */ 185 + if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) 186 + goto done; 217 187 218 188 nintf = serial->dev->actconfig->desc.bNumInterfaces; 219 189 dev_dbg(dev, "Num Interfaces = %d\n", nintf); ··· 278 240 altsetting = -1; 279 241 break; 280 242 case QCSERIAL_G2K: 243 + /* handle non-standard layouts */ 244 + if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) { 245 + altsetting = handle_quectel_ec20(dev, ifnum); 246 + goto done; 247 + } 248 + 281 249 /* 282 250 * Gobi 2K+ USB layout: 283 251 * 0: QMI/net ··· 345 301 break; 346 302 
case QCSERIAL_HWI: 347 303 /* 348 - * Huawei layout: 349 - * 0: AT-capable modem port 350 - * 1: DM/DIAG 351 - * 2: AT-capable modem port 352 - * 3: CCID-compatible PCSC interface 353 - * 4: QMI/net 354 - * 5: NMEA 304 + * Huawei devices map functions by subclass + protocol 305 + * instead of interface numbers. The protocol identify 306 + * a specific function, while the subclass indicate a 307 + * specific firmware source 308 + * 309 + * This is a blacklist of functions known to be 310 + * non-serial. The rest are assumed to be serial and 311 + * will be handled by this driver 355 312 */ 356 - switch (ifnum) { 357 - case 0: 358 - case 2: 359 - dev_dbg(dev, "Modem port found\n"); 360 - break; 361 - case 1: 362 - dev_dbg(dev, "DM/DIAG interface found\n"); 363 - break; 364 - case 5: 365 - dev_dbg(dev, "NMEA GPS interface found\n"); 366 - break; 367 - default: 368 - /* don't claim any unsupported interface */ 313 + switch (intf->desc.bInterfaceProtocol) { 314 + /* QMI combined (qmi_wwan) */ 315 + case 0x07: 316 + case 0x37: 317 + case 0x67: 318 + /* QMI data (qmi_wwan) */ 319 + case 0x08: 320 + case 0x38: 321 + case 0x68: 322 + /* QMI control (qmi_wwan) */ 323 + case 0x09: 324 + case 0x39: 325 + case 0x69: 326 + /* NCM like (huawei_cdc_ncm) */ 327 + case 0x16: 328 + case 0x46: 329 + case 0x76: 369 330 altsetting = -1; 370 331 break; 332 + default: 333 + dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n", 334 + intf->desc.bInterfaceClass, 335 + intf->desc.bInterfaceSubClass, 336 + intf->desc.bInterfaceProtocol); 371 337 } 372 338 break; 373 339 default:
+2
drivers/usb/serial/ti_usb_3410_5052.c
··· 159 159 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, 160 160 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, 161 161 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 162 + { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, 162 163 { } /* terminator */ 163 164 }; 164 165 ··· 192 191 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, 193 192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, 194 193 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 194 + { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, 195 195 { } /* terminator */ 196 196 }; 197 197
+4
drivers/usb/serial/ti_usb_3410_5052.h
··· 56 56 #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID 57 57 #define ABBOTT_STRIP_PORT_ID 0x3420 58 58 59 + /* Honeywell vendor and product IDs */ 60 + #define HONEYWELL_VENDOR_ID 0x10ac 61 + #define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */ 62 + 59 63 /* Commands */ 60 64 #define TI_GET_VERSION 0x01 61 65 #define TI_GET_PORT_STATUS 0x02
+6
fs/Kconfig
··· 46 46 or if unsure, say N. Saying Y will increase the size of the kernel 47 47 by about 5kB. 48 48 49 + config FS_DAX_PMD 50 + bool 51 + default FS_DAX 52 + depends on FS_DAX 53 + depends on BROKEN 54 + 49 55 endif # BLOCK 50 56 51 57 # Posix ACL utility routines
+16 -2
fs/block_dev.c
··· 390 390 struct page *page) 391 391 { 392 392 const struct block_device_operations *ops = bdev->bd_disk->fops; 393 + int result = -EOPNOTSUPP; 394 + 393 395 if (!ops->rw_page || bdev_get_integrity(bdev)) 394 - return -EOPNOTSUPP; 395 - return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); 396 + return result; 397 + 398 + result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL); 399 + if (result) 400 + return result; 401 + result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); 402 + blk_queue_exit(bdev->bd_queue); 403 + return result; 396 404 } 397 405 EXPORT_SYMBOL_GPL(bdev_read_page); 398 406 ··· 429 421 int result; 430 422 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE; 431 423 const struct block_device_operations *ops = bdev->bd_disk->fops; 424 + 432 425 if (!ops->rw_page || bdev_get_integrity(bdev)) 433 426 return -EOPNOTSUPP; 427 + result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL); 428 + if (result) 429 + return result; 430 + 434 431 set_page_writeback(page); 435 432 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); 436 433 if (result) 437 434 end_page_writeback(page); 438 435 else 439 436 unlock_page(page); 437 + blk_queue_exit(bdev->bd_queue); 440 438 return result; 441 439 } 442 440 EXPORT_SYMBOL_GPL(bdev_write_page);
+110
fs/configfs/dir.c
··· 1636 1636 .iterate = configfs_readdir, 1637 1637 }; 1638 1638 1639 + /** 1640 + * configfs_register_group - creates a parent-child relation between two groups 1641 + * @parent_group: parent group 1642 + * @group: child group 1643 + * 1644 + * link groups, creates dentry for the child and attaches it to the 1645 + * parent dentry. 1646 + * 1647 + * Return: 0 on success, negative errno code on error 1648 + */ 1649 + int configfs_register_group(struct config_group *parent_group, 1650 + struct config_group *group) 1651 + { 1652 + struct configfs_subsystem *subsys = parent_group->cg_subsys; 1653 + struct dentry *parent; 1654 + int ret; 1655 + 1656 + mutex_lock(&subsys->su_mutex); 1657 + link_group(parent_group, group); 1658 + mutex_unlock(&subsys->su_mutex); 1659 + 1660 + parent = parent_group->cg_item.ci_dentry; 1661 + 1662 + mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT); 1663 + ret = create_default_group(parent_group, group); 1664 + if (!ret) { 1665 + spin_lock(&configfs_dirent_lock); 1666 + configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); 1667 + spin_unlock(&configfs_dirent_lock); 1668 + } 1669 + mutex_unlock(&d_inode(parent)->i_mutex); 1670 + return ret; 1671 + } 1672 + EXPORT_SYMBOL(configfs_register_group); 1673 + 1674 + /** 1675 + * configfs_unregister_group() - unregisters a child group from its parent 1676 + * @group: parent group to be unregistered 1677 + * 1678 + * Undoes configfs_register_group() 1679 + */ 1680 + void configfs_unregister_group(struct config_group *group) 1681 + { 1682 + struct configfs_subsystem *subsys = group->cg_subsys; 1683 + struct dentry *dentry = group->cg_item.ci_dentry; 1684 + struct dentry *parent = group->cg_item.ci_parent->ci_dentry; 1685 + 1686 + mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT); 1687 + spin_lock(&configfs_dirent_lock); 1688 + configfs_detach_prep(dentry, NULL); 1689 + spin_unlock(&configfs_dirent_lock); 1690 + 1691 + configfs_detach_group(&group->cg_item); 1692 + 
d_inode(dentry)->i_flags |= S_DEAD; 1693 + dont_mount(dentry); 1694 + d_delete(dentry); 1695 + mutex_unlock(&d_inode(parent)->i_mutex); 1696 + 1697 + dput(dentry); 1698 + 1699 + mutex_lock(&subsys->su_mutex); 1700 + unlink_group(group); 1701 + mutex_unlock(&subsys->su_mutex); 1702 + } 1703 + EXPORT_SYMBOL(configfs_unregister_group); 1704 + 1705 + /** 1706 + * configfs_register_default_group() - allocates and registers a child group 1707 + * @parent_group: parent group 1708 + * @name: child group name 1709 + * @item_type: child item type description 1710 + * 1711 + * boilerplate to allocate and register a child group with its parent. We need 1712 + * kzalloc'ed memory because child's default_group is initially empty. 1713 + * 1714 + * Return: allocated config group or ERR_PTR() on error 1715 + */ 1716 + struct config_group * 1717 + configfs_register_default_group(struct config_group *parent_group, 1718 + const char *name, 1719 + struct config_item_type *item_type) 1720 + { 1721 + int ret; 1722 + struct config_group *group; 1723 + 1724 + group = kzalloc(sizeof(*group), GFP_KERNEL); 1725 + if (!group) 1726 + return ERR_PTR(-ENOMEM); 1727 + config_group_init_type_name(group, name, item_type); 1728 + 1729 + ret = configfs_register_group(parent_group, group); 1730 + if (ret) { 1731 + kfree(group); 1732 + return ERR_PTR(ret); 1733 + } 1734 + return group; 1735 + } 1736 + EXPORT_SYMBOL(configfs_register_default_group); 1737 + 1738 + /** 1739 + * configfs_unregister_default_group() - unregisters and frees a child group 1740 + * @group: the group to act on 1741 + */ 1742 + void configfs_unregister_default_group(struct config_group *group) 1743 + { 1744 + configfs_unregister_group(group); 1745 + kfree(group); 1746 + } 1747 + EXPORT_SYMBOL(configfs_unregister_default_group); 1748 + 1639 1749 int configfs_register_subsystem(struct configfs_subsystem *subsys) 1640 1750 { 1641 1751 int err;
+4
fs/dax.c
··· 541 541 unsigned long pfn; 542 542 int result = 0; 543 543 544 + /* dax pmd mappings are broken wrt gup and fork */ 545 + if (!IS_ENABLED(CONFIG_FS_DAX_PMD)) 546 + return VM_FAULT_FALLBACK; 547 + 544 548 /* Fall back to PTEs if we're going to COW */ 545 549 if (write && !(vma->vm_flags & VM_SHARED)) 546 550 return VM_FAULT_FALLBACK;
+2
fs/ext2/super.c
··· 569 569 /* Fall through */ 570 570 case Opt_dax: 571 571 #ifdef CONFIG_FS_DAX 572 + ext2_msg(sb, KERN_WARNING, 573 + "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); 572 574 set_opt(sbi->s_mount_opt, DAX); 573 575 #else 574 576 ext2_msg(sb, KERN_INFO, "dax option not supported");
+5 -1
fs/ext4/super.c
··· 1664 1664 } 1665 1665 sbi->s_jquota_fmt = m->mount_opt; 1666 1666 #endif 1667 - #ifndef CONFIG_FS_DAX 1668 1667 } else if (token == Opt_dax) { 1668 + #ifdef CONFIG_FS_DAX 1669 + ext4_msg(sb, KERN_WARNING, 1670 + "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); 1671 + sbi->s_mount_opt |= m->mount_opt; 1672 + #else 1669 1673 ext4_msg(sb, KERN_INFO, "dax option not supported"); 1670 1674 return -1; 1671 1675 #endif
+11 -5
fs/fat/dir.c
··· 610 610 int status = fat_parse_long(inode, &cpos, &bh, &de, 611 611 &unicode, &nr_slots); 612 612 if (status < 0) { 613 - ctx->pos = cpos; 613 + bh = NULL; 614 614 ret = status; 615 - goto out; 615 + goto end_of_dir; 616 616 } else if (status == PARSE_INVALID) 617 617 goto record_end; 618 618 else if (status == PARSE_NOT_LONGNAME) ··· 654 654 fill_len = short_len; 655 655 656 656 start_filldir: 657 - if (!fake_offset) 658 - ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); 657 + ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); 658 + if (fake_offset && ctx->pos < 2) 659 + ctx->pos = 2; 659 660 660 661 if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { 661 662 if (!dir_emit_dot(file, ctx)) ··· 682 681 fake_offset = 0; 683 682 ctx->pos = cpos; 684 683 goto get_new; 684 + 685 685 end_of_dir: 686 - ctx->pos = cpos; 686 + if (fake_offset && cpos < 2) 687 + ctx->pos = 2; 688 + else 689 + ctx->pos = cpos; 687 690 fill_failed: 688 691 brelse(bh); 689 692 if (unicode) 690 693 __putname(unicode); 691 694 out: 692 695 mutex_unlock(&sbi->s_lock); 696 + 693 697 return ret; 694 698 } 695 699
+32 -33
fs/hugetlbfs/inode.c
··· 332 332 * truncation is indicated by end of range being LLONG_MAX 333 333 * In this case, we first scan the range and release found pages. 334 334 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv 335 - * maps and global counts. 335 + * maps and global counts. Page faults can not race with truncation 336 + * in this routine. hugetlb_no_page() prevents page faults in the 337 + * truncated range. It checks i_size before allocation, and again after 338 + * with the page table lock for the page held. The same lock must be 339 + * acquired to unmap a page. 336 340 * hole punch is indicated if end is not LLONG_MAX 337 341 * In the hole punch case we scan the range and release found pages. 338 342 * Only when releasing a page is the associated region/reserv map 339 343 * deleted. The region/reserv map for ranges without associated 340 - * pages are not modified. 344 + * pages are not modified. Page faults can race with hole punch. 345 + * This is indicated if we find a mapped page. 341 346 * Note: If the passed end of range value is beyond the end of file, but 342 347 * not LLONG_MAX this routine still performs a hole punch operation. 343 348 */ ··· 366 361 next = start; 367 362 while (next < end) { 368 363 /* 369 - * Make sure to never grab more pages that we 370 - * might possibly need. 364 + * Don't grab more pages than the number left in the range. 371 365 */ 372 366 if (end - next < lookup_nr) 373 367 lookup_nr = end - next; 374 368 375 369 /* 376 - * This pagevec_lookup() may return pages past 'end', 377 - * so we must check for page->index > end. 370 + * When no more pages are found, we are done. 
378 371 */ 379 - if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) { 380 - if (next == start) 381 - break; 382 - next = start; 383 - continue; 384 - } 372 + if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) 373 + break; 385 374 386 375 for (i = 0; i < pagevec_count(&pvec); ++i) { 387 376 struct page *page = pvec.pages[i]; 388 377 u32 hash; 378 + 379 + /* 380 + * The page (index) could be beyond end. This is 381 + * only possible in the punch hole case as end is 382 + * max page offset in the truncate case. 383 + */ 384 + next = page->index; 385 + if (next >= end) 386 + break; 389 387 390 388 hash = hugetlb_fault_mutex_hash(h, current->mm, 391 389 &pseudo_vma, ··· 396 388 mutex_lock(&hugetlb_fault_mutex_table[hash]); 397 389 398 390 lock_page(page); 399 - if (page->index >= end) { 400 - unlock_page(page); 401 - mutex_unlock(&hugetlb_fault_mutex_table[hash]); 402 - next = end; /* we are done */ 403 - break; 404 - } 405 - 406 - /* 407 - * If page is mapped, it was faulted in after being 408 - * unmapped. Do nothing in this race case. In the 409 - * normal case page is not mapped. 410 - */ 411 - if (!page_mapped(page)) { 391 + if (likely(!page_mapped(page))) { 412 392 bool rsv_on_error = !PagePrivate(page); 413 393 /* 414 394 * We must free the huge page and remove ··· 417 421 hugetlb_fix_reserve_counts( 418 422 inode, rsv_on_error); 419 423 } 424 + } else { 425 + /* 426 + * If page is mapped, it was faulted in after 427 + * being unmapped. It indicates a race between 428 + * hole punch and page fault. Do nothing in 429 + * this case. Getting here in a truncate 430 + * operation is a bug. 
431 + */ 432 + BUG_ON(truncate_op); 420 433 } 421 434 422 - if (page->index > next) 423 - next = page->index; 424 - 425 - ++next; 426 435 unlock_page(page); 427 - 428 436 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 429 437 } 438 + ++next; 430 439 huge_pagevec_release(&pvec); 440 + cond_resched(); 431 441 } 432 442 433 443 if (truncate_op) ··· 649 647 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 650 648 i_size_write(inode, offset + len); 651 649 inode->i_ctime = CURRENT_TIME; 652 - spin_lock(&inode->i_lock); 653 - inode->i_private = NULL; 654 - spin_unlock(&inode->i_lock); 655 650 out: 656 651 mutex_unlock(&inode->i_mutex); 657 652 return error;
+2
fs/ncpfs/ioctl.c
··· 525 525 switch (rqdata.cmd) { 526 526 case NCP_LOCK_EX: 527 527 case NCP_LOCK_SH: 528 + if (rqdata.timeout < 0) 529 + return -EINVAL; 528 530 if (rqdata.timeout == 0) 529 531 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; 530 532 else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
+2
fs/ocfs2/namei.c
··· 372 372 mlog_errno(status); 373 373 goto leave; 374 374 } 375 + /* update inode->i_mode after mask with "umask". */ 376 + inode->i_mode = mode; 375 377 376 378 handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, 377 379 S_ISDIR(mode),
+8
fs/splice.c
··· 809 809 */ 810 810 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) 811 811 { 812 + /* 813 + * Check for signal early to make process killable when there are 814 + * always buffers available 815 + */ 816 + if (signal_pending(current)) 817 + return -ERESTARTSYS; 818 + 812 819 while (!pipe->nrbufs) { 813 820 if (!pipe->writers) 814 821 return 0; ··· 891 884 892 885 splice_from_pipe_begin(sd); 893 886 do { 887 + cond_resched(); 894 888 ret = splice_from_pipe_next(pipe, sd); 895 889 if (ret > 0) 896 890 ret = splice_from_pipe_feed(pipe, sd, actor);
+2 -9
fs/sysv/inode.c
··· 162 162 inode->i_fop = &sysv_dir_operations; 163 163 inode->i_mapping->a_ops = &sysv_aops; 164 164 } else if (S_ISLNK(inode->i_mode)) { 165 - if (inode->i_blocks) { 166 - inode->i_op = &sysv_symlink_inode_operations; 167 - inode->i_mapping->a_ops = &sysv_aops; 168 - } else { 169 - inode->i_op = &simple_symlink_inode_operations; 170 - inode->i_link = (char *)SYSV_I(inode)->i_data; 171 - nd_terminate_link(inode->i_link, inode->i_size, 172 - sizeof(SYSV_I(inode)->i_data) - 1); 173 - } 165 + inode->i_op = &sysv_symlink_inode_operations; 166 + inode->i_mapping->a_ops = &sysv_aops; 174 167 } else 175 168 init_special_inode(inode, inode->i_mode, rdev); 176 169 }
+3
include/drm/drm_atomic.h
··· 136 136 137 137 void drm_atomic_legacy_backoff(struct drm_atomic_state *state); 138 138 139 + void 140 + drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret); 141 + 139 142 int __must_check drm_atomic_check_only(struct drm_atomic_state *state); 140 143 int __must_check drm_atomic_commit(struct drm_atomic_state *state); 141 144 int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+1 -1
include/kvm/arm_vgic.h
··· 342 342 struct irq_phys_map *map, bool level); 343 343 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); 344 344 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); 345 - int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu); 346 345 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, 347 346 int virt_irq, int irq); 348 347 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); 348 + bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map); 349 349 350 350 #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) 351 351 #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
+2
include/linux/blkdev.h
··· 794 794 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 795 795 struct scsi_ioctl_command __user *); 796 796 797 + extern int blk_queue_enter(struct request_queue *q, gfp_t gfp); 798 + extern void blk_queue_exit(struct request_queue *q); 797 799 extern void blk_start_queue(struct request_queue *q); 798 800 extern void blk_stop_queue(struct request_queue *q); 799 801 extern void blk_sync_queue(struct request_queue *q);
+10
include/linux/configfs.h
··· 197 197 int configfs_register_subsystem(struct configfs_subsystem *subsys); 198 198 void configfs_unregister_subsystem(struct configfs_subsystem *subsys); 199 199 200 + int configfs_register_group(struct config_group *parent_group, 201 + struct config_group *group); 202 + void configfs_unregister_group(struct config_group *group); 203 + 204 + struct config_group * 205 + configfs_register_default_group(struct config_group *parent_group, 206 + const char *name, 207 + struct config_item_type *item_type); 208 + void configfs_unregister_default_group(struct config_group *group); 209 + 200 210 /* These functions can sleep and can alloc with GFP_KERNEL */ 201 211 /* WARNING: These cannot be called underneath configfs callbacks!! */ 202 212 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
+1 -1
include/linux/gfp.h
··· 271 271 272 272 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) 273 273 { 274 - return gfp_flags & __GFP_DIRECT_RECLAIM; 274 + return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM); 275 275 } 276 276 277 277 #ifdef CONFIG_HIGHMEM
+11
include/linux/kvm_host.h
··· 460 460 (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ 461 461 idx++) 462 462 463 + static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) 464 + { 465 + struct kvm_vcpu *vcpu; 466 + int i; 467 + 468 + kvm_for_each_vcpu(i, vcpu, kvm) 469 + if (vcpu->vcpu_id == id) 470 + return vcpu; 471 + return NULL; 472 + } 473 + 463 474 #define kvm_for_each_memslot(memslot, slots) \ 464 475 for (memslot = &slots->memslots[0]; \ 465 476 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
+44 -132
include/linux/lightnvm.h
··· 58 58 struct nvm_id_group { 59 59 u8 mtype; 60 60 u8 fmtype; 61 - u16 res16; 62 61 u8 num_ch; 63 62 u8 num_lun; 64 63 u8 num_pln; ··· 73 74 u32 tbet; 74 75 u32 tbem; 75 76 u32 mpos; 77 + u32 mccap; 76 78 u16 cpar; 77 - u8 res[913]; 78 - } __packed; 79 + }; 79 80 80 81 struct nvm_addr_format { 81 82 u8 ch_offset; ··· 90 91 u8 pg_len; 91 92 u8 sect_offset; 92 93 u8 sect_len; 93 - u8 res[4]; 94 94 }; 95 95 96 96 struct nvm_id { 97 97 u8 ver_id; 98 98 u8 vmnt; 99 99 u8 cgrps; 100 - u8 res[5]; 101 100 u32 cap; 102 101 u32 dom; 103 102 struct nvm_addr_format ppaf; 104 - u8 ppat; 105 - u8 resv[224]; 106 103 struct nvm_id_group groups[4]; 107 104 } __packed; 108 105 ··· 118 123 #define NVM_VERSION_MINOR 0 119 124 #define NVM_VERSION_PATCH 0 120 125 121 - #define NVM_SEC_BITS (8) 122 - #define NVM_PL_BITS (6) 123 - #define NVM_PG_BITS (16) 124 126 #define NVM_BLK_BITS (16) 125 - #define NVM_LUN_BITS (10) 127 + #define NVM_PG_BITS (16) 128 + #define NVM_SEC_BITS (8) 129 + #define NVM_PL_BITS (8) 130 + #define NVM_LUN_BITS (8) 126 131 #define NVM_CH_BITS (8) 127 132 128 133 struct ppa_addr { 134 + /* Generic structure for all addresses */ 129 135 union { 130 - /* Channel-based PPA format in nand 4x2x2x2x8x10 */ 131 136 struct { 132 - u64 ch : 4; 133 - u64 sec : 2; /* 4 sectors per page */ 134 - u64 pl : 2; /* 4 planes per LUN */ 135 - u64 lun : 2; /* 4 LUNs per channel */ 136 - u64 pg : 8; /* 256 pages per block */ 137 - u64 blk : 10;/* 1024 blocks per plane */ 138 - u64 resved : 36; 139 - } chnl; 140 - 141 - /* Generic structure for all addresses */ 142 - struct { 137 + u64 blk : NVM_BLK_BITS; 138 + u64 pg : NVM_PG_BITS; 143 139 u64 sec : NVM_SEC_BITS; 144 140 u64 pl : NVM_PL_BITS; 145 - u64 pg : NVM_PG_BITS; 146 - u64 blk : NVM_BLK_BITS; 147 141 u64 lun : NVM_LUN_BITS; 148 142 u64 ch : NVM_CH_BITS; 149 143 } g; 150 144 151 145 u64 ppa; 152 146 }; 153 - } __packed; 147 + }; 154 148 155 149 struct nvm_rq { 156 150 struct nvm_tgt_instance *ins; ··· 175 191 struct 
nvm_block; 176 192 177 193 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); 178 - typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); 194 + typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); 179 195 typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); 180 196 typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, 181 197 nvm_l2p_update_fn *, void *); 182 - typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, 198 + typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int, 183 199 nvm_bb_update_fn *, void *); 184 200 typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); 185 201 typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); ··· 194 210 nvm_id_fn *identity; 195 211 nvm_get_l2p_tbl_fn *get_l2p_tbl; 196 212 nvm_op_bb_tbl_fn *get_bb_tbl; 197 - nvm_op_set_bb_fn *set_bb; 213 + nvm_op_set_bb_fn *set_bb_tbl; 198 214 199 215 nvm_submit_io_fn *submit_io; 200 216 nvm_erase_blk_fn *erase_block; ··· 204 220 nvm_dev_dma_alloc_fn *dev_dma_alloc; 205 221 nvm_dev_dma_free_fn *dev_dma_free; 206 222 207 - uint8_t max_phys_sect; 223 + unsigned int max_phys_sect; 208 224 }; 209 225 210 226 struct nvm_lun { ··· 213 229 int lun_id; 214 230 int chnl_id; 215 231 232 + unsigned int nr_inuse_blocks; /* Number of used blocks */ 216 233 unsigned int nr_free_blocks; /* Number of unused blocks */ 234 + unsigned int nr_bad_blocks; /* Number of bad blocks */ 217 235 struct nvm_block *blocks; 218 236 219 237 spinlock_t lock; ··· 249 263 int blks_per_lun; 250 264 int sec_size; 251 265 int oob_size; 252 - int addr_mode; 253 - struct nvm_addr_format addr_format; 266 + struct nvm_addr_format ppaf; 254 267 255 268 /* Calculated/Cached values. These do not reflect the actual usable 256 269 * blocks at run-time. 
··· 275 290 char name[DISK_NAME_LEN]; 276 291 }; 277 292 278 - /* fallback conversion */ 279 - static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, 280 - struct ppa_addr r) 293 + static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, 294 + struct ppa_addr r) 281 295 { 282 296 struct ppa_addr l; 283 297 284 - l.ppa = r.g.sec + 285 - r.g.pg * dev->sec_per_pg + 286 - r.g.blk * (dev->pgs_per_blk * 287 - dev->sec_per_pg) + 288 - r.g.lun * (dev->blks_per_lun * 289 - dev->pgs_per_blk * 290 - dev->sec_per_pg) + 291 - r.g.ch * (dev->blks_per_lun * 292 - dev->pgs_per_blk * 293 - dev->luns_per_chnl * 294 - dev->sec_per_pg); 298 + l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; 299 + l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; 300 + l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; 301 + l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; 302 + l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; 303 + l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; 295 304 296 305 return l; 297 306 } 298 307 299 - /* fallback conversion */ 300 - static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, 301 - struct ppa_addr r) 302 - { 303 - struct ppa_addr l; 304 - int secs, pgs, blks, luns; 305 - sector_t ppa = r.ppa; 306 - 307 - l.ppa = 0; 308 - 309 - div_u64_rem(ppa, dev->sec_per_pg, &secs); 310 - l.g.sec = secs; 311 - 312 - sector_div(ppa, dev->sec_per_pg); 313 - div_u64_rem(ppa, dev->sec_per_blk, &pgs); 314 - l.g.pg = pgs; 315 - 316 - sector_div(ppa, dev->pgs_per_blk); 317 - div_u64_rem(ppa, dev->blks_per_lun, &blks); 318 - l.g.blk = blks; 319 - 320 - sector_div(ppa, dev->blks_per_lun); 321 - div_u64_rem(ppa, dev->luns_per_chnl, &luns); 322 - l.g.lun = luns; 323 - 324 - sector_div(ppa, dev->luns_per_chnl); 325 - l.g.ch = ppa; 326 - 327 - return l; 328 - } 329 - 330 - static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) 308 + static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, 309 + struct ppa_addr r) 331 310 { 332 311 
struct ppa_addr l; 333 312 334 - l.ppa = 0; 335 - 336 - l.chnl.sec = r.g.sec; 337 - l.chnl.pl = r.g.pl; 338 - l.chnl.pg = r.g.pg; 339 - l.chnl.blk = r.g.blk; 340 - l.chnl.lun = r.g.lun; 341 - l.chnl.ch = r.g.ch; 342 - 343 - return l; 344 - } 345 - 346 - static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) 347 - { 348 - struct ppa_addr l; 349 - 350 - l.ppa = 0; 351 - 352 - l.g.sec = r.chnl.sec; 353 - l.g.pl = r.chnl.pl; 354 - l.g.pg = r.chnl.pg; 355 - l.g.blk = r.chnl.blk; 356 - l.g.lun = r.chnl.lun; 357 - l.g.ch = r.chnl.ch; 313 + /* 314 + * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. 315 + */ 316 + l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & 317 + (((1 << dev->ppaf.blk_len) - 1)); 318 + l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & 319 + (((1 << dev->ppaf.pg_len) - 1)); 320 + l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & 321 + (((1 << dev->ppaf.sect_len) - 1)); 322 + l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & 323 + (((1 << dev->ppaf.pln_len) - 1)); 324 + l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & 325 + (((1 << dev->ppaf.lun_len) - 1)); 326 + l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & 327 + (((1 << dev->ppaf.ch_len) - 1)); 358 328 359 329 return l; 360 - } 361 - 362 - static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, 363 - struct ppa_addr gppa) 364 - { 365 - switch (dev->addr_mode) { 366 - case NVM_ADDRMODE_LINEAR: 367 - return __linear_to_generic_addr(dev, gppa); 368 - case NVM_ADDRMODE_CHANNEL: 369 - return __chnl_to_generic_addr(gppa); 370 - default: 371 - BUG(); 372 - } 373 - return gppa; 374 - } 375 - 376 - static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, 377 - struct ppa_addr gppa) 378 - { 379 - switch (dev->addr_mode) { 380 - case NVM_ADDRMODE_LINEAR: 381 - return __generic_to_linear_addr(dev, gppa); 382 - case NVM_ADDRMODE_CHANNEL: 383 - return __generic_to_chnl_addr(gppa); 384 - default: 385 - BUG(); 386 - } 387 - return gppa; 388 330 } 389 331 390 332 static inline int ppa_empty(struct 
ppa_addr ppa_addr) ··· 380 468 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, 381 469 unsigned long); 382 470 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); 383 - typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); 471 + typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); 384 472 385 473 struct nvmm_type { 386 474 const char *name; ··· 404 492 nvmm_get_lun_fn *get_lun; 405 493 406 494 /* Statistics */ 407 - nvmm_free_blocks_print_fn *free_blocks_print; 495 + nvmm_lun_info_print_fn *lun_info_print; 408 496 struct list_head list; 409 497 }; 410 498
+33 -35
include/linux/module.h
··· 302 302 struct latch_tree_node node; 303 303 }; 304 304 305 + struct module_layout { 306 + /* The actual code + data. */ 307 + void *base; 308 + /* Total size. */ 309 + unsigned int size; 310 + /* The size of the executable code. */ 311 + unsigned int text_size; 312 + /* Size of RO section of the module (text+rodata) */ 313 + unsigned int ro_size; 314 + 315 + #ifdef CONFIG_MODULES_TREE_LOOKUP 316 + struct mod_tree_node mtn; 317 + #endif 318 + }; 319 + 320 + #ifdef CONFIG_MODULES_TREE_LOOKUP 321 + /* Only touch one cacheline for common rbtree-for-core-layout case. */ 322 + #define __module_layout_align ____cacheline_aligned 323 + #else 324 + #define __module_layout_align 325 + #endif 326 + 305 327 struct module { 306 328 enum module_state state; 307 329 ··· 388 366 /* Startup function. */ 389 367 int (*init)(void); 390 368 391 - /* 392 - * If this is non-NULL, vfree() after init() returns. 393 - * 394 - * Cacheline align here, such that: 395 - * module_init, module_core, init_size, core_size, 396 - * init_text_size, core_text_size and mtn_core::{mod,node[0]} 397 - * are on the same cacheline. 398 - */ 399 - void *module_init ____cacheline_aligned; 400 - 401 - /* Here is the actual code + data, vfree'd on unload. */ 402 - void *module_core; 403 - 404 - /* Here are the sizes of the init and core sections */ 405 - unsigned int init_size, core_size; 406 - 407 - /* The size of the executable code in each section. */ 408 - unsigned int init_text_size, core_text_size; 409 - 410 - #ifdef CONFIG_MODULES_TREE_LOOKUP 411 - /* 412 - * We want mtn_core::{mod,node[0]} to be in the same cacheline as the 413 - * above entries such that a regular lookup will only touch one 414 - * cacheline. 415 - */ 416 - struct mod_tree_node mtn_core; 417 - struct mod_tree_node mtn_init; 418 - #endif 419 - 420 - /* Size of RO sections of the module (text+rodata) */ 421 - unsigned int init_ro_size, core_ro_size; 369 + /* Core layout: rbtree is accessed frequently, so keep together. 
*/ 370 + struct module_layout core_layout __module_layout_align; 371 + struct module_layout init_layout; 422 372 423 373 /* Arch-specific module values */ 424 374 struct mod_arch_specific arch; ··· 499 505 static inline bool within_module_core(unsigned long addr, 500 506 const struct module *mod) 501 507 { 502 - return (unsigned long)mod->module_core <= addr && 503 - addr < (unsigned long)mod->module_core + mod->core_size; 508 + return (unsigned long)mod->core_layout.base <= addr && 509 + addr < (unsigned long)mod->core_layout.base + mod->core_layout.size; 504 510 } 505 511 506 512 static inline bool within_module_init(unsigned long addr, 507 513 const struct module *mod) 508 514 { 509 - return (unsigned long)mod->module_init <= addr && 510 - addr < (unsigned long)mod->module_init + mod->init_size; 515 + return (unsigned long)mod->init_layout.base <= addr && 516 + addr < (unsigned long)mod->init_layout.base + mod->init_layout.size; 511 517 } 512 518 513 519 static inline bool within_module(unsigned long addr, const struct module *mod) ··· 762 768 #ifdef CONFIG_DEBUG_SET_MODULE_RONX 763 769 extern void set_all_modules_text_rw(void); 764 770 extern void set_all_modules_text_ro(void); 771 + extern void module_enable_ro(const struct module *mod); 772 + extern void module_disable_ro(const struct module *mod); 765 773 #else 766 774 static inline void set_all_modules_text_rw(void) { } 767 775 static inline void set_all_modules_text_ro(void) { } 776 + static inline void module_enable_ro(const struct module *mod) { } 777 + static inline void module_disable_ro(const struct module *mod) { } 768 778 #endif 769 779 770 780 #ifdef CONFIG_GENERIC_BUG
+1 -1
include/linux/of_dma.h
··· 80 80 static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, 81 81 const char *name) 82 82 { 83 - return NULL; 83 + return ERR_PTR(-ENODEV); 84 84 } 85 85 86 86 static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
-1
include/linux/signal.h
··· 239 239 extern void set_current_blocked(sigset_t *); 240 240 extern void __set_current_blocked(const sigset_t *); 241 241 extern int show_unhandled_signals; 242 - extern int sigsuspend(sigset_t *); 243 242 244 243 struct sigaction { 245 244 #ifndef __ARCH_HAS_IRIX_SIGACTION
+27 -18
include/linux/slab.h
··· 158 158 #endif 159 159 160 160 /* 161 + * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. 162 + * Intended for arches that get misalignment faults even for 64 bit integer 163 + * aligned buffers. 164 + */ 165 + #ifndef ARCH_SLAB_MINALIGN 166 + #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 167 + #endif 168 + 169 + /* 170 + * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned 171 + * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN 172 + * aligned pointers. 173 + */ 174 + #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN) 175 + #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN) 176 + #define __assume_page_alignment __assume_aligned(PAGE_SIZE) 177 + 178 + /* 161 179 * Kmalloc array related definitions 162 180 */ 163 181 ··· 304 286 } 305 287 #endif /* !CONFIG_SLOB */ 306 288 307 - void *__kmalloc(size_t size, gfp_t flags); 308 - void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); 289 + void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; 290 + void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; 309 291 void kmem_cache_free(struct kmem_cache *, void *); 310 292 311 293 /* ··· 316 298 * Note that interrupts must be enabled when calling these functions. 
317 299 */ 318 300 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); 319 - bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 301 + int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 320 302 321 303 #ifdef CONFIG_NUMA 322 - void *__kmalloc_node(size_t size, gfp_t flags, int node); 323 - void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 304 + void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; 305 + void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; 324 306 #else 325 307 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) 326 308 { ··· 334 316 #endif 335 317 336 318 #ifdef CONFIG_TRACING 337 - extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); 319 + extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; 338 320 339 321 #ifdef CONFIG_NUMA 340 322 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 341 323 gfp_t gfpflags, 342 - int node, size_t size); 324 + int node, size_t size) __assume_slab_alignment; 343 325 #else 344 326 static __always_inline void * 345 327 kmem_cache_alloc_node_trace(struct kmem_cache *s, ··· 372 354 } 373 355 #endif /* CONFIG_TRACING */ 374 356 375 - extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); 357 + extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; 376 358 377 359 #ifdef CONFIG_TRACING 378 - extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); 360 + extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; 379 361 #else 380 362 static __always_inline void * 381 363 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) ··· 499 481 #endif 500 482 return __kmalloc_node(size, flags, node); 501 483 } 502 - 503 - /* 504 - * Setting 
ARCH_SLAB_MINALIGN in arch headers allows a different alignment. 505 - * Intended for arches that get misalignment faults even for 64 bit integer 506 - * aligned buffers. 507 - */ 508 - #ifndef ARCH_SLAB_MINALIGN 509 - #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 510 - #endif 511 484 512 485 struct memcg_cache_array { 513 486 struct rcu_head rcu;
+3 -3
include/linux/tty.h
··· 607 607 608 608 /* tty_audit.c */ 609 609 #ifdef CONFIG_AUDIT 610 - extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, 610 + extern void tty_audit_add_data(struct tty_struct *tty, const void *data, 611 611 size_t size, unsigned icanon); 612 612 extern void tty_audit_exit(void); 613 613 extern void tty_audit_fork(struct signal_struct *sig); ··· 615 615 extern void tty_audit_push(struct tty_struct *tty); 616 616 extern int tty_audit_push_current(void); 617 617 #else 618 - static inline void tty_audit_add_data(struct tty_struct *tty, 619 - unsigned char *data, size_t size, unsigned icanon) 618 + static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, 619 + size_t size, unsigned icanon) 620 620 { 621 621 } 622 622 static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+2 -2
kernel/debug/kdb/kdb_main.c
··· 2021 2021 continue; 2022 2022 2023 2023 kdb_printf("%-20s%8u 0x%p ", mod->name, 2024 - mod->core_size, (void *)mod); 2024 + mod->core_layout.size, (void *)mod); 2025 2025 #ifdef CONFIG_MODULE_UNLOAD 2026 2026 kdb_printf("%4d ", module_refcount(mod)); 2027 2027 #endif ··· 2031 2031 kdb_printf(" (Loading)"); 2032 2032 else 2033 2033 kdb_printf(" (Live)"); 2034 - kdb_printf(" 0x%p", mod->module_core); 2034 + kdb_printf(" 0x%p", mod->core_layout.base); 2035 2035 2036 2036 #ifdef CONFIG_MODULE_UNLOAD 2037 2037 {
+1 -6
kernel/gcov/base.c
··· 123 123 } 124 124 125 125 #ifdef CONFIG_MODULES 126 - static inline int within(void *addr, void *start, unsigned long size) 127 - { 128 - return ((addr >= start) && (addr < start + size)); 129 - } 130 - 131 126 /* Update list and generate events when modules are unloaded. */ 132 127 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, 133 128 void *data) ··· 137 142 138 143 /* Remove entries located in module from linked list. */ 139 144 while ((info = gcov_info_next(info))) { 140 - if (within(info, mod->module_core, mod->core_size)) { 145 + if (within_module((unsigned long)info, mod)) { 141 146 gcov_info_unlink(prev, info); 142 147 if (gcov_events_enabled) 143 148 gcov_event(GCOV_REMOVE, info);
+168 -187
kernel/module.c
··· 80 80 # define debug_align(X) (X) 81 81 #endif 82 82 83 - /* 84 - * Given BASE and SIZE this macro calculates the number of pages the 85 - * memory regions occupies 86 - */ 87 - #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \ 88 - (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \ 89 - PFN_DOWN((unsigned long)BASE) + 1) \ 90 - : (0UL)) 91 - 92 83 /* If this is set, the section belongs in the init part of the module */ 93 84 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) 94 85 ··· 99 108 * Use a latched RB-tree for __module_address(); this allows us to use 100 109 * RCU-sched lookups of the address from any context. 101 110 * 102 - * Because modules have two address ranges: init and core, we need two 103 - * latch_tree_nodes entries. Therefore we need the back-pointer from 104 - * mod_tree_node. 105 - * 106 - * Because init ranges are short lived we mark them unlikely and have placed 107 - * them outside the critical cacheline in struct module. 108 - * 109 111 * This is conditional on PERF_EVENTS || TRACING because those can really hit 110 112 * __module_address() hard by doing a lot of stack unwinding; potentially from 111 113 * NMI context. 
··· 106 122 107 123 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) 108 124 { 109 - struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); 110 - struct module *mod = mtn->mod; 125 + struct module_layout *layout = container_of(n, struct module_layout, mtn.node); 111 126 112 - if (unlikely(mtn == &mod->mtn_init)) 113 - return (unsigned long)mod->module_init; 114 - 115 - return (unsigned long)mod->module_core; 127 + return (unsigned long)layout->base; 116 128 } 117 129 118 130 static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) 119 131 { 120 - struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); 121 - struct module *mod = mtn->mod; 132 + struct module_layout *layout = container_of(n, struct module_layout, mtn.node); 122 133 123 - if (unlikely(mtn == &mod->mtn_init)) 124 - return (unsigned long)mod->init_size; 125 - 126 - return (unsigned long)mod->core_size; 134 + return (unsigned long)layout->size; 127 135 } 128 136 129 137 static __always_inline bool ··· 173 197 */ 174 198 static void mod_tree_insert(struct module *mod) 175 199 { 176 - mod->mtn_core.mod = mod; 177 - mod->mtn_init.mod = mod; 200 + mod->core_layout.mtn.mod = mod; 201 + mod->init_layout.mtn.mod = mod; 178 202 179 - __mod_tree_insert(&mod->mtn_core); 180 - if (mod->init_size) 181 - __mod_tree_insert(&mod->mtn_init); 203 + __mod_tree_insert(&mod->core_layout.mtn); 204 + if (mod->init_layout.size) 205 + __mod_tree_insert(&mod->init_layout.mtn); 182 206 } 183 207 184 208 static void mod_tree_remove_init(struct module *mod) 185 209 { 186 - if (mod->init_size) 187 - __mod_tree_remove(&mod->mtn_init); 210 + if (mod->init_layout.size) 211 + __mod_tree_remove(&mod->init_layout.mtn); 188 212 } 189 213 190 214 static void mod_tree_remove(struct module *mod) 191 215 { 192 - __mod_tree_remove(&mod->mtn_core); 216 + __mod_tree_remove(&mod->core_layout.mtn); 193 217 mod_tree_remove_init(mod); 194 218 } 195 219 ··· 243 267 
244 268 static void mod_update_bounds(struct module *mod) 245 269 { 246 - __mod_update_bounds(mod->module_core, mod->core_size); 247 - if (mod->init_size) 248 - __mod_update_bounds(mod->module_init, mod->init_size); 270 + __mod_update_bounds(mod->core_layout.base, mod->core_layout.size); 271 + if (mod->init_layout.size) 272 + __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); 249 273 } 250 274 251 275 #ifdef CONFIG_KGDB_KDB ··· 1190 1214 static ssize_t show_coresize(struct module_attribute *mattr, 1191 1215 struct module_kobject *mk, char *buffer) 1192 1216 { 1193 - return sprintf(buffer, "%u\n", mk->mod->core_size); 1217 + return sprintf(buffer, "%u\n", mk->mod->core_layout.size); 1194 1218 } 1195 1219 1196 1220 static struct module_attribute modinfo_coresize = ··· 1199 1223 static ssize_t show_initsize(struct module_attribute *mattr, 1200 1224 struct module_kobject *mk, char *buffer) 1201 1225 { 1202 - return sprintf(buffer, "%u\n", mk->mod->init_size); 1226 + return sprintf(buffer, "%u\n", mk->mod->init_layout.size); 1203 1227 } 1204 1228 1205 1229 static struct module_attribute modinfo_initsize = ··· 1849 1873 /* 1850 1874 * LKM RO/NX protection: protect module's text/ro-data 1851 1875 * from modification and any data from execution. 
1876 + * 1877 + * General layout of module is: 1878 + * [text] [read-only-data] [writable data] 1879 + * text_size -----^ ^ ^ 1880 + * ro_size ------------------------| | 1881 + * size -------------------------------------------| 1882 + * 1883 + * These values are always page-aligned (as is base) 1852 1884 */ 1853 - void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages)) 1885 + static void frob_text(const struct module_layout *layout, 1886 + int (*set_memory)(unsigned long start, int num_pages)) 1854 1887 { 1855 - unsigned long begin_pfn = PFN_DOWN((unsigned long)start); 1856 - unsigned long end_pfn = PFN_DOWN((unsigned long)end); 1857 - 1858 - if (end_pfn > begin_pfn) 1859 - set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); 1888 + BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); 1889 + BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); 1890 + set_memory((unsigned long)layout->base, 1891 + layout->text_size >> PAGE_SHIFT); 1860 1892 } 1861 1893 1862 - static void set_section_ro_nx(void *base, 1863 - unsigned long text_size, 1864 - unsigned long ro_size, 1865 - unsigned long total_size) 1894 + static void frob_rodata(const struct module_layout *layout, 1895 + int (*set_memory)(unsigned long start, int num_pages)) 1866 1896 { 1867 - /* begin and end PFNs of the current subsection */ 1868 - unsigned long begin_pfn; 1869 - unsigned long end_pfn; 1870 - 1871 - /* 1872 - * Set RO for module text and RO-data: 1873 - * - Always protect first page. 1874 - * - Do not protect last partial page. 1875 - */ 1876 - if (ro_size > 0) 1877 - set_page_attributes(base, base + ro_size, set_memory_ro); 1878 - 1879 - /* 1880 - * Set NX permissions for module data: 1881 - * - Do not protect first partial page. 1882 - * - Always protect last page. 
1883 - */ 1884 - if (total_size > text_size) { 1885 - begin_pfn = PFN_UP((unsigned long)base + text_size); 1886 - end_pfn = PFN_UP((unsigned long)base + total_size); 1887 - if (end_pfn > begin_pfn) 1888 - set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); 1889 - } 1897 + BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); 1898 + BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); 1899 + BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); 1900 + set_memory((unsigned long)layout->base + layout->text_size, 1901 + (layout->ro_size - layout->text_size) >> PAGE_SHIFT); 1890 1902 } 1891 1903 1892 - static void unset_module_core_ro_nx(struct module *mod) 1904 + static void frob_writable_data(const struct module_layout *layout, 1905 + int (*set_memory)(unsigned long start, int num_pages)) 1893 1906 { 1894 - set_page_attributes(mod->module_core + mod->core_text_size, 1895 - mod->module_core + mod->core_size, 1896 - set_memory_x); 1897 - set_page_attributes(mod->module_core, 1898 - mod->module_core + mod->core_ro_size, 1899 - set_memory_rw); 1907 + BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); 1908 + BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); 1909 + BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1)); 1910 + set_memory((unsigned long)layout->base + layout->ro_size, 1911 + (layout->size - layout->ro_size) >> PAGE_SHIFT); 1900 1912 } 1901 1913 1902 - static void unset_module_init_ro_nx(struct module *mod) 1914 + /* livepatching wants to disable read-only so it can frob module. 
*/ 1915 + void module_disable_ro(const struct module *mod) 1903 1916 { 1904 - set_page_attributes(mod->module_init + mod->init_text_size, 1905 - mod->module_init + mod->init_size, 1906 - set_memory_x); 1907 - set_page_attributes(mod->module_init, 1908 - mod->module_init + mod->init_ro_size, 1909 - set_memory_rw); 1917 + frob_text(&mod->core_layout, set_memory_rw); 1918 + frob_rodata(&mod->core_layout, set_memory_rw); 1919 + frob_text(&mod->init_layout, set_memory_rw); 1920 + frob_rodata(&mod->init_layout, set_memory_rw); 1921 + } 1922 + 1923 + void module_enable_ro(const struct module *mod) 1924 + { 1925 + frob_text(&mod->core_layout, set_memory_ro); 1926 + frob_rodata(&mod->core_layout, set_memory_ro); 1927 + frob_text(&mod->init_layout, set_memory_ro); 1928 + frob_rodata(&mod->init_layout, set_memory_ro); 1929 + } 1930 + 1931 + static void module_enable_nx(const struct module *mod) 1932 + { 1933 + frob_rodata(&mod->core_layout, set_memory_nx); 1934 + frob_writable_data(&mod->core_layout, set_memory_nx); 1935 + frob_rodata(&mod->init_layout, set_memory_nx); 1936 + frob_writable_data(&mod->init_layout, set_memory_nx); 1937 + } 1938 + 1939 + static void module_disable_nx(const struct module *mod) 1940 + { 1941 + frob_rodata(&mod->core_layout, set_memory_x); 1942 + frob_writable_data(&mod->core_layout, set_memory_x); 1943 + frob_rodata(&mod->init_layout, set_memory_x); 1944 + frob_writable_data(&mod->init_layout, set_memory_x); 1910 1945 } 1911 1946 1912 1947 /* Iterate through all modules and set each module's text as RW */ ··· 1929 1942 list_for_each_entry_rcu(mod, &modules, list) { 1930 1943 if (mod->state == MODULE_STATE_UNFORMED) 1931 1944 continue; 1932 - if ((mod->module_core) && (mod->core_text_size)) { 1933 - set_page_attributes(mod->module_core, 1934 - mod->module_core + mod->core_text_size, 1935 - set_memory_rw); 1936 - } 1937 - if ((mod->module_init) && (mod->init_text_size)) { 1938 - set_page_attributes(mod->module_init, 1939 - mod->module_init + 
mod->init_text_size, 1940 - set_memory_rw); 1941 - } 1945 + 1946 + frob_text(&mod->core_layout, set_memory_rw); 1947 + frob_text(&mod->init_layout, set_memory_rw); 1942 1948 } 1943 1949 mutex_unlock(&module_mutex); 1944 1950 } ··· 1945 1965 list_for_each_entry_rcu(mod, &modules, list) { 1946 1966 if (mod->state == MODULE_STATE_UNFORMED) 1947 1967 continue; 1948 - if ((mod->module_core) && (mod->core_text_size)) { 1949 - set_page_attributes(mod->module_core, 1950 - mod->module_core + mod->core_text_size, 1951 - set_memory_ro); 1952 - } 1953 - if ((mod->module_init) && (mod->init_text_size)) { 1954 - set_page_attributes(mod->module_init, 1955 - mod->module_init + mod->init_text_size, 1956 - set_memory_ro); 1957 - } 1968 + 1969 + frob_text(&mod->core_layout, set_memory_ro); 1970 + frob_text(&mod->init_layout, set_memory_ro); 1958 1971 } 1959 1972 mutex_unlock(&module_mutex); 1960 1973 } 1974 + 1975 + static void disable_ro_nx(const struct module_layout *layout) 1976 + { 1977 + frob_text(layout, set_memory_rw); 1978 + frob_rodata(layout, set_memory_rw); 1979 + frob_rodata(layout, set_memory_x); 1980 + frob_writable_data(layout, set_memory_x); 1981 + } 1982 + 1961 1983 #else 1962 - static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } 1963 - static void unset_module_core_ro_nx(struct module *mod) { } 1964 - static void unset_module_init_ro_nx(struct module *mod) { } 1984 + static void disable_ro_nx(const struct module_layout *layout) { } 1985 + static void module_enable_nx(const struct module *mod) { } 1986 + static void module_disable_nx(const struct module *mod) { } 1965 1987 #endif 1966 1988 1967 1989 void __weak module_memfree(void *module_region) ··· 2015 2033 synchronize_sched(); 2016 2034 mutex_unlock(&module_mutex); 2017 2035 2018 - /* This may be NULL, but that's OK */ 2019 - unset_module_init_ro_nx(mod); 2036 + /* This may be empty, but that's OK */ 2037 + 
disable_ro_nx(&mod->init_layout); 2020 2038 module_arch_freeing_init(mod); 2021 - module_memfree(mod->module_init); 2039 + module_memfree(mod->init_layout.base); 2022 2040 kfree(mod->args); 2023 2041 percpu_modfree(mod); 2024 2042 2025 2043 /* Free lock-classes; relies on the preceding sync_rcu(). */ 2026 - lockdep_free_key_range(mod->module_core, mod->core_size); 2044 + lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); 2027 2045 2028 2046 /* Finally, free the core (containing the module structure) */ 2029 - unset_module_core_ro_nx(mod); 2030 - module_memfree(mod->module_core); 2047 + disable_ro_nx(&mod->core_layout); 2048 + module_memfree(mod->core_layout.base); 2031 2049 2032 2050 #ifdef CONFIG_MPU 2033 2051 update_protections(current->mm); ··· 2230 2248 || s->sh_entsize != ~0UL 2231 2249 || strstarts(sname, ".init")) 2232 2250 continue; 2233 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i); 2251 + s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); 2234 2252 pr_debug("\t%s\n", sname); 2235 2253 } 2236 2254 switch (m) { 2237 2255 case 0: /* executable */ 2238 - mod->core_size = debug_align(mod->core_size); 2239 - mod->core_text_size = mod->core_size; 2256 + mod->core_layout.size = debug_align(mod->core_layout.size); 2257 + mod->core_layout.text_size = mod->core_layout.size; 2240 2258 break; 2241 2259 case 1: /* RO: text and ro-data */ 2242 - mod->core_size = debug_align(mod->core_size); 2243 - mod->core_ro_size = mod->core_size; 2260 + mod->core_layout.size = debug_align(mod->core_layout.size); 2261 + mod->core_layout.ro_size = mod->core_layout.size; 2244 2262 break; 2245 2263 case 3: /* whole core */ 2246 - mod->core_size = debug_align(mod->core_size); 2264 + mod->core_layout.size = debug_align(mod->core_layout.size); 2247 2265 break; 2248 2266 } 2249 2267 } ··· 2259 2277 || s->sh_entsize != ~0UL 2260 2278 || !strstarts(sname, ".init")) 2261 2279 continue; 2262 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) 2280 
+ s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) 2263 2281 | INIT_OFFSET_MASK); 2264 2282 pr_debug("\t%s\n", sname); 2265 2283 } 2266 2284 switch (m) { 2267 2285 case 0: /* executable */ 2268 - mod->init_size = debug_align(mod->init_size); 2269 - mod->init_text_size = mod->init_size; 2286 + mod->init_layout.size = debug_align(mod->init_layout.size); 2287 + mod->init_layout.text_size = mod->init_layout.size; 2270 2288 break; 2271 2289 case 1: /* RO: text and ro-data */ 2272 - mod->init_size = debug_align(mod->init_size); 2273 - mod->init_ro_size = mod->init_size; 2290 + mod->init_layout.size = debug_align(mod->init_layout.size); 2291 + mod->init_layout.ro_size = mod->init_layout.size; 2274 2292 break; 2275 2293 case 3: /* whole init */ 2276 - mod->init_size = debug_align(mod->init_size); 2294 + mod->init_layout.size = debug_align(mod->init_layout.size); 2277 2295 break; 2278 2296 } 2279 2297 } ··· 2383 2401 } 2384 2402 if (sym->st_shndx == SHN_UNDEF) 2385 2403 return 'U'; 2386 - if (sym->st_shndx == SHN_ABS) 2404 + if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) 2387 2405 return 'a'; 2388 2406 if (sym->st_shndx >= SHN_LORESERVE) 2389 2407 return '?'; ··· 2412 2430 } 2413 2431 2414 2432 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, 2415 - unsigned int shnum) 2433 + unsigned int shnum, unsigned int pcpundx) 2416 2434 { 2417 2435 const Elf_Shdr *sec; 2418 2436 ··· 2420 2438 || src->st_shndx >= shnum 2421 2439 || !src->st_name) 2422 2440 return false; 2441 + 2442 + #ifdef CONFIG_KALLSYMS_ALL 2443 + if (src->st_shndx == pcpundx) 2444 + return true; 2445 + #endif 2423 2446 2424 2447 sec = sechdrs + src->st_shndx; 2425 2448 if (!(sec->sh_flags & SHF_ALLOC) ··· 2453 2466 2454 2467 /* Put symbol section at end of init part of module. 
*/ 2455 2468 symsect->sh_flags |= SHF_ALLOC; 2456 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, 2469 + symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect, 2457 2470 info->index.sym) | INIT_OFFSET_MASK; 2458 2471 pr_debug("\t%s\n", info->secstrings + symsect->sh_name); 2459 2472 ··· 2463 2476 /* Compute total space required for the core symbols' strtab. */ 2464 2477 for (ndst = i = 0; i < nsrc; i++) { 2465 2478 if (i == 0 || 2466 - is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { 2479 + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, 2480 + info->index.pcpu)) { 2467 2481 strtab_size += strlen(&info->strtab[src[i].st_name])+1; 2468 2482 ndst++; 2469 2483 } 2470 2484 } 2471 2485 2472 2486 /* Append room for core symbols at end of core part. */ 2473 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); 2474 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); 2475 - mod->core_size += strtab_size; 2476 - mod->core_size = debug_align(mod->core_size); 2487 + info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); 2488 + info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); 2489 + mod->core_layout.size += strtab_size; 2490 + mod->core_layout.size = debug_align(mod->core_layout.size); 2477 2491 2478 2492 /* Put string table section at end of init part of module. 
*/ 2479 2493 strsect->sh_flags |= SHF_ALLOC; 2480 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, 2494 + strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, 2481 2495 info->index.str) | INIT_OFFSET_MASK; 2482 - mod->init_size = debug_align(mod->init_size); 2496 + mod->init_layout.size = debug_align(mod->init_layout.size); 2483 2497 pr_debug("\t%s\n", info->secstrings + strsect->sh_name); 2484 2498 } 2485 2499 ··· 2501 2513 for (i = 0; i < mod->num_symtab; i++) 2502 2514 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); 2503 2515 2504 - mod->core_symtab = dst = mod->module_core + info->symoffs; 2505 - mod->core_strtab = s = mod->module_core + info->stroffs; 2516 + mod->core_symtab = dst = mod->core_layout.base + info->symoffs; 2517 + mod->core_strtab = s = mod->core_layout.base + info->stroffs; 2506 2518 src = mod->symtab; 2507 2519 for (ndst = i = 0; i < mod->num_symtab; i++) { 2508 2520 if (i == 0 || 2509 - is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { 2521 + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, 2522 + info->index.pcpu)) { 2510 2523 dst[ndst] = src[i]; 2511 2524 dst[ndst++].st_name = s - mod->core_strtab; 2512 2525 s += strlcpy(s, &mod->strtab[src[i].st_name], ··· 2953 2964 void *ptr; 2954 2965 2955 2966 /* Do the allocs. */ 2956 - ptr = module_alloc(mod->core_size); 2967 + ptr = module_alloc(mod->core_layout.size); 2957 2968 /* 2958 2969 * The pointer to this block is stored in the module structure 2959 2970 * which is inside the block. 
Just mark it as not being a ··· 2963 2974 if (!ptr) 2964 2975 return -ENOMEM; 2965 2976 2966 - memset(ptr, 0, mod->core_size); 2967 - mod->module_core = ptr; 2977 + memset(ptr, 0, mod->core_layout.size); 2978 + mod->core_layout.base = ptr; 2968 2979 2969 - if (mod->init_size) { 2970 - ptr = module_alloc(mod->init_size); 2980 + if (mod->init_layout.size) { 2981 + ptr = module_alloc(mod->init_layout.size); 2971 2982 /* 2972 2983 * The pointer to this block is stored in the module structure 2973 2984 * which is inside the block. This block doesn't need to be ··· 2976 2987 */ 2977 2988 kmemleak_ignore(ptr); 2978 2989 if (!ptr) { 2979 - module_memfree(mod->module_core); 2990 + module_memfree(mod->core_layout.base); 2980 2991 return -ENOMEM; 2981 2992 } 2982 - memset(ptr, 0, mod->init_size); 2983 - mod->module_init = ptr; 2993 + memset(ptr, 0, mod->init_layout.size); 2994 + mod->init_layout.base = ptr; 2984 2995 } else 2985 - mod->module_init = NULL; 2996 + mod->init_layout.base = NULL; 2986 2997 2987 2998 /* Transfer each section which specifies SHF_ALLOC */ 2988 2999 pr_debug("final section addresses:\n"); ··· 2994 3005 continue; 2995 3006 2996 3007 if (shdr->sh_entsize & INIT_OFFSET_MASK) 2997 - dest = mod->module_init 3008 + dest = mod->init_layout.base 2998 3009 + (shdr->sh_entsize & ~INIT_OFFSET_MASK); 2999 3010 else 3000 - dest = mod->module_core + shdr->sh_entsize; 3011 + dest = mod->core_layout.base + shdr->sh_entsize; 3001 3012 3002 3013 if (shdr->sh_type != SHT_NOBITS) 3003 3014 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); ··· 3059 3070 * Do it before processing of module parameters, so the module 3060 3071 * can provide parameter accessor functions of its own. 
3061 3072 */ 3062 - if (mod->module_init) 3063 - flush_icache_range((unsigned long)mod->module_init, 3064 - (unsigned long)mod->module_init 3065 - + mod->init_size); 3066 - flush_icache_range((unsigned long)mod->module_core, 3067 - (unsigned long)mod->module_core + mod->core_size); 3073 + if (mod->init_layout.base) 3074 + flush_icache_range((unsigned long)mod->init_layout.base, 3075 + (unsigned long)mod->init_layout.base 3076 + + mod->init_layout.size); 3077 + flush_icache_range((unsigned long)mod->core_layout.base, 3078 + (unsigned long)mod->core_layout.base + mod->core_layout.size); 3068 3079 3069 3080 set_fs(old_fs); 3070 3081 } ··· 3122 3133 { 3123 3134 percpu_modfree(mod); 3124 3135 module_arch_freeing_init(mod); 3125 - module_memfree(mod->module_init); 3126 - module_memfree(mod->module_core); 3136 + module_memfree(mod->init_layout.base); 3137 + module_memfree(mod->core_layout.base); 3127 3138 } 3128 3139 3129 3140 int __weak module_finalize(const Elf_Ehdr *hdr, ··· 3210 3221 ret = -ENOMEM; 3211 3222 goto fail; 3212 3223 } 3213 - freeinit->module_init = mod->module_init; 3224 + freeinit->module_init = mod->init_layout.base; 3214 3225 3215 3226 /* 3216 3227 * We want to find out whether @mod uses async during init. Clear ··· 3268 3279 mod->strtab = mod->core_strtab; 3269 3280 #endif 3270 3281 mod_tree_remove_init(mod); 3271 - unset_module_init_ro_nx(mod); 3282 + disable_ro_nx(&mod->init_layout); 3272 3283 module_arch_freeing_init(mod); 3273 - mod->module_init = NULL; 3274 - mod->init_size = 0; 3275 - mod->init_ro_size = 0; 3276 - mod->init_text_size = 0; 3284 + mod->init_layout.base = NULL; 3285 + mod->init_layout.size = 0; 3286 + mod->init_layout.ro_size = 0; 3287 + mod->init_layout.text_size = 0; 3277 3288 /* 3278 3289 * We want to free module_init, but be aware that kallsyms may be 3279 3290 * walking this with preempt disabled. In all the failure paths, we ··· 3362 3373 /* This relies on module_mutex for list integrity. 
*/ 3363 3374 module_bug_finalize(info->hdr, info->sechdrs, mod); 3364 3375 3365 - /* Set RO and NX regions for core */ 3366 - set_section_ro_nx(mod->module_core, 3367 - mod->core_text_size, 3368 - mod->core_ro_size, 3369 - mod->core_size); 3370 - 3371 - /* Set RO and NX regions for init */ 3372 - set_section_ro_nx(mod->module_init, 3373 - mod->init_text_size, 3374 - mod->init_ro_size, 3375 - mod->init_size); 3376 + /* Set RO and NX regions */ 3377 + module_enable_ro(mod); 3378 + module_enable_nx(mod); 3376 3379 3377 3380 /* Mark state as coming so strong_try_module_get() ignores us, 3378 3381 * but kallsyms etc. can see us. */ ··· 3529 3548 MODULE_STATE_GOING, mod); 3530 3549 3531 3550 /* we can't deallocate the module until we clear memory protection */ 3532 - unset_module_init_ro_nx(mod); 3533 - unset_module_core_ro_nx(mod); 3551 + module_disable_ro(mod); 3552 + module_disable_nx(mod); 3534 3553 3535 3554 ddebug_cleanup: 3536 3555 dynamic_debug_remove(info->debug); ··· 3553 3572 mutex_unlock(&module_mutex); 3554 3573 free_module: 3555 3574 /* Free lock-classes; relies on the preceding sync_rcu() */ 3556 - lockdep_free_key_range(mod->module_core, mod->core_size); 3575 + lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); 3557 3576 3558 3577 module_deallocate(mod, info); 3559 3578 free_copy: ··· 3631 3650 3632 3651 /* At worse, next value is at end of module */ 3633 3652 if (within_module_init(addr, mod)) 3634 - nextval = (unsigned long)mod->module_init+mod->init_text_size; 3653 + nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size; 3635 3654 else 3636 - nextval = (unsigned long)mod->module_core+mod->core_text_size; 3655 + nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size; 3637 3656 3638 3657 /* Scan for closest preceding symbol, and next symbol. (ELF 3639 3658 starts real symbols at 1). 
*/ ··· 3880 3899 return 0; 3881 3900 3882 3901 seq_printf(m, "%s %u", 3883 - mod->name, mod->init_size + mod->core_size); 3902 + mod->name, mod->init_layout.size + mod->core_layout.size); 3884 3903 print_unload_info(m, mod); 3885 3904 3886 3905 /* Informative for users. */ ··· 3889 3908 mod->state == MODULE_STATE_COMING ? "Loading" : 3890 3909 "Live"); 3891 3910 /* Used by oprofile and other similar tools. */ 3892 - seq_printf(m, " 0x%pK", mod->module_core); 3911 + seq_printf(m, " 0x%pK", mod->core_layout.base); 3893 3912 3894 3913 /* Taints info */ 3895 3914 if (mod->taints) ··· 4032 4051 struct module *mod = __module_address(addr); 4033 4052 if (mod) { 4034 4053 /* Make sure it's within the text section. */ 4035 - if (!within(addr, mod->module_init, mod->init_text_size) 4036 - && !within(addr, mod->module_core, mod->core_text_size)) 4054 + if (!within(addr, mod->init_layout.base, mod->init_layout.text_size) 4055 + && !within(addr, mod->core_layout.base, mod->core_layout.text_size)) 4037 4056 mod = NULL; 4038 4057 } 4039 4058 return mod;
+4 -1
kernel/panic.c
··· 152 152 * We may have ended up stopping the CPU holding the lock (in 153 153 * smp_send_stop()) while still having some valuable data in the console 154 154 * buffer. Try to acquire the lock then release it regardless of the 155 - * result. The release will also print the buffers out. 155 + * result. The release will also print the buffers out. Locks debug 156 + * should be disabled to avoid reporting bad unlock balance when 157 + * panic() is not being called from OOPS. 156 158 */ 159 + debug_locks_off(); 157 160 console_trylock(); 158 161 console_unlock(); 159 162
+2 -2
kernel/pid.c
··· 467 467 rcu_read_lock(); 468 468 if (type != PIDTYPE_PID) 469 469 task = task->group_leader; 470 - pid = get_pid(task->pids[type].pid); 470 + pid = get_pid(rcu_dereference(task->pids[type].pid)); 471 471 rcu_read_unlock(); 472 472 return pid; 473 473 } ··· 528 528 if (likely(pid_alive(task))) { 529 529 if (type != PIDTYPE_PID) 530 530 task = task->group_leader; 531 - nr = pid_nr_ns(task->pids[type].pid, ns); 531 + nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns); 532 532 } 533 533 rcu_read_unlock(); 534 534
+1 -1
kernel/signal.c
··· 3503 3503 3504 3504 #endif 3505 3505 3506 - int sigsuspend(sigset_t *set) 3506 + static int sigsuspend(sigset_t *set) 3507 3507 { 3508 3508 current->saved_sigmask = current->blocked; 3509 3509 set_current_blocked(set);
+2 -2
mm/huge_memory.c
··· 2009 2009 /* 2010 2010 * Be somewhat over-protective like KSM for now! 2011 2011 */ 2012 - if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 2012 + if (*vm_flags & VM_NO_THP) 2013 2013 return -EINVAL; 2014 2014 *vm_flags &= ~VM_NOHUGEPAGE; 2015 2015 *vm_flags |= VM_HUGEPAGE; ··· 2025 2025 /* 2026 2026 * Be somewhat over-protective like KSM for now! 2027 2027 */ 2028 - if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 2028 + if (*vm_flags & VM_NO_THP) 2029 2029 return -EINVAL; 2030 2030 *vm_flags &= ~VM_HUGEPAGE; 2031 2031 *vm_flags |= VM_NOHUGEPAGE;
+2
mm/kasan/kasan.c
··· 19 19 #include <linux/export.h> 20 20 #include <linux/init.h> 21 21 #include <linux/kernel.h> 22 + #include <linux/kmemleak.h> 22 23 #include <linux/memblock.h> 23 24 #include <linux/memory.h> 24 25 #include <linux/mm.h> ··· 445 444 446 445 if (ret) { 447 446 find_vm_area(addr)->flags |= VM_KASAN; 447 + kmemleak_ignore(ret); 448 448 return 0; 449 449 } 450 450
+4 -4
mm/memory.c
··· 3015 3015 } else { 3016 3016 /* 3017 3017 * The fault handler has no page to lock, so it holds 3018 - * i_mmap_lock for write to protect against truncate. 3018 + * i_mmap_lock for read to protect against truncate. 3019 3019 */ 3020 - i_mmap_unlock_write(vma->vm_file->f_mapping); 3020 + i_mmap_unlock_read(vma->vm_file->f_mapping); 3021 3021 } 3022 3022 goto uncharge_out; 3023 3023 } ··· 3031 3031 } else { 3032 3032 /* 3033 3033 * The fault handler has no page to lock, so it holds 3034 - * i_mmap_lock for write to protect against truncate. 3034 + * i_mmap_lock for read to protect against truncate. 3035 3035 */ 3036 - i_mmap_unlock_write(vma->vm_file->f_mapping); 3036 + i_mmap_unlock_read(vma->vm_file->f_mapping); 3037 3037 } 3038 3038 return ret; 3039 3039 uncharge_out:
+3 -1
mm/page-writeback.c
··· 1542 1542 for (;;) { 1543 1543 unsigned long now = jiffies; 1544 1544 unsigned long dirty, thresh, bg_thresh; 1545 - unsigned long m_dirty, m_thresh, m_bg_thresh; 1545 + unsigned long m_dirty = 0; /* stop bogus uninit warnings */ 1546 + unsigned long m_thresh = 0; 1547 + unsigned long m_bg_thresh = 0; 1546 1548 1547 1549 /* 1548 1550 * Unstable writes are a feature of certain networked
+1 -1
mm/slab.c
··· 3419 3419 } 3420 3420 EXPORT_SYMBOL(kmem_cache_free_bulk); 3421 3421 3422 - bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3422 + int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3423 3423 void **p) 3424 3424 { 3425 3425 return __kmem_cache_alloc_bulk(s, flags, size, p);
+1 -1
mm/slab.h
··· 170 170 * may be allocated or freed using these operations. 171 171 */ 172 172 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); 173 - bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 173 + int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 174 174 175 175 #ifdef CONFIG_MEMCG_KMEM 176 176 /*
+3 -3
mm/slab_common.c
··· 112 112 kmem_cache_free(s, p[i]); 113 113 } 114 114 115 - bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, 115 + int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, 116 116 void **p) 117 117 { 118 118 size_t i; ··· 121 121 void *x = p[i] = kmem_cache_alloc(s, flags); 122 122 if (!x) { 123 123 __kmem_cache_free_bulk(s, i, p); 124 - return false; 124 + return 0; 125 125 } 126 126 } 127 - return true; 127 + return i; 128 128 } 129 129 130 130 #ifdef CONFIG_MEMCG_KMEM
+1 -1
mm/slob.c
··· 617 617 } 618 618 EXPORT_SYMBOL(kmem_cache_free_bulk); 619 619 620 - bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 620 + int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 621 621 void **p) 622 622 { 623 623 return __kmem_cache_alloc_bulk(s, flags, size, p);
+211 -93
mm/slub.c
··· 1065 1065 return 0; 1066 1066 } 1067 1067 1068 + /* Supports checking bulk free of a constructed freelist */ 1068 1069 static noinline struct kmem_cache_node *free_debug_processing( 1069 - struct kmem_cache *s, struct page *page, void *object, 1070 + struct kmem_cache *s, struct page *page, 1071 + void *head, void *tail, int bulk_cnt, 1070 1072 unsigned long addr, unsigned long *flags) 1071 1073 { 1072 1074 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1075 + void *object = head; 1076 + int cnt = 0; 1073 1077 1074 1078 spin_lock_irqsave(&n->list_lock, *flags); 1075 1079 slab_lock(page); 1076 1080 1077 1081 if (!check_slab(s, page)) 1078 1082 goto fail; 1083 + 1084 + next_object: 1085 + cnt++; 1079 1086 1080 1087 if (!check_valid_pointer(s, page, object)) { 1081 1088 slab_err(s, page, "Invalid object pointer 0x%p", object); ··· 1114 1107 if (s->flags & SLAB_STORE_USER) 1115 1108 set_track(s, object, TRACK_FREE, addr); 1116 1109 trace(s, page, object, 0); 1110 + /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 1117 1111 init_object(s, object, SLUB_RED_INACTIVE); 1112 + 1113 + /* Reached end of constructed freelist yet? 
*/ 1114 + if (object != tail) { 1115 + object = get_freepointer(s, object); 1116 + goto next_object; 1117 + } 1118 1118 out: 1119 + if (cnt != bulk_cnt) 1120 + slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", 1121 + bulk_cnt, cnt); 1122 + 1119 1123 slab_unlock(page); 1120 1124 /* 1121 1125 * Keep node_lock to preserve integrity ··· 1222 1204 1223 1205 return flags; 1224 1206 } 1225 - #else 1207 + #else /* !CONFIG_SLUB_DEBUG */ 1226 1208 static inline void setup_object_debug(struct kmem_cache *s, 1227 1209 struct page *page, void *object) {} 1228 1210 ··· 1230 1212 struct page *page, void *object, unsigned long addr) { return 0; } 1231 1213 1232 1214 static inline struct kmem_cache_node *free_debug_processing( 1233 - struct kmem_cache *s, struct page *page, void *object, 1215 + struct kmem_cache *s, struct page *page, 1216 + void *head, void *tail, int bulk_cnt, 1234 1217 unsigned long addr, unsigned long *flags) { return NULL; } 1235 1218 1236 1219 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) ··· 1292 1273 return memcg_kmem_get_cache(s, flags); 1293 1274 } 1294 1275 1295 - static inline void slab_post_alloc_hook(struct kmem_cache *s, 1296 - gfp_t flags, void *object) 1276 + static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, 1277 + size_t size, void **p) 1297 1278 { 1279 + size_t i; 1280 + 1298 1281 flags &= gfp_allowed_mask; 1299 - kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); 1300 - kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); 1282 + for (i = 0; i < size; i++) { 1283 + void *object = p[i]; 1284 + 1285 + kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); 1286 + kmemleak_alloc_recursive(object, s->object_size, 1, 1287 + s->flags, flags); 1288 + kasan_slab_alloc(s, object); 1289 + } 1301 1290 memcg_kmem_put_cache(s); 1302 - kasan_slab_alloc(s, object); 1303 1291 } 1304 1292 1305 1293 static inline void slab_free_hook(struct kmem_cache *s, void *x) ··· 1332 1306 
debug_check_no_obj_freed(x, s->object_size); 1333 1307 1334 1308 kasan_slab_free(s, x); 1309 + } 1310 + 1311 + static inline void slab_free_freelist_hook(struct kmem_cache *s, 1312 + void *head, void *tail) 1313 + { 1314 + /* 1315 + * Compiler cannot detect this function can be removed if slab_free_hook() 1316 + * evaluates to nothing. Thus, catch all relevant config debug options here. 1317 + */ 1318 + #if defined(CONFIG_KMEMCHECK) || \ 1319 + defined(CONFIG_LOCKDEP) || \ 1320 + defined(CONFIG_DEBUG_KMEMLEAK) || \ 1321 + defined(CONFIG_DEBUG_OBJECTS_FREE) || \ 1322 + defined(CONFIG_KASAN) 1323 + 1324 + void *object = head; 1325 + void *tail_obj = tail ? : head; 1326 + 1327 + do { 1328 + slab_free_hook(s, object); 1329 + } while ((object != tail_obj) && 1330 + (object = get_freepointer(s, object))); 1331 + #endif 1335 1332 } 1336 1333 1337 1334 static void setup_object(struct kmem_cache *s, struct page *page, ··· 2344 2295 * And if we were unable to get a new slab from the partial slab lists then 2345 2296 * we need to allocate a new slab. This is the slowest path since it involves 2346 2297 * a call to the page allocator and the setup of a new slab. 2298 + * 2299 + * Version of __slab_alloc to use when we know that interrupts are 2300 + * already disabled (which is the case for bulk allocation). 2347 2301 */ 2348 - static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2302 + static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2349 2303 unsigned long addr, struct kmem_cache_cpu *c) 2350 2304 { 2351 2305 void *freelist; 2352 2306 struct page *page; 2353 - unsigned long flags; 2354 - 2355 - local_irq_save(flags); 2356 - #ifdef CONFIG_PREEMPT 2357 - /* 2358 - * We may have been preempted and rescheduled on a different 2359 - * cpu before disabling interrupts. Need to reload cpu area 2360 - * pointer. 
2361 - */ 2362 - c = this_cpu_ptr(s->cpu_slab); 2363 - #endif 2364 2307 2365 2308 page = c->page; 2366 2309 if (!page) ··· 2410 2369 VM_BUG_ON(!c->page->frozen); 2411 2370 c->freelist = get_freepointer(s, freelist); 2412 2371 c->tid = next_tid(c->tid); 2413 - local_irq_restore(flags); 2414 2372 return freelist; 2415 2373 2416 2374 new_slab: ··· 2426 2386 2427 2387 if (unlikely(!freelist)) { 2428 2388 slab_out_of_memory(s, gfpflags, node); 2429 - local_irq_restore(flags); 2430 2389 return NULL; 2431 2390 } 2432 2391 ··· 2441 2402 deactivate_slab(s, page, get_freepointer(s, freelist)); 2442 2403 c->page = NULL; 2443 2404 c->freelist = NULL; 2444 - local_irq_restore(flags); 2445 2405 return freelist; 2406 + } 2407 + 2408 + /* 2409 + * Another one that disabled interrupt and compensates for possible 2410 + * cpu changes by refetching the per cpu area pointer. 2411 + */ 2412 + static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2413 + unsigned long addr, struct kmem_cache_cpu *c) 2414 + { 2415 + void *p; 2416 + unsigned long flags; 2417 + 2418 + local_irq_save(flags); 2419 + #ifdef CONFIG_PREEMPT 2420 + /* 2421 + * We may have been preempted and rescheduled on a different 2422 + * cpu before disabling interrupts. Need to reload cpu area 2423 + * pointer. 
2424 + */ 2425 + c = this_cpu_ptr(s->cpu_slab); 2426 + #endif 2427 + 2428 + p = ___slab_alloc(s, gfpflags, node, addr, c); 2429 + local_irq_restore(flags); 2430 + return p; 2446 2431 } 2447 2432 2448 2433 /* ··· 2482 2419 static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2483 2420 gfp_t gfpflags, int node, unsigned long addr) 2484 2421 { 2485 - void **object; 2422 + void *object; 2486 2423 struct kmem_cache_cpu *c; 2487 2424 struct page *page; 2488 2425 unsigned long tid; ··· 2561 2498 if (unlikely(gfpflags & __GFP_ZERO) && object) 2562 2499 memset(object, 0, s->object_size); 2563 2500 2564 - slab_post_alloc_hook(s, gfpflags, object); 2501 + slab_post_alloc_hook(s, gfpflags, 1, &object); 2565 2502 2566 2503 return object; 2567 2504 } ··· 2632 2569 * handling required then we can return immediately. 2633 2570 */ 2634 2571 static void __slab_free(struct kmem_cache *s, struct page *page, 2635 - void *x, unsigned long addr) 2572 + void *head, void *tail, int cnt, 2573 + unsigned long addr) 2574 + 2636 2575 { 2637 2576 void *prior; 2638 - void **object = (void *)x; 2639 2577 int was_frozen; 2640 2578 struct page new; 2641 2579 unsigned long counters; ··· 2646 2582 stat(s, FREE_SLOWPATH); 2647 2583 2648 2584 if (kmem_cache_debug(s) && 2649 - !(n = free_debug_processing(s, page, x, addr, &flags))) 2585 + !(n = free_debug_processing(s, page, head, tail, cnt, 2586 + addr, &flags))) 2650 2587 return; 2651 2588 2652 2589 do { ··· 2657 2592 } 2658 2593 prior = page->freelist; 2659 2594 counters = page->counters; 2660 - set_freepointer(s, object, prior); 2595 + set_freepointer(s, tail, prior); 2661 2596 new.counters = counters; 2662 2597 was_frozen = new.frozen; 2663 - new.inuse--; 2598 + new.inuse -= cnt; 2664 2599 if ((!new.inuse || !prior) && !was_frozen) { 2665 2600 2666 2601 if (kmem_cache_has_cpu_partial(s) && !prior) { ··· 2691 2626 2692 2627 } while (!cmpxchg_double_slab(s, page, 2693 2628 prior, counters, 2694 - object, new.counters, 2629 + head, 
new.counters, 2695 2630 "__slab_free")); 2696 2631 2697 2632 if (likely(!n)) { ··· 2756 2691 * 2757 2692 * If fastpath is not possible then fall back to __slab_free where we deal 2758 2693 * with all sorts of special processing. 2694 + * 2695 + * Bulk free of a freelist with several objects (all pointing to the 2696 + * same page) possible by specifying head and tail ptr, plus objects 2697 + * count (cnt). Bulk free indicated by tail pointer being set. 2759 2698 */ 2760 - static __always_inline void slab_free(struct kmem_cache *s, 2761 - struct page *page, void *x, unsigned long addr) 2699 + static __always_inline void slab_free(struct kmem_cache *s, struct page *page, 2700 + void *head, void *tail, int cnt, 2701 + unsigned long addr) 2762 2702 { 2763 - void **object = (void *)x; 2703 + void *tail_obj = tail ? : head; 2764 2704 struct kmem_cache_cpu *c; 2765 2705 unsigned long tid; 2766 2706 2767 - slab_free_hook(s, x); 2707 + slab_free_freelist_hook(s, head, tail); 2768 2708 2769 2709 redo: 2770 2710 /* ··· 2788 2718 barrier(); 2789 2719 2790 2720 if (likely(page == c->page)) { 2791 - set_freepointer(s, object, c->freelist); 2721 + set_freepointer(s, tail_obj, c->freelist); 2792 2722 2793 2723 if (unlikely(!this_cpu_cmpxchg_double( 2794 2724 s->cpu_slab->freelist, s->cpu_slab->tid, 2795 2725 c->freelist, tid, 2796 - object, next_tid(tid)))) { 2726 + head, next_tid(tid)))) { 2797 2727 2798 2728 note_cmpxchg_failure("slab_free", s, tid); 2799 2729 goto redo; 2800 2730 } 2801 2731 stat(s, FREE_FASTPATH); 2802 2732 } else 2803 - __slab_free(s, page, x, addr); 2733 + __slab_free(s, page, head, tail_obj, cnt, addr); 2804 2734 2805 2735 } 2806 2736 ··· 2809 2739 s = cache_from_obj(s, x); 2810 2740 if (!s) 2811 2741 return; 2812 - slab_free(s, virt_to_head_page(x), x, _RET_IP_); 2742 + slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); 2813 2743 trace_kmem_cache_free(_RET_IP_, x); 2814 2744 } 2815 2745 EXPORT_SYMBOL(kmem_cache_free); 2816 2746 2817 - /* Note that 
interrupts must be enabled when calling this function. */ 2818 - void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 2819 - { 2820 - struct kmem_cache_cpu *c; 2747 + struct page *page; 2750 + void *tail; 2751 + void *freelist; 2752 + int cnt; 2753 + }; 2823 2753 2824 - local_irq_disable(); 2825 - c = this_cpu_ptr(s->cpu_slab); 2754 + /* 2755 + * This function progressively scans the array with free objects (with 2756 + * a limited look ahead) and extracts objects belonging to the same 2757 + * page. It builds a detached freelist directly within the given 2758 + * page/objects. This can happen without any need for 2759 + * synchronization, because the objects are owned by the running process. 2760 + * The freelist is built up as a single linked list in the objects. 2761 + * The idea is that this detached freelist can then be bulk 2762 + * transferred to the real freelist(s), but only requiring a single 2763 + * synchronization primitive. Look ahead in the array is limited due 2764 + * to performance reasons. 
2765 + */ 2766 + static int build_detached_freelist(struct kmem_cache *s, size_t size, 2767 + void **p, struct detached_freelist *df) 2768 + { 2769 + size_t first_skipped_index = 0; 2770 + int lookahead = 3; 2771 + void *object; 2826 2772 2827 - for (i = 0; i < size; i++) { 2828 - void *object = p[i]; 2773 + /* Always re-init detached_freelist */ 2774 + df->page = NULL; 2829 2775 2830 - BUG_ON(!object); 2831 - /* kmem cache debug support */ 2832 - s = cache_from_obj(s, object); 2833 - if (unlikely(!s)) 2834 - goto exit; 2835 - slab_free_hook(s, object); 2776 + do { 2777 + object = p[--size]; 2778 + } while (!object && size); 2836 2779 2837 - page = virt_to_head_page(object); 2780 + if (!object) 2781 + return 0; 2838 2782 2839 - if (c->page == page) { 2840 - /* Fastpath: local CPU free */ 2841 - set_freepointer(s, object, c->freelist); 2842 - c->freelist = object; 2843 - } else { 2844 - c->tid = next_tid(c->tid); 2845 - local_irq_enable(); 2846 - /* Slowpath: overhead locked cmpxchg_double_slab */ 2847 - __slab_free(s, page, object, _RET_IP_); 2848 - local_irq_disable(); 2849 - c = this_cpu_ptr(s->cpu_slab); 2783 + /* Start new detached freelist */ 2784 + set_freepointer(s, object, NULL); 2785 + df->page = virt_to_head_page(object); 2786 + df->tail = object; 2787 + df->freelist = object; 2788 + p[size] = NULL; /* mark object processed */ 2789 + df->cnt = 1; 2790 + 2791 + while (size) { 2792 + object = p[--size]; 2793 + if (!object) 2794 + continue; /* Skip processed objects */ 2795 + 2796 + /* df->page is always set at this point */ 2797 + if (df->page == virt_to_head_page(object)) { 2798 + /* Opportunity build freelist */ 2799 + set_freepointer(s, object, df->freelist); 2800 + df->freelist = object; 2801 + df->cnt++; 2802 + p[size] = NULL; /* mark object processed */ 2803 + 2804 + continue; 2850 2805 } 2806 + 2807 + /* Limit look ahead search */ 2808 + if (!--lookahead) 2809 + break; 2810 + 2811 + if (!first_skipped_index) 2812 + first_skipped_index = size + 1; 
2851 2813 } 2852 - exit: 2853 - c->tid = next_tid(c->tid); 2854 - local_irq_enable(); 2814 + 2815 + return first_skipped_index; 2816 + } 2817 + 2818 + 2819 + /* Note that interrupts must be enabled when calling this function. */ 2820 + void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) 2821 + { 2822 + if (WARN_ON(!size)) 2823 + return; 2824 + 2825 + do { 2826 + struct detached_freelist df; 2827 + struct kmem_cache *s; 2828 + 2829 + /* Support for memcg */ 2830 + s = cache_from_obj(orig_s, p[size - 1]); 2831 + 2832 + size = build_detached_freelist(s, size, p, &df); 2833 + if (unlikely(!df.page)) 2834 + continue; 2835 + 2836 + slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); 2837 + } while (likely(size)); 2855 2838 } 2856 2839 EXPORT_SYMBOL(kmem_cache_free_bulk); 2857 2840 2858 2841 /* Note that interrupts must be enabled when calling this function. */ 2859 - bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 2860 - void **p) 2842 + int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 2843 + void **p) 2861 2844 { 2862 2845 struct kmem_cache_cpu *c; 2863 2846 int i; 2864 2847 2848 + /* memcg and kmem_cache debug support */ 2849 + s = slab_pre_alloc_hook(s, flags); 2850 + if (unlikely(!s)) 2851 + return false; 2865 2852 /* 2866 2853 * Drain objects in the per cpu slab, while disabling local 2867 2854 * IRQs, which protects against PREEMPT and interrupts ··· 2931 2804 void *object = c->freelist; 2932 2805 2933 2806 if (unlikely(!object)) { 2934 - local_irq_enable(); 2935 2807 /* 2936 2808 * Invoking slow path likely have side-effect 2937 2809 * of re-populating per CPU c->freelist 2938 2810 */ 2939 - p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, 2811 + p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 2940 2812 _RET_IP_, c); 2941 - if (unlikely(!p[i])) { 2942 - __kmem_cache_free_bulk(s, i, p); 2943 - return false; 2944 - } 2945 - local_irq_disable(); 2813 + if (unlikely(!p[i])) 2814 + goto 
error; 2815 + 2946 2816 c = this_cpu_ptr(s->cpu_slab); 2947 2817 continue; /* goto for-loop */ 2948 2818 } 2949 - 2950 - /* kmem_cache debug support */ 2951 - s = slab_pre_alloc_hook(s, flags); 2952 - if (unlikely(!s)) { 2953 - __kmem_cache_free_bulk(s, i, p); 2954 - c->tid = next_tid(c->tid); 2955 - local_irq_enable(); 2956 - return false; 2957 - } 2958 - 2959 2819 c->freelist = get_freepointer(s, object); 2960 2820 p[i] = object; 2961 - 2962 - /* kmem_cache debug support */ 2963 - slab_post_alloc_hook(s, flags, object); 2964 2821 } 2965 2822 c->tid = next_tid(c->tid); 2966 2823 local_irq_enable(); ··· 2957 2846 memset(p[j], 0, s->object_size); 2958 2847 } 2959 2848 2960 - return true; 2849 + /* memcg and kmem_cache debug support */ 2850 + slab_post_alloc_hook(s, flags, size, p); 2851 + return i; 2852 + error: 2853 + local_irq_enable(); 2854 + slab_post_alloc_hook(s, flags, i, p); 2855 + __kmem_cache_free_bulk(s, i, p); 2856 + return 0; 2961 2857 } 2962 2858 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 2963 2859 ··· 3629 3511 __free_kmem_pages(page, compound_order(page)); 3630 3512 return; 3631 3513 } 3632 - slab_free(page->slab_cache, page, object, _RET_IP_); 3514 + slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); 3633 3515 } 3634 3516 EXPORT_SYMBOL(kfree); 3635 3517
+2 -3
mm/vmalloc.c
··· 1443 1443 vmap_debug_free_range(va->va_start, va->va_end); 1444 1444 kasan_free_shadow(vm); 1445 1445 free_unmap_vmap_area(va); 1446 - vm->size -= PAGE_SIZE; 1447 1446 1448 1447 return vm; 1449 1448 } ··· 1467 1468 return; 1468 1469 } 1469 1470 1470 - debug_check_no_locks_freed(addr, area->size); 1471 - debug_check_no_obj_freed(addr, area->size); 1471 + debug_check_no_locks_freed(addr, get_vm_area_size(area)); 1472 + debug_check_no_obj_freed(addr, get_vm_area_size(area)); 1472 1473 1473 1474 if (deallocate_pages) { 1474 1475 int i;
+1 -1
scripts/kernel-doc
··· 2711 2711 2712 2712 # generate a sequence of code that will splice in highlighting information 2713 2713 # using the s// operator. 2714 - foreach my $k (keys @highlights) { 2714 + for (my $k = 0; $k < @highlights; $k++) { 2715 2715 my $pattern = $highlights[$k][0]; 2716 2716 my $result = $highlights[$k][1]; 2717 2717 # print STDERR "scanning pattern:$pattern, highlight:($result)\n";
+10 -1
tools/Makefile
··· 32 32 @echo ' from the kernel command line to build and install one of' 33 33 @echo ' the tools above' 34 34 @echo '' 35 + @echo ' $$ make tools/all' 36 + @echo '' 37 + @echo ' builds all tools.' 38 + @echo '' 35 39 @echo ' $$ make tools/install' 36 40 @echo '' 37 41 @echo ' installs all tools.' ··· 81 77 freefall: FORCE 82 78 $(call descend,laptop/$@) 83 79 80 + all: acpi cgroup cpupower hv firewire lguest \ 81 + perf selftests turbostat usb \ 82 + virtio vm net x86_energy_perf_policy \ 83 + tmon freefall 84 + 84 85 acpi_install: 85 86 $(call descend,power/$(@:_install=),install) 86 87 ··· 110 101 install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \ 111 102 perf_install selftests_install turbostat_install usb_install \ 112 103 virtio_install vm_install net_install x86_energy_perf_policy_install \ 113 - tmon freefall_install 104 + tmon_install freefall_install 114 105 115 106 acpi_clean: 116 107 $(call descend,power/acpi,clean)
+1
tools/perf/builtin-inject.c
··· 675 675 .fork = perf_event__repipe, 676 676 .exit = perf_event__repipe, 677 677 .lost = perf_event__repipe, 678 + .lost_samples = perf_event__repipe, 678 679 .aux = perf_event__repipe, 679 680 .itrace_start = perf_event__repipe, 680 681 .context_switch = perf_event__repipe,
+3 -3
tools/perf/builtin-report.c
··· 44 44 struct report { 45 45 struct perf_tool tool; 46 46 struct perf_session *session; 47 - bool force, use_tui, use_gtk, use_stdio; 47 + bool use_tui, use_gtk, use_stdio; 48 48 bool hide_unresolved; 49 49 bool dont_use_callchains; 50 50 bool show_full_info; ··· 678 678 "file", "vmlinux pathname"), 679 679 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 680 680 "file", "kallsyms pathname"), 681 - OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), 681 + OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"), 682 682 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 683 683 "load module symbols - WARNING: use only with -k and LIVE kernel"), 684 684 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, ··· 832 832 } 833 833 834 834 file.path = input_name; 835 - file.force = report.force; 835 + file.force = symbol_conf.force; 836 836 837 837 repeat: 838 838 session = perf_session__new(&file, false, &report.tool);
+1 -6
tools/perf/ui/browsers/hists.c
··· 1430 1430 1431 1431 struct popup_action { 1432 1432 struct thread *thread; 1433 - struct dso *dso; 1434 1433 struct map_symbol ms; 1435 1434 int socket; 1436 1435 ··· 1564 1565 return 0; 1565 1566 1566 1567 act->ms.map = map; 1567 - act->dso = map->dso; 1568 1568 act->fn = do_zoom_dso; 1569 1569 return 1; 1570 1570 } ··· 1825 1827 1826 1828 while (1) { 1827 1829 struct thread *thread = NULL; 1828 - struct dso *dso = NULL; 1829 1830 struct map *map = NULL; 1830 1831 int choice = 0; 1831 1832 int socked_id = -1; ··· 1836 1839 if (browser->he_selection != NULL) { 1837 1840 thread = hist_browser__selected_thread(browser); 1838 1841 map = browser->selection->map; 1839 - if (map) 1840 - dso = map->dso; 1841 1842 socked_id = browser->he_selection->socket; 1842 1843 } 1843 1844 switch (key) { ··· 1869 1874 hist_browser__dump(browser); 1870 1875 continue; 1871 1876 case 'd': 1872 - actions->dso = dso; 1877 + actions->ms.map = map; 1873 1878 do_zoom_dso(browser, actions); 1874 1879 continue; 1875 1880 case 'V':
+1
tools/perf/util/build-id.c
··· 76 76 .exit = perf_event__exit_del_thread, 77 77 .attr = perf_event__process_attr, 78 78 .build_id = perf_event__process_build_id, 79 + .ordered_events = true, 79 80 }; 80 81 81 82 int build_id__sprintf(const u8 *build_id, int len, char *bf)
+17
tools/perf/util/dso.c
··· 933 933 /* Add new node and rebalance tree */ 934 934 rb_link_node(&dso->rb_node, parent, p); 935 935 rb_insert_color(&dso->rb_node, root); 936 + dso->root = root; 936 937 } 937 938 return NULL; 938 939 } ··· 946 945 947 946 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) 948 947 { 948 + struct rb_root *root = dso->root; 949 + 949 950 if (name == NULL) 950 951 return; 951 952 952 953 if (dso->long_name_allocated) 953 954 free((char *)dso->long_name); 954 955 956 + if (root) { 957 + rb_erase(&dso->rb_node, root); 958 + /* 959 + * __dso__findlink_by_longname() isn't guaranteed to add it 960 + * back, so a clean removal is required here. 961 + */ 962 + RB_CLEAR_NODE(&dso->rb_node); 963 + dso->root = NULL; 964 + } 965 + 955 966 dso->long_name = name; 956 967 dso->long_name_len = strlen(name); 957 968 dso->long_name_allocated = name_allocated; 969 + 970 + if (root) 971 + __dso__findlink_by_longname(root, dso, NULL); 958 972 } 959 973 960 974 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) ··· 1062 1046 dso->kernel = DSO_TYPE_USER; 1063 1047 dso->needs_swap = DSO_SWAP__UNSET; 1064 1048 RB_CLEAR_NODE(&dso->rb_node); 1049 + dso->root = NULL; 1065 1050 INIT_LIST_HEAD(&dso->node); 1066 1051 INIT_LIST_HEAD(&dso->data.open_entry); 1067 1052 pthread_mutex_init(&dso->lock, NULL);
+1
tools/perf/util/dso.h
··· 135 135 pthread_mutex_t lock; 136 136 struct list_head node; 137 137 struct rb_node rb_node; /* rbtree node sorted by long name */ 138 + struct rb_root *root; /* root of rbtree that rb_node is in */ 138 139 struct rb_root symbols[MAP__NR_TYPES]; 139 140 struct rb_root symbol_names[MAP__NR_TYPES]; 140 141 struct {
+1
tools/perf/util/machine.c
··· 91 91 92 92 list_for_each_entry_safe(pos, n, &dsos->head, node) { 93 93 RB_CLEAR_NODE(&pos->rb_node); 94 + pos->root = NULL; 94 95 list_del_init(&pos->node); 95 96 dso__put(pos); 96 97 }
+17 -7
tools/perf/util/probe-finder.c
··· 1183 1183 container_of(pf, struct trace_event_finder, pf); 1184 1184 struct perf_probe_point *pp = &pf->pev->point; 1185 1185 struct probe_trace_event *tev; 1186 - struct perf_probe_arg *args; 1186 + struct perf_probe_arg *args = NULL; 1187 1187 int ret, i; 1188 1188 1189 1189 /* Check number of tevs */ ··· 1198 1198 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, 1199 1199 pp->retprobe, pp->function, &tev->point); 1200 1200 if (ret < 0) 1201 - return ret; 1201 + goto end; 1202 1202 1203 1203 tev->point.realname = strdup(dwarf_diename(sc_die)); 1204 - if (!tev->point.realname) 1205 - return -ENOMEM; 1204 + if (!tev->point.realname) { 1205 + ret = -ENOMEM; 1206 + goto end; 1207 + } 1206 1208 1207 1209 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, 1208 1210 tev->point.offset); 1209 1211 1210 1212 /* Expand special probe argument if exist */ 1211 1213 args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); 1212 - if (args == NULL) 1213 - return -ENOMEM; 1214 + if (args == NULL) { 1215 + ret = -ENOMEM; 1216 + goto end; 1217 + } 1214 1218 1215 1219 ret = expand_probe_args(sc_die, pf, args); 1216 1220 if (ret < 0) ··· 1238 1234 } 1239 1235 1240 1236 end: 1237 + if (ret) { 1238 + clear_probe_trace_event(tev); 1239 + tf->ntevs--; 1240 + } 1241 1241 free(args); 1242 1242 return ret; 1243 1243 } ··· 1254 1246 struct trace_event_finder tf = { 1255 1247 .pf = {.pev = pev, .callback = add_probe_trace_event}, 1256 1248 .max_tevs = probe_conf.max_probes, .mod = dbg->mod}; 1257 - int ret; 1249 + int ret, i; 1258 1250 1259 1251 /* Allocate result tevs array */ 1260 1252 *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs); ··· 1266 1258 1267 1259 ret = debuginfo__find_probes(dbg, &tf.pf); 1268 1260 if (ret < 0) { 1261 + for (i = 0; i < tf.ntevs; i++) 1262 + clear_probe_trace_event(&tf.tevs[i]); 1269 1263 zfree(tevs); 1270 1264 return ret; 1271 1265 }
+16 -18
tools/perf/util/symbol.c
··· 654 654 struct map_groups *kmaps = map__kmaps(map); 655 655 struct map *curr_map; 656 656 struct symbol *pos; 657 - int count = 0, moved = 0; 657 + int count = 0; 658 + struct rb_root old_root = dso->symbols[map->type]; 658 659 struct rb_root *root = &dso->symbols[map->type]; 659 660 struct rb_node *next = rb_first(root); 660 661 661 662 if (!kmaps) 662 663 return -1; 663 664 665 + *root = RB_ROOT; 666 + 664 667 while (next) { 665 668 char *module; 666 669 667 670 pos = rb_entry(next, struct symbol, rb_node); 668 671 next = rb_next(&pos->rb_node); 672 + 673 + rb_erase_init(&pos->rb_node, &old_root); 669 674 670 675 module = strchr(pos->name, '\t'); 671 676 if (module) ··· 679 674 curr_map = map_groups__find(kmaps, map->type, pos->start); 680 675 681 676 if (!curr_map || (filter && filter(curr_map, pos))) { 682 - rb_erase_init(&pos->rb_node, root); 683 677 symbol__delete(pos); 684 - } else { 685 - pos->start -= curr_map->start - curr_map->pgoff; 686 - if (pos->end) 687 - pos->end -= curr_map->start - curr_map->pgoff; 688 - if (curr_map->dso != map->dso) { 689 - rb_erase_init(&pos->rb_node, root); 690 - symbols__insert( 691 - &curr_map->dso->symbols[curr_map->type], 692 - pos); 693 - ++moved; 694 - } else { 695 - ++count; 696 - } 678 + continue; 697 679 } 680 + 681 + pos->start -= curr_map->start - curr_map->pgoff; 682 + if (pos->end) 683 + pos->end -= curr_map->start - curr_map->pgoff; 684 + symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); 685 + ++count; 698 686 } 699 687 700 688 /* Symbols have been adjusted */ 701 689 dso->adjust_symbols = 1; 702 690 703 - return count + moved; 691 + return count; 704 692 } 705 693 706 694 /* ··· 1436 1438 if (lstat(dso->name, &st) < 0) 1437 1439 goto out; 1438 1440 1439 - if (st.st_uid && (st.st_uid != geteuid())) { 1441 + if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) { 1440 1442 pr_warning("File %s not owned by current user or root, " 1441 - "ignoring it.\n", dso->name); 1443 + "ignoring it (use -f to override).\n", dso->name); 1442 1444 goto out; 1443 1445 } 1444 1446
+1
tools/perf/util/symbol.h
··· 84 84 unsigned short priv_size; 85 85 unsigned short nr_events; 86 86 bool try_vmlinux_path, 87 + force, 87 88 ignore_vmlinux, 88 89 ignore_vmlinux_buildid, 89 90 show_kernel_path,
+4 -4
tools/power/x86/turbostat/turbostat.c
··· 1173 1173 unsigned long long msr; 1174 1174 unsigned int ratio; 1175 1175 1176 - get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); 1176 + get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); 1177 1177 1178 - fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); 1178 + fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); 1179 1179 1180 1180 ratio = (msr >> 40) & 0xFF; 1181 1181 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n", ··· 1807 1807 * 1808 1808 * MSR_SMI_COUNT 0x00000034 1809 1809 * 1810 - * MSR_NHM_PLATFORM_INFO 0x000000ce 1810 + * MSR_PLATFORM_INFO 0x000000ce 1811 1811 * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 1812 1812 * 1813 1813 * MSR_PKG_C3_RESIDENCY 0x000003f8 ··· 1876 1876 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1877 1877 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; 1878 1878 1879 - get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); 1879 + get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); 1880 1880 base_ratio = (msr >> 8) & 0xFF; 1881 1881 1882 1882 base_hz = base_ratio * bclk * 1000000;
+1 -1
tools/testing/selftests/futex/README
··· 27 27 o Where possible, any helper functions or other package-wide code shall be 28 28 implemented in header files, avoiding the need to compile intermediate object 29 29 files. 30 - o External dependendencies shall remain as minimal as possible. Currently gcc 30 + o External dependencies shall remain as minimal as possible. Currently gcc 31 31 and glibc are the only dependencies. 32 32 o Tests return 0 for success and < 0 for failure. 33 33
+7 -4
tools/testing/selftests/seccomp/seccomp_bpf.c
··· 492 492 pid_t parent = getppid(); 493 493 int fd; 494 494 void *map1, *map2; 495 + int page_size = sysconf(_SC_PAGESIZE); 496 + 497 + ASSERT_LT(0, page_size); 495 498 496 499 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 497 500 ASSERT_EQ(0, ret); ··· 507 504 508 505 EXPECT_EQ(parent, syscall(__NR_getppid)); 509 506 map1 = (void *)syscall(sysno, 510 - NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE); 507 + NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size); 511 508 EXPECT_NE(MAP_FAILED, map1); 512 509 /* mmap2() should never return. */ 513 510 map2 = (void *)syscall(sysno, 514 - NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); 511 + NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); 515 512 EXPECT_EQ(MAP_FAILED, map2); 516 513 517 514 /* The test failed, so clean up the resources. */ 518 - munmap(map1, PAGE_SIZE); 519 - munmap(map2, PAGE_SIZE); 515 + munmap(map1, page_size); 516 + munmap(map2, page_size); 520 517 close(fd); 521 518 } 522 519
+1
tools/vm/page-types.c
··· 128 128 [KPF_THP] = "t:thp", 129 129 [KPF_BALLOON] = "o:balloon", 130 130 [KPF_ZERO_PAGE] = "z:zero_page", 131 + [KPF_IDLE] = "i:idle_page", 131 132 132 133 [KPF_RESERVED] = "r:reserved", 133 134 [KPF_MLOCKED] = "m:mlocked",
+17 -11
virt/kvm/arm/arch_timer.c
··· 221 221 kvm_timer_update_state(vcpu); 222 222 223 223 /* 224 - * If we enter the guest with the virtual input level to the VGIC 225 - * asserted, then we have already told the VGIC what we need to, and 226 - * we don't need to exit from the guest until the guest deactivates 227 - * the already injected interrupt, so therefore we should set the 228 - * hardware active state to prevent unnecessary exits from the guest. 229 - * 230 - * Conversely, if the virtual input level is deasserted, then always 231 - * clear the hardware active state to ensure that hardware interrupts 232 - * from the timer triggers a guest exit. 233 - */ 234 - if (timer->irq.level) 224 + * If we enter the guest with the virtual input level to the VGIC 225 + * asserted, then we have already told the VGIC what we need to, and 226 + * we don't need to exit from the guest until the guest deactivates 227 + * the already injected interrupt, so therefore we should set the 228 + * hardware active state to prevent unnecessary exits from the guest. 229 + * 230 + * Also, if we enter the guest with the virtual timer interrupt active, 231 + * then it must be active on the physical distributor, because we set 232 + * the HW bit and the guest must be able to deactivate the virtual and 233 + * physical interrupt at the same time. 234 + * 235 + * Conversely, if the virtual input level is deasserted and the virtual 236 + * interrupt is not active, then always clear the hardware active state 237 + * to ensure that hardware interrupts from the timer triggers a guest 238 + * exit. 239 + */ 240 + if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map)) 235 241 phys_active = true; 236 242 else 237 243 phys_active = false;
+24 -26
virt/kvm/arm/vgic.c
··· 1096 1096 vgic_set_lr(vcpu, lr_nr, vlr); 1097 1097 } 1098 1098 1099 + static bool dist_active_irq(struct kvm_vcpu *vcpu) 1100 + { 1101 + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1102 + 1103 + return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); 1104 + } 1105 + 1106 + bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map) 1107 + { 1108 + int i; 1109 + 1110 + for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) { 1111 + struct vgic_lr vlr = vgic_get_lr(vcpu, i); 1112 + 1113 + if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE) 1114 + return true; 1115 + } 1116 + 1117 + return dist_active_irq(vcpu); 1118 + } 1119 + 1099 1120 /* 1100 1121 * An interrupt may have been disabled after being made pending on the 1101 1122 * CPU interface (the classic case is a timer running while we're ··· 1269 1248 * may have been serviced from another vcpu. In all cases, 1270 1249 * move along. 1271 1250 */ 1272 - if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu)) 1251 + if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu)) 1273 1252 goto epilog; 1274 1253 1275 1254 /* SGIs */ ··· 1417 1396 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) 1418 1397 { 1419 1398 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1420 - struct irq_phys_map *map; 1421 - bool phys_active; 1422 1399 bool level_pending; 1423 - int ret; 1424 1400 1425 1401 if (!(vlr.state & LR_HW)) 1426 1402 return false; 1427 1403 1428 - map = vgic_irq_map_search(vcpu, vlr.irq); 1429 - BUG_ON(!map); 1430 - 1431 - ret = irq_get_irqchip_state(map->irq, 1432 - IRQCHIP_STATE_ACTIVE, 1433 - &phys_active); 1434 - 1435 - WARN_ON(ret); 1436 - 1437 - if (phys_active) 1438 - return 0; 1404 + if (vlr.state & LR_STATE_ACTIVE) 1405 + return false; 1439 1406 1440 1407 spin_lock(&dist->lock); 1441 1408 level_pending = process_queued_irq(vcpu, lr, vlr); ··· 1487 1478 1488 1479 return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); 1489 1480 }
1490 - 1491 - int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu) 1492 - { 1493 - struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1494 - 1495 - if (!irqchip_in_kernel(vcpu->kvm)) 1496 - return 0; 1497 - 1498 - return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); 1499 - } 1500 - 1501 1481 1502 1482 void vgic_kick_vcpus(struct kvm *kvm) 1503 1483 {