Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 4.11-rc5 into tty-next

We want the serial fixes in here as well to handle merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+3831 -1289
+6
Documentation/admin-guide/kernel-parameters.txt
··· 1725 1725 kernel and module base offset ASLR (Address Space 1726 1726 Layout Randomization). 1727 1727 1728 + kasan_multi_shot 1729 + [KNL] Enforce KASAN (Kernel Address Sanitizer) to print 1730 + report on every invalid memory access. Without this 1731 + parameter KASAN will print report only for the first 1732 + invalid access. 1733 + 1728 1734 keepinitrd [HW,ARM] 1729 1735 1730 1736 kernelcore= [KNL,X86,IA-64,PPC]
+2 -1
Documentation/devicetree/bindings/rng/omap_rng.txt
··· 12 12 - reg : Offset and length of the register set for the module 13 13 - interrupts : the interrupt number for the RNG module. 14 14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76" 15 - - clocks: the trng clock source 15 + - clocks: the trng clock source. Only mandatory for the 16 + "inside-secure,safexcel-eip76" compatible. 16 17 17 18 Example: 18 19 /* AM335x */
+63
Documentation/virtual/kvm/api.txt
··· 3377 3377 __u32 pad; 3378 3378 }; 3379 3379 3380 + 4.104 KVM_X86_GET_MCE_CAP_SUPPORTED 3381 + 3382 + Capability: KVM_CAP_MCE 3383 + Architectures: x86 3384 + Type: system ioctl 3385 + Parameters: u64 mce_cap (out) 3386 + Returns: 0 on success, -1 on error 3387 + 3388 + Returns supported MCE capabilities. The u64 mce_cap parameter 3389 + has the same format as the MSR_IA32_MCG_CAP register. Supported 3390 + capabilities will have the corresponding bits set. 3391 + 3392 + 4.105 KVM_X86_SETUP_MCE 3393 + 3394 + Capability: KVM_CAP_MCE 3395 + Architectures: x86 3396 + Type: vcpu ioctl 3397 + Parameters: u64 mcg_cap (in) 3398 + Returns: 0 on success, 3399 + -EFAULT if u64 mcg_cap cannot be read, 3400 + -EINVAL if the requested number of banks is invalid, 3401 + -EINVAL if requested MCE capability is not supported. 3402 + 3403 + Initializes MCE support for use. The u64 mcg_cap parameter 3404 + has the same format as the MSR_IA32_MCG_CAP register and 3405 + specifies which capabilities should be enabled. The maximum 3406 + supported number of error-reporting banks can be retrieved when 3407 + checking for KVM_CAP_MCE. The supported capabilities can be 3408 + retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED. 3409 + 3410 + 4.106 KVM_X86_SET_MCE 3411 + 3412 + Capability: KVM_CAP_MCE 3413 + Architectures: x86 3414 + Type: vcpu ioctl 3415 + Parameters: struct kvm_x86_mce (in) 3416 + Returns: 0 on success, 3417 + -EFAULT if struct kvm_x86_mce cannot be read, 3418 + -EINVAL if the bank number is invalid, 3419 + -EINVAL if VAL bit is not set in status field. 3420 + 3421 + Inject a machine check error (MCE) into the guest. The input 3422 + parameter is: 3423 + 3424 + struct kvm_x86_mce { 3425 + __u64 status; 3426 + __u64 addr; 3427 + __u64 misc; 3428 + __u64 mcg_status; 3429 + __u8 bank; 3430 + __u8 pad1[7]; 3431 + __u64 pad2[3]; 3432 + }; 3433 + 3434 + If the MCE being reported is an uncorrected error, KVM will 3435 + inject it as an MCE exception into the guest. 
If the guest 3436 + MCG_STATUS register reports that an MCE is in progress, KVM 3437 + causes a KVM_EXIT_SHUTDOWN vmexit. 3438 + 3439 + Otherwise, if the MCE is a corrected error, KVM will just 3440 + store it in the corresponding bank (provided this bank is 3441 + not holding a previously reported uncorrected error). 3442 + 3380 3443 5. The kvm_run structure 3381 3444 ------------------------ 3382 3445
+6
MAINTAINERS
··· 4775 4775 S: Maintained 4776 4776 F: drivers/edac/mpc85xx_edac.[ch] 4777 4777 4778 + EDAC-PND2 4779 + M: Tony Luck <tony.luck@intel.com> 4780 + L: linux-edac@vger.kernel.org 4781 + S: Maintained 4782 + F: drivers/edac/pnd2_edac.[ch] 4783 + 4778 4784 EDAC-PASEMI 4779 4785 M: Egor Martovetsky <egor@pasemi.com> 4780 4786 L: linux-edac@vger.kernel.org
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 11 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 4 + EXTRAVERSION = -rc5 5 5 NAME = Fearless Coyote 6 6 7 7 # *DOCUMENTATION*
+1
arch/arc/boot/dts/skeleton.dtsi
··· 26 26 device_type = "cpu"; 27 27 compatible = "snps,arc770d"; 28 28 reg = <0>; 29 + clocks = <&core_clk>; 29 30 }; 30 31 }; 31 32
+1
arch/arc/boot/dts/skeleton_hs.dtsi
··· 21 21 device_type = "cpu"; 22 22 compatible = "snps,archs38"; 23 23 reg = <0>; 24 + clocks = <&core_clk>; 24 25 }; 25 26 }; 26 27
+20 -1
arch/arc/boot/dts/skeleton_hs_idu.dtsi
··· 19 19 20 20 cpu@0 { 21 21 device_type = "cpu"; 22 - compatible = "snps,archs38xN"; 22 + compatible = "snps,archs38"; 23 23 reg = <0>; 24 + clocks = <&core_clk>; 25 + }; 26 + cpu@1 { 27 + device_type = "cpu"; 28 + compatible = "snps,archs38"; 29 + reg = <1>; 30 + clocks = <&core_clk>; 31 + }; 32 + cpu@2 { 33 + device_type = "cpu"; 34 + compatible = "snps,archs38"; 35 + reg = <2>; 36 + clocks = <&core_clk>; 37 + }; 38 + cpu@3 { 39 + device_type = "cpu"; 40 + compatible = "snps,archs38"; 41 + reg = <3>; 42 + clocks = <&core_clk>; 24 43 }; 25 44 }; 26 45
+13 -7
arch/arc/boot/dts/vdk_axs10x_mb.dtsi
··· 112 112 interrupts = <7>; 113 113 bus-width = <4>; 114 114 }; 115 + }; 115 116 116 - /* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */ 117 - uio_ev: uio@0xD0000000 { 118 - compatible = "generic-uio"; 119 - reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>; 120 - reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem"; 121 - interrupts = <23>; 122 - }; 117 + /* 118 + * Embedded Vision subsystem UIO mappings; only relevant for EV VDK 119 + * 120 + * This node is intentionally put outside of MB above because 121 + * it maps areas outside of MB's 0xEz-0xFz. 122 + */ 123 + uio_ev: uio@0xD0000000 { 124 + compatible = "generic-uio"; 125 + reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>; 126 + reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem"; 127 + interrupt-parent = <&mb_intc>; 128 + interrupts = <23>; 123 129 }; 124 130 };
+1 -3
arch/arc/include/asm/kprobes.h
··· 54 54 void kretprobe_trampoline(void); 55 55 void trap_is_kprobe(unsigned long address, struct pt_regs *regs); 56 56 #else 57 - static void trap_is_kprobe(unsigned long address, struct pt_regs *regs) 58 - { 59 - } 57 + #define trap_is_kprobe(address, regs) 60 58 #endif /* CONFIG_KPROBES */ 61 59 62 60 #endif /* _ARC_KPROBES_H */
+9 -3
arch/arc/kernel/entry-arcv2.S
··· 100 100 ;################### Non TLB Exception Handling ############################# 101 101 102 102 ENTRY(EV_SWI) 103 - flag 1 103 + ; TODO: implement this 104 + EXCEPTION_PROLOGUE 105 + b ret_from_exception 104 106 END(EV_SWI) 105 107 106 108 ENTRY(EV_DivZero) 107 - flag 1 109 + ; TODO: implement this 110 + EXCEPTION_PROLOGUE 111 + b ret_from_exception 108 112 END(EV_DivZero) 109 113 110 114 ENTRY(EV_DCError) 111 - flag 1 115 + ; TODO: implement this 116 + EXCEPTION_PROLOGUE 117 + b ret_from_exception 112 118 END(EV_DCError) 113 119 114 120 ; ---------------------------------------------
+12 -4
arch/arc/kernel/setup.c
··· 10 10 #include <linux/fs.h> 11 11 #include <linux/delay.h> 12 12 #include <linux/root_dev.h> 13 + #include <linux/clk.h> 13 14 #include <linux/clk-provider.h> 14 15 #include <linux/clocksource.h> 15 16 #include <linux/console.h> ··· 489 488 { 490 489 char *str; 491 490 int cpu_id = ptr_to_cpu(v); 492 - struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk"); 493 - u32 freq = 0; 491 + struct device *cpu_dev = get_cpu_device(cpu_id); 492 + struct clk *cpu_clk; 493 + unsigned long freq = 0; 494 494 495 495 if (!cpu_online(cpu_id)) { 496 496 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id); ··· 504 502 505 503 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE)); 506 504 507 - of_property_read_u32(core_clk, "clock-frequency", &freq); 505 + cpu_clk = clk_get(cpu_dev, NULL); 506 + if (IS_ERR(cpu_clk)) { 507 + seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n", 508 + cpu_id); 509 + } else { 510 + freq = clk_get_rate(cpu_clk); 511 + } 508 512 if (freq) 509 - seq_printf(m, "CPU speed\t: %u.%02u Mhz\n", 513 + seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n", 510 514 freq / 1000000, (freq / 10000) % 100); 511 515 512 516 seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
+3
arch/arc/mm/cache.c
··· 633 633 634 634 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); 635 635 636 + /* Make sure "busy" bit reports correct status, see STAR 9001165532 */ 637 + read_aux_reg(r); 638 + 636 639 /* Important to wait for flush to complete */ 637 640 while (read_aux_reg(r) & SLC_CTRL_BUSY); 638 641 }
+2 -2
arch/arm/boot/dts/sun8i-a33.dtsi
··· 113 113 simple-audio-card,mclk-fs = <512>; 114 114 simple-audio-card,aux-devs = <&codec_analog>; 115 115 simple-audio-card,routing = 116 - "Left DAC", "Digital Left DAC", 117 - "Right DAC", "Digital Right DAC"; 116 + "Left DAC", "AIF1 Slot 0 Left", 117 + "Right DAC", "AIF1 Slot 0 Right"; 118 118 status = "disabled"; 119 119 120 120 simple-audio-card,cpu {
-2
arch/arm64/include/asm/current.h
··· 3 3 4 4 #include <linux/compiler.h> 5 5 6 - #include <asm/sysreg.h> 7 - 8 6 #ifndef __ASSEMBLY__ 9 7 10 8 struct task_struct;
+1 -1
arch/arm64/kernel/smp.c
··· 944 944 #ifdef CONFIG_HOTPLUG_CPU 945 945 int any_cpu = raw_smp_processor_id(); 946 946 947 - if (cpu_ops[any_cpu]->cpu_die) 947 + if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die) 948 948 return true; 949 949 #endif 950 950 return false;
-1
arch/arm64/kernel/vdso/.gitignore
··· 1 1 vdso.lds 2 - vdso-offsets.h
-41
arch/c6x/kernel/ptrace.c
··· 70 70 0, sizeof(*regs)); 71 71 } 72 72 73 - static int gpr_set(struct task_struct *target, 74 - const struct user_regset *regset, 75 - unsigned int pos, unsigned int count, 76 - const void *kbuf, const void __user *ubuf) 77 - { 78 - int ret; 79 - struct pt_regs *regs = task_pt_regs(target); 80 - 81 - /* Don't copyin TSR or CSR */ 82 - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 83 - &regs, 84 - 0, PT_TSR * sizeof(long)); 85 - if (ret) 86 - return ret; 87 - 88 - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 89 - PT_TSR * sizeof(long), 90 - (PT_TSR + 1) * sizeof(long)); 91 - if (ret) 92 - return ret; 93 - 94 - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 95 - &regs, 96 - (PT_TSR + 1) * sizeof(long), 97 - PT_CSR * sizeof(long)); 98 - if (ret) 99 - return ret; 100 - 101 - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 102 - PT_CSR * sizeof(long), 103 - (PT_CSR + 1) * sizeof(long)); 104 - if (ret) 105 - return ret; 106 - 107 - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 108 - &regs, 109 - (PT_CSR + 1) * sizeof(long), -1); 110 - return ret; 111 - } 112 - 113 73 enum c6x_regset { 114 74 REGSET_GPR, 115 75 }; ··· 81 121 .size = sizeof(u32), 82 122 .align = sizeof(u32), 83 123 .get = gpr_get, 84 - .set = gpr_set 85 124 }, 86 125 }; 87 126
+5 -3
arch/h8300/kernel/ptrace.c
··· 95 95 long *reg = (long *)&regs; 96 96 97 97 /* build user regs in buffer */ 98 - for (r = 0; r < ARRAY_SIZE(register_offset); r++) 98 + BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0); 99 + for (r = 0; r < sizeof(regs) / sizeof(long); r++) 99 100 *reg++ = h8300_get_reg(target, r); 100 101 101 102 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, ··· 114 113 long *reg; 115 114 116 115 /* build user regs in buffer */ 117 - for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++) 116 + BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0); 117 + for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++) 118 118 *reg++ = h8300_get_reg(target, r); 119 119 120 120 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ··· 124 122 return ret; 125 123 126 124 /* write back to pt_regs */ 127 - for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++) 125 + for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++) 128 126 h8300_put_reg(target, r, *reg++); 129 127 return 0; 130 128 }
+13 -1
arch/m68k/configs/amiga_defconfig
··· 25 25 # CONFIG_EFI_PARTITION is not set 26 26 CONFIG_SYSV68_PARTITION=y 27 27 CONFIG_IOSCHED_DEADLINE=m 28 + CONFIG_MQ_IOSCHED_DEADLINE=m 28 29 CONFIG_KEXEC=y 29 30 CONFIG_BOOTINFO_PROC=y 30 31 CONFIG_M68020=y ··· 61 60 CONFIG_NET_FOU_IP_TUNNELS=y 62 61 CONFIG_INET_AH=m 63 62 CONFIG_INET_ESP=m 63 + CONFIG_INET_ESP_OFFLOAD=m 64 64 CONFIG_INET_IPCOMP=m 65 65 CONFIG_INET_XFRM_MODE_TRANSPORT=m 66 66 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 73 71 CONFIG_IPV6_ROUTER_PREF=y 74 72 CONFIG_INET6_AH=m 75 73 CONFIG_INET6_ESP=m 74 + CONFIG_INET6_ESP_OFFLOAD=m 76 75 CONFIG_INET6_IPCOMP=m 77 76 CONFIG_IPV6_ILA=m 78 77 CONFIG_IPV6_VTI=m ··· 104 101 CONFIG_NFT_CT=m 105 102 CONFIG_NFT_SET_RBTREE=m 106 103 CONFIG_NFT_SET_HASH=m 104 + CONFIG_NFT_SET_BITMAP=m 107 105 CONFIG_NFT_COUNTER=m 108 106 CONFIG_NFT_LOG=m 109 107 CONFIG_NFT_LIMIT=m ··· 302 298 CONFIG_NET_L3_MASTER_DEV=y 303 299 CONFIG_AF_KCM=m 304 300 # CONFIG_WIRELESS is not set 301 + CONFIG_PSAMPLE=m 302 + CONFIG_NET_IFE=m 305 303 CONFIG_NET_DEVLINK=m 306 304 # CONFIG_UEVENT_HELPER is not set 307 305 CONFIG_DEVTMPFS=y ··· 377 371 CONFIG_MACVLAN=m 378 372 CONFIG_MACVTAP=m 379 373 CONFIG_IPVLAN=m 374 + CONFIG_IPVTAP=m 380 375 CONFIG_VXLAN=m 381 376 CONFIG_GENEVE=m 382 377 CONFIG_GTP=m ··· 390 383 # CONFIG_NET_VENDOR_AMAZON is not set 391 384 CONFIG_A2065=y 392 385 CONFIG_ARIADNE=y 386 + # CONFIG_NET_VENDOR_AQUANTIA is not set 393 387 # CONFIG_NET_VENDOR_ARC is not set 394 388 # CONFIG_NET_CADENCE is not set 395 389 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 412 404 # CONFIG_NET_VENDOR_SOLARFLARE is not set 413 405 # CONFIG_NET_VENDOR_SMSC is not set 414 406 # CONFIG_NET_VENDOR_STMICRO is not set 415 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 416 407 # CONFIG_NET_VENDOR_VIA is not set 417 408 # CONFIG_NET_VENDOR_WIZNET is not set 418 409 CONFIG_PPP=m ··· 571 564 CONFIG_DLM=m 572 565 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 573 566 CONFIG_MAGIC_SYSRQ=y 567 + CONFIG_WW_MUTEX_SELFTEST=m 568 + CONFIG_ATOMIC64_SELFTEST=m 574 569 
CONFIG_ASYNC_RAID6_TEST=m 575 570 CONFIG_TEST_HEXDUMP=m 576 571 CONFIG_TEST_STRING_HELPERS=m ··· 603 594 CONFIG_CRYPTO_LRW=m 604 595 CONFIG_CRYPTO_PCBC=m 605 596 CONFIG_CRYPTO_KEYWRAP=m 597 + CONFIG_CRYPTO_CMAC=m 606 598 CONFIG_CRYPTO_XCBC=m 607 599 CONFIG_CRYPTO_VMAC=m 608 600 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 615 605 CONFIG_CRYPTO_SHA3=m 616 606 CONFIG_CRYPTO_TGR192=m 617 607 CONFIG_CRYPTO_WP512=m 608 + CONFIG_CRYPTO_AES_TI=m 618 609 CONFIG_CRYPTO_ANUBIS=m 619 610 CONFIG_CRYPTO_BLOWFISH=m 620 611 CONFIG_CRYPTO_CAMELLIA=m ··· 640 629 CONFIG_CRYPTO_USER_API_RNG=m 641 630 CONFIG_CRYPTO_USER_API_AEAD=m 642 631 # CONFIG_CRYPTO_HW is not set 632 + CONFIG_CRC32_SELFTEST=m 643 633 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/apollo_defconfig
··· 26 26 # CONFIG_EFI_PARTITION is not set 27 27 CONFIG_SYSV68_PARTITION=y 28 28 CONFIG_IOSCHED_DEADLINE=m 29 + CONFIG_MQ_IOSCHED_DEADLINE=m 29 30 CONFIG_KEXEC=y 30 31 CONFIG_BOOTINFO_PROC=y 31 32 CONFIG_M68020=y ··· 59 58 CONFIG_NET_FOU_IP_TUNNELS=y 60 59 CONFIG_INET_AH=m 61 60 CONFIG_INET_ESP=m 61 + CONFIG_INET_ESP_OFFLOAD=m 62 62 CONFIG_INET_IPCOMP=m 63 63 CONFIG_INET_XFRM_MODE_TRANSPORT=m 64 64 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 71 69 CONFIG_IPV6_ROUTER_PREF=y 72 70 CONFIG_INET6_AH=m 73 71 CONFIG_INET6_ESP=m 72 + CONFIG_INET6_ESP_OFFLOAD=m 74 73 CONFIG_INET6_IPCOMP=m 75 74 CONFIG_IPV6_ILA=m 76 75 CONFIG_IPV6_VTI=m ··· 102 99 CONFIG_NFT_CT=m 103 100 CONFIG_NFT_SET_RBTREE=m 104 101 CONFIG_NFT_SET_HASH=m 102 + CONFIG_NFT_SET_BITMAP=m 105 103 CONFIG_NFT_COUNTER=m 106 104 CONFIG_NFT_LOG=m 107 105 CONFIG_NFT_LIMIT=m ··· 300 296 CONFIG_NET_L3_MASTER_DEV=y 301 297 CONFIG_AF_KCM=m 302 298 # CONFIG_WIRELESS is not set 299 + CONFIG_PSAMPLE=m 300 + CONFIG_NET_IFE=m 303 301 CONFIG_NET_DEVLINK=m 304 302 # CONFIG_UEVENT_HELPER is not set 305 303 CONFIG_DEVTMPFS=y ··· 359 353 CONFIG_MACVLAN=m 360 354 CONFIG_MACVTAP=m 361 355 CONFIG_IPVLAN=m 356 + CONFIG_IPVTAP=m 362 357 CONFIG_VXLAN=m 363 358 CONFIG_GENEVE=m 364 359 CONFIG_GTP=m ··· 369 362 CONFIG_VETH=m 370 363 # CONFIG_NET_VENDOR_ALACRITECH is not set 371 364 # CONFIG_NET_VENDOR_AMAZON is not set 365 + # CONFIG_NET_VENDOR_AQUANTIA is not set 372 366 # CONFIG_NET_VENDOR_ARC is not set 373 367 # CONFIG_NET_CADENCE is not set 374 368 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 386 378 # CONFIG_NET_VENDOR_SEEQ is not set 387 379 # CONFIG_NET_VENDOR_SOLARFLARE is not set 388 380 # CONFIG_NET_VENDOR_STMICRO is not set 389 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 390 381 # CONFIG_NET_VENDOR_VIA is not set 391 382 # CONFIG_NET_VENDOR_WIZNET is not set 392 383 CONFIG_PPP=m ··· 530 523 CONFIG_DLM=m 531 524 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 532 525 CONFIG_MAGIC_SYSRQ=y 526 + CONFIG_WW_MUTEX_SELFTEST=m 527 + 
CONFIG_ATOMIC64_SELFTEST=m 533 528 CONFIG_ASYNC_RAID6_TEST=m 534 529 CONFIG_TEST_HEXDUMP=m 535 530 CONFIG_TEST_STRING_HELPERS=m ··· 562 553 CONFIG_CRYPTO_LRW=m 563 554 CONFIG_CRYPTO_PCBC=m 564 555 CONFIG_CRYPTO_KEYWRAP=m 556 + CONFIG_CRYPTO_CMAC=m 565 557 CONFIG_CRYPTO_XCBC=m 566 558 CONFIG_CRYPTO_VMAC=m 567 559 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 574 564 CONFIG_CRYPTO_SHA3=m 575 565 CONFIG_CRYPTO_TGR192=m 576 566 CONFIG_CRYPTO_WP512=m 567 + CONFIG_CRYPTO_AES_TI=m 577 568 CONFIG_CRYPTO_ANUBIS=m 578 569 CONFIG_CRYPTO_BLOWFISH=m 579 570 CONFIG_CRYPTO_CAMELLIA=m ··· 599 588 CONFIG_CRYPTO_USER_API_RNG=m 600 589 CONFIG_CRYPTO_USER_API_AEAD=m 601 590 # CONFIG_CRYPTO_HW is not set 591 + CONFIG_CRC32_SELFTEST=m 602 592 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/atari_defconfig
··· 25 25 # CONFIG_EFI_PARTITION is not set 26 26 CONFIG_SYSV68_PARTITION=y 27 27 CONFIG_IOSCHED_DEADLINE=m 28 + CONFIG_MQ_IOSCHED_DEADLINE=m 28 29 CONFIG_KEXEC=y 29 30 CONFIG_BOOTINFO_PROC=y 30 31 CONFIG_M68020=y ··· 59 58 CONFIG_NET_FOU_IP_TUNNELS=y 60 59 CONFIG_INET_AH=m 61 60 CONFIG_INET_ESP=m 61 + CONFIG_INET_ESP_OFFLOAD=m 62 62 CONFIG_INET_IPCOMP=m 63 63 CONFIG_INET_XFRM_MODE_TRANSPORT=m 64 64 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 71 69 CONFIG_IPV6_ROUTER_PREF=y 72 70 CONFIG_INET6_AH=m 73 71 CONFIG_INET6_ESP=m 72 + CONFIG_INET6_ESP_OFFLOAD=m 74 73 CONFIG_INET6_IPCOMP=m 75 74 CONFIG_IPV6_ILA=m 76 75 CONFIG_IPV6_VTI=m ··· 102 99 CONFIG_NFT_CT=m 103 100 CONFIG_NFT_SET_RBTREE=m 104 101 CONFIG_NFT_SET_HASH=m 102 + CONFIG_NFT_SET_BITMAP=m 105 103 CONFIG_NFT_COUNTER=m 106 104 CONFIG_NFT_LOG=m 107 105 CONFIG_NFT_LIMIT=m ··· 300 296 CONFIG_NET_L3_MASTER_DEV=y 301 297 CONFIG_AF_KCM=m 302 298 # CONFIG_WIRELESS is not set 299 + CONFIG_PSAMPLE=m 300 + CONFIG_NET_IFE=m 303 301 CONFIG_NET_DEVLINK=m 304 302 # CONFIG_UEVENT_HELPER is not set 305 303 CONFIG_DEVTMPFS=y ··· 368 362 CONFIG_MACVLAN=m 369 363 CONFIG_MACVTAP=m 370 364 CONFIG_IPVLAN=m 365 + CONFIG_IPVTAP=m 371 366 CONFIG_VXLAN=m 372 367 CONFIG_GENEVE=m 373 368 CONFIG_GTP=m ··· 379 372 # CONFIG_NET_VENDOR_ALACRITECH is not set 380 373 # CONFIG_NET_VENDOR_AMAZON is not set 381 374 CONFIG_ATARILANCE=y 375 + # CONFIG_NET_VENDOR_AQUANTIA is not set 382 376 # CONFIG_NET_VENDOR_ARC is not set 383 377 # CONFIG_NET_CADENCE is not set 384 378 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 397 389 # CONFIG_NET_VENDOR_SOLARFLARE is not set 398 390 CONFIG_SMC91X=y 399 391 # CONFIG_NET_VENDOR_STMICRO is not set 400 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 401 392 # CONFIG_NET_VENDOR_VIA is not set 402 393 # CONFIG_NET_VENDOR_WIZNET is not set 403 394 CONFIG_PPP=m ··· 551 544 CONFIG_DLM=m 552 545 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 553 546 CONFIG_MAGIC_SYSRQ=y 547 + CONFIG_WW_MUTEX_SELFTEST=m 548 + CONFIG_ATOMIC64_SELFTEST=m 
554 549 CONFIG_ASYNC_RAID6_TEST=m 555 550 CONFIG_TEST_HEXDUMP=m 556 551 CONFIG_TEST_STRING_HELPERS=m ··· 583 574 CONFIG_CRYPTO_LRW=m 584 575 CONFIG_CRYPTO_PCBC=m 585 576 CONFIG_CRYPTO_KEYWRAP=m 577 + CONFIG_CRYPTO_CMAC=m 586 578 CONFIG_CRYPTO_XCBC=m 587 579 CONFIG_CRYPTO_VMAC=m 588 580 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 595 585 CONFIG_CRYPTO_SHA3=m 596 586 CONFIG_CRYPTO_TGR192=m 597 587 CONFIG_CRYPTO_WP512=m 588 + CONFIG_CRYPTO_AES_TI=m 598 589 CONFIG_CRYPTO_ANUBIS=m 599 590 CONFIG_CRYPTO_BLOWFISH=m 600 591 CONFIG_CRYPTO_CAMELLIA=m ··· 620 609 CONFIG_CRYPTO_USER_API_RNG=m 621 610 CONFIG_CRYPTO_USER_API_AEAD=m 622 611 # CONFIG_CRYPTO_HW is not set 612 + CONFIG_CRC32_SELFTEST=m 623 613 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/bvme6000_defconfig
··· 25 25 CONFIG_SUN_PARTITION=y 26 26 # CONFIG_EFI_PARTITION is not set 27 27 CONFIG_IOSCHED_DEADLINE=m 28 + CONFIG_MQ_IOSCHED_DEADLINE=m 28 29 CONFIG_KEXEC=y 29 30 CONFIG_BOOTINFO_PROC=y 30 31 CONFIG_M68040=y ··· 57 56 CONFIG_NET_FOU_IP_TUNNELS=y 58 57 CONFIG_INET_AH=m 59 58 CONFIG_INET_ESP=m 59 + CONFIG_INET_ESP_OFFLOAD=m 60 60 CONFIG_INET_IPCOMP=m 61 61 CONFIG_INET_XFRM_MODE_TRANSPORT=m 62 62 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 69 67 CONFIG_IPV6_ROUTER_PREF=y 70 68 CONFIG_INET6_AH=m 71 69 CONFIG_INET6_ESP=m 70 + CONFIG_INET6_ESP_OFFLOAD=m 72 71 CONFIG_INET6_IPCOMP=m 73 72 CONFIG_IPV6_ILA=m 74 73 CONFIG_IPV6_VTI=m ··· 100 97 CONFIG_NFT_CT=m 101 98 CONFIG_NFT_SET_RBTREE=m 102 99 CONFIG_NFT_SET_HASH=m 100 + CONFIG_NFT_SET_BITMAP=m 103 101 CONFIG_NFT_COUNTER=m 104 102 CONFIG_NFT_LOG=m 105 103 CONFIG_NFT_LIMIT=m ··· 298 294 CONFIG_NET_L3_MASTER_DEV=y 299 295 CONFIG_AF_KCM=m 300 296 # CONFIG_WIRELESS is not set 297 + CONFIG_PSAMPLE=m 298 + CONFIG_NET_IFE=m 301 299 CONFIG_NET_DEVLINK=m 302 300 # CONFIG_UEVENT_HELPER is not set 303 301 CONFIG_DEVTMPFS=y ··· 358 352 CONFIG_MACVLAN=m 359 353 CONFIG_MACVTAP=m 360 354 CONFIG_IPVLAN=m 355 + CONFIG_IPVTAP=m 361 356 CONFIG_VXLAN=m 362 357 CONFIG_GENEVE=m 363 358 CONFIG_GTP=m ··· 368 361 CONFIG_VETH=m 369 362 # CONFIG_NET_VENDOR_ALACRITECH is not set 370 363 # CONFIG_NET_VENDOR_AMAZON is not set 364 + # CONFIG_NET_VENDOR_AQUANTIA is not set 371 365 # CONFIG_NET_VENDOR_ARC is not set 372 366 # CONFIG_NET_CADENCE is not set 373 367 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 385 377 # CONFIG_NET_VENDOR_SEEQ is not set 386 378 # CONFIG_NET_VENDOR_SOLARFLARE is not set 387 379 # CONFIG_NET_VENDOR_STMICRO is not set 388 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 389 380 # CONFIG_NET_VENDOR_VIA is not set 390 381 # CONFIG_NET_VENDOR_WIZNET is not set 391 382 CONFIG_PPP=m ··· 522 515 CONFIG_DLM=m 523 516 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 524 517 CONFIG_MAGIC_SYSRQ=y 518 + CONFIG_WW_MUTEX_SELFTEST=m 519 + 
CONFIG_ATOMIC64_SELFTEST=m 525 520 CONFIG_ASYNC_RAID6_TEST=m 526 521 CONFIG_TEST_HEXDUMP=m 527 522 CONFIG_TEST_STRING_HELPERS=m ··· 554 545 CONFIG_CRYPTO_LRW=m 555 546 CONFIG_CRYPTO_PCBC=m 556 547 CONFIG_CRYPTO_KEYWRAP=m 548 + CONFIG_CRYPTO_CMAC=m 557 549 CONFIG_CRYPTO_XCBC=m 558 550 CONFIG_CRYPTO_VMAC=m 559 551 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 566 556 CONFIG_CRYPTO_SHA3=m 567 557 CONFIG_CRYPTO_TGR192=m 568 558 CONFIG_CRYPTO_WP512=m 559 + CONFIG_CRYPTO_AES_TI=m 569 560 CONFIG_CRYPTO_ANUBIS=m 570 561 CONFIG_CRYPTO_BLOWFISH=m 571 562 CONFIG_CRYPTO_CAMELLIA=m ··· 591 580 CONFIG_CRYPTO_USER_API_RNG=m 592 581 CONFIG_CRYPTO_USER_API_AEAD=m 593 582 # CONFIG_CRYPTO_HW is not set 583 + CONFIG_CRC32_SELFTEST=m 594 584 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/hp300_defconfig
··· 26 26 # CONFIG_EFI_PARTITION is not set 27 27 CONFIG_SYSV68_PARTITION=y 28 28 CONFIG_IOSCHED_DEADLINE=m 29 + CONFIG_MQ_IOSCHED_DEADLINE=m 29 30 CONFIG_KEXEC=y 30 31 CONFIG_BOOTINFO_PROC=y 31 32 CONFIG_M68020=y ··· 59 58 CONFIG_NET_FOU_IP_TUNNELS=y 60 59 CONFIG_INET_AH=m 61 60 CONFIG_INET_ESP=m 61 + CONFIG_INET_ESP_OFFLOAD=m 62 62 CONFIG_INET_IPCOMP=m 63 63 CONFIG_INET_XFRM_MODE_TRANSPORT=m 64 64 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 71 69 CONFIG_IPV6_ROUTER_PREF=y 72 70 CONFIG_INET6_AH=m 73 71 CONFIG_INET6_ESP=m 72 + CONFIG_INET6_ESP_OFFLOAD=m 74 73 CONFIG_INET6_IPCOMP=m 75 74 CONFIG_IPV6_ILA=m 76 75 CONFIG_IPV6_VTI=m ··· 102 99 CONFIG_NFT_CT=m 103 100 CONFIG_NFT_SET_RBTREE=m 104 101 CONFIG_NFT_SET_HASH=m 102 + CONFIG_NFT_SET_BITMAP=m 105 103 CONFIG_NFT_COUNTER=m 106 104 CONFIG_NFT_LOG=m 107 105 CONFIG_NFT_LIMIT=m ··· 300 296 CONFIG_NET_L3_MASTER_DEV=y 301 297 CONFIG_AF_KCM=m 302 298 # CONFIG_WIRELESS is not set 299 + CONFIG_PSAMPLE=m 300 + CONFIG_NET_IFE=m 303 301 CONFIG_NET_DEVLINK=m 304 302 # CONFIG_UEVENT_HELPER is not set 305 303 CONFIG_DEVTMPFS=y ··· 359 353 CONFIG_MACVLAN=m 360 354 CONFIG_MACVTAP=m 361 355 CONFIG_IPVLAN=m 356 + CONFIG_IPVTAP=m 362 357 CONFIG_VXLAN=m 363 358 CONFIG_GENEVE=m 364 359 CONFIG_GTP=m ··· 370 363 # CONFIG_NET_VENDOR_ALACRITECH is not set 371 364 # CONFIG_NET_VENDOR_AMAZON is not set 372 365 CONFIG_HPLANCE=y 366 + # CONFIG_NET_VENDOR_AQUANTIA is not set 373 367 # CONFIG_NET_VENDOR_ARC is not set 374 368 # CONFIG_NET_CADENCE is not set 375 369 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 387 379 # CONFIG_NET_VENDOR_SEEQ is not set 388 380 # CONFIG_NET_VENDOR_SOLARFLARE is not set 389 381 # CONFIG_NET_VENDOR_STMICRO is not set 390 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 391 382 # CONFIG_NET_VENDOR_VIA is not set 392 383 # CONFIG_NET_VENDOR_WIZNET is not set 393 384 CONFIG_PPP=m ··· 532 525 CONFIG_DLM=m 533 526 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 534 527 CONFIG_MAGIC_SYSRQ=y 528 + CONFIG_WW_MUTEX_SELFTEST=m 529 + 
CONFIG_ATOMIC64_SELFTEST=m 535 530 CONFIG_ASYNC_RAID6_TEST=m 536 531 CONFIG_TEST_HEXDUMP=m 537 532 CONFIG_TEST_STRING_HELPERS=m ··· 564 555 CONFIG_CRYPTO_LRW=m 565 556 CONFIG_CRYPTO_PCBC=m 566 557 CONFIG_CRYPTO_KEYWRAP=m 558 + CONFIG_CRYPTO_CMAC=m 567 559 CONFIG_CRYPTO_XCBC=m 568 560 CONFIG_CRYPTO_VMAC=m 569 561 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 576 566 CONFIG_CRYPTO_SHA3=m 577 567 CONFIG_CRYPTO_TGR192=m 578 568 CONFIG_CRYPTO_WP512=m 569 + CONFIG_CRYPTO_AES_TI=m 579 570 CONFIG_CRYPTO_ANUBIS=m 580 571 CONFIG_CRYPTO_BLOWFISH=m 581 572 CONFIG_CRYPTO_CAMELLIA=m ··· 601 590 CONFIG_CRYPTO_USER_API_RNG=m 602 591 CONFIG_CRYPTO_USER_API_AEAD=m 603 592 # CONFIG_CRYPTO_HW is not set 593 + CONFIG_CRC32_SELFTEST=m 604 594 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/mac_defconfig
··· 25 25 # CONFIG_EFI_PARTITION is not set 26 26 CONFIG_SYSV68_PARTITION=y 27 27 CONFIG_IOSCHED_DEADLINE=m 28 + CONFIG_MQ_IOSCHED_DEADLINE=m 28 29 CONFIG_KEXEC=y 29 30 CONFIG_BOOTINFO_PROC=y 30 31 CONFIG_M68020=y ··· 58 57 CONFIG_NET_FOU_IP_TUNNELS=y 59 58 CONFIG_INET_AH=m 60 59 CONFIG_INET_ESP=m 60 + CONFIG_INET_ESP_OFFLOAD=m 61 61 CONFIG_INET_IPCOMP=m 62 62 CONFIG_INET_XFRM_MODE_TRANSPORT=m 63 63 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 70 68 CONFIG_IPV6_ROUTER_PREF=y 71 69 CONFIG_INET6_AH=m 72 70 CONFIG_INET6_ESP=m 71 + CONFIG_INET6_ESP_OFFLOAD=m 73 72 CONFIG_INET6_IPCOMP=m 74 73 CONFIG_IPV6_ILA=m 75 74 CONFIG_IPV6_VTI=m ··· 101 98 CONFIG_NFT_CT=m 102 99 CONFIG_NFT_SET_RBTREE=m 103 100 CONFIG_NFT_SET_HASH=m 101 + CONFIG_NFT_SET_BITMAP=m 104 102 CONFIG_NFT_COUNTER=m 105 103 CONFIG_NFT_LOG=m 106 104 CONFIG_NFT_LIMIT=m ··· 302 298 CONFIG_NET_L3_MASTER_DEV=y 303 299 CONFIG_AF_KCM=m 304 300 # CONFIG_WIRELESS is not set 301 + CONFIG_PSAMPLE=m 302 + CONFIG_NET_IFE=m 305 303 CONFIG_NET_DEVLINK=m 306 304 # CONFIG_UEVENT_HELPER is not set 307 305 CONFIG_DEVTMPFS=y ··· 375 369 CONFIG_MACVLAN=m 376 370 CONFIG_MACVTAP=m 377 371 CONFIG_IPVLAN=m 372 + CONFIG_IPVTAP=m 378 373 CONFIG_VXLAN=m 379 374 CONFIG_GENEVE=m 380 375 CONFIG_GTP=m ··· 386 379 # CONFIG_NET_VENDOR_ALACRITECH is not set 387 380 # CONFIG_NET_VENDOR_AMAZON is not set 388 381 CONFIG_MACMACE=y 382 + # CONFIG_NET_VENDOR_AQUANTIA is not set 389 383 # CONFIG_NET_VENDOR_ARC is not set 390 384 # CONFIG_NET_CADENCE is not set 391 385 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 406 398 # CONFIG_NET_VENDOR_SOLARFLARE is not set 407 399 # CONFIG_NET_VENDOR_SMSC is not set 408 400 # CONFIG_NET_VENDOR_STMICRO is not set 409 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 410 401 # CONFIG_NET_VENDOR_VIA is not set 411 402 # CONFIG_NET_VENDOR_WIZNET is not set 412 403 CONFIG_PPP=m ··· 554 547 CONFIG_DLM=m 555 548 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 556 549 CONFIG_MAGIC_SYSRQ=y 550 + CONFIG_WW_MUTEX_SELFTEST=m 551 + 
CONFIG_ATOMIC64_SELFTEST=m 557 552 CONFIG_ASYNC_RAID6_TEST=m 558 553 CONFIG_TEST_HEXDUMP=m 559 554 CONFIG_TEST_STRING_HELPERS=m ··· 586 577 CONFIG_CRYPTO_LRW=m 587 578 CONFIG_CRYPTO_PCBC=m 588 579 CONFIG_CRYPTO_KEYWRAP=m 580 + CONFIG_CRYPTO_CMAC=m 589 581 CONFIG_CRYPTO_XCBC=m 590 582 CONFIG_CRYPTO_VMAC=m 591 583 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 598 588 CONFIG_CRYPTO_SHA3=m 599 589 CONFIG_CRYPTO_TGR192=m 600 590 CONFIG_CRYPTO_WP512=m 591 + CONFIG_CRYPTO_AES_TI=m 601 592 CONFIG_CRYPTO_ANUBIS=m 602 593 CONFIG_CRYPTO_BLOWFISH=m 603 594 CONFIG_CRYPTO_CAMELLIA=m ··· 623 612 CONFIG_CRYPTO_USER_API_RNG=m 624 613 CONFIG_CRYPTO_USER_API_AEAD=m 625 614 # CONFIG_CRYPTO_HW is not set 615 + CONFIG_CRC32_SELFTEST=m 626 616 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/multi_defconfig
··· 21 21 CONFIG_UNIXWARE_DISKLABEL=y 22 22 # CONFIG_EFI_PARTITION is not set 23 23 CONFIG_IOSCHED_DEADLINE=m 24 + CONFIG_MQ_IOSCHED_DEADLINE=m 24 25 CONFIG_KEXEC=y 25 26 CONFIG_BOOTINFO_PROC=y 26 27 CONFIG_M68020=y ··· 68 67 CONFIG_NET_FOU_IP_TUNNELS=y 69 68 CONFIG_INET_AH=m 70 69 CONFIG_INET_ESP=m 70 + CONFIG_INET_ESP_OFFLOAD=m 71 71 CONFIG_INET_IPCOMP=m 72 72 CONFIG_INET_XFRM_MODE_TRANSPORT=m 73 73 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 80 78 CONFIG_IPV6_ROUTER_PREF=y 81 79 CONFIG_INET6_AH=m 82 80 CONFIG_INET6_ESP=m 81 + CONFIG_INET6_ESP_OFFLOAD=m 83 82 CONFIG_INET6_IPCOMP=m 84 83 CONFIG_IPV6_ILA=m 85 84 CONFIG_IPV6_VTI=m ··· 111 108 CONFIG_NFT_CT=m 112 109 CONFIG_NFT_SET_RBTREE=m 113 110 CONFIG_NFT_SET_HASH=m 111 + CONFIG_NFT_SET_BITMAP=m 114 112 CONFIG_NFT_COUNTER=m 115 113 CONFIG_NFT_LOG=m 116 114 CONFIG_NFT_LIMIT=m ··· 312 308 CONFIG_NET_L3_MASTER_DEV=y 313 309 CONFIG_AF_KCM=m 314 310 # CONFIG_WIRELESS is not set 311 + CONFIG_PSAMPLE=m 312 + CONFIG_NET_IFE=m 315 313 CONFIG_NET_DEVLINK=m 316 314 # CONFIG_UEVENT_HELPER is not set 317 315 CONFIG_DEVTMPFS=y ··· 408 402 CONFIG_MACVLAN=m 409 403 CONFIG_MACVTAP=m 410 404 CONFIG_IPVLAN=m 405 + CONFIG_IPVTAP=m 411 406 CONFIG_VXLAN=m 412 407 CONFIG_GENEVE=m 413 408 CONFIG_GTP=m ··· 426 419 CONFIG_MVME147_NET=y 427 420 CONFIG_SUN3LANCE=y 428 421 CONFIG_MACMACE=y 422 + # CONFIG_NET_VENDOR_AQUANTIA is not set 429 423 # CONFIG_NET_VENDOR_ARC is not set 430 424 # CONFIG_NET_CADENCE is not set 431 425 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 452 444 # CONFIG_NET_VENDOR_SOLARFLARE is not set 453 445 CONFIG_SMC91X=y 454 446 # CONFIG_NET_VENDOR_STMICRO is not set 455 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 456 447 # CONFIG_NET_VENDOR_VIA is not set 457 448 # CONFIG_NET_VENDOR_WIZNET is not set 458 449 CONFIG_PLIP=m ··· 634 627 CONFIG_DLM=m 635 628 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 636 629 CONFIG_MAGIC_SYSRQ=y 630 + CONFIG_WW_MUTEX_SELFTEST=m 631 + CONFIG_ATOMIC64_SELFTEST=m 637 632 CONFIG_ASYNC_RAID6_TEST=m 638 
633 CONFIG_TEST_HEXDUMP=m 639 634 CONFIG_TEST_STRING_HELPERS=m ··· 666 657 CONFIG_CRYPTO_LRW=m 667 658 CONFIG_CRYPTO_PCBC=m 668 659 CONFIG_CRYPTO_KEYWRAP=m 660 + CONFIG_CRYPTO_CMAC=m 669 661 CONFIG_CRYPTO_XCBC=m 670 662 CONFIG_CRYPTO_VMAC=m 671 663 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 678 668 CONFIG_CRYPTO_SHA3=m 679 669 CONFIG_CRYPTO_TGR192=m 680 670 CONFIG_CRYPTO_WP512=m 671 + CONFIG_CRYPTO_AES_TI=m 681 672 CONFIG_CRYPTO_ANUBIS=m 682 673 CONFIG_CRYPTO_BLOWFISH=m 683 674 CONFIG_CRYPTO_CAMELLIA=m ··· 703 692 CONFIG_CRYPTO_USER_API_RNG=m 704 693 CONFIG_CRYPTO_USER_API_AEAD=m 705 694 # CONFIG_CRYPTO_HW is not set 695 + CONFIG_CRC32_SELFTEST=m 706 696 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/mvme147_defconfig
··· 25 25 CONFIG_SUN_PARTITION=y 26 26 # CONFIG_EFI_PARTITION is not set 27 27 CONFIG_IOSCHED_DEADLINE=m 28 + CONFIG_MQ_IOSCHED_DEADLINE=m 28 29 CONFIG_KEXEC=y 29 30 CONFIG_BOOTINFO_PROC=y 30 31 CONFIG_M68030=y ··· 56 55 CONFIG_NET_FOU_IP_TUNNELS=y 57 56 CONFIG_INET_AH=m 58 57 CONFIG_INET_ESP=m 58 + CONFIG_INET_ESP_OFFLOAD=m 59 59 CONFIG_INET_IPCOMP=m 60 60 CONFIG_INET_XFRM_MODE_TRANSPORT=m 61 61 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 68 66 CONFIG_IPV6_ROUTER_PREF=y 69 67 CONFIG_INET6_AH=m 70 68 CONFIG_INET6_ESP=m 69 + CONFIG_INET6_ESP_OFFLOAD=m 71 70 CONFIG_INET6_IPCOMP=m 72 71 CONFIG_IPV6_ILA=m 73 72 CONFIG_IPV6_VTI=m ··· 99 96 CONFIG_NFT_CT=m 100 97 CONFIG_NFT_SET_RBTREE=m 101 98 CONFIG_NFT_SET_HASH=m 99 + CONFIG_NFT_SET_BITMAP=m 102 100 CONFIG_NFT_COUNTER=m 103 101 CONFIG_NFT_LOG=m 104 102 CONFIG_NFT_LIMIT=m ··· 297 293 CONFIG_NET_L3_MASTER_DEV=y 298 294 CONFIG_AF_KCM=m 299 295 # CONFIG_WIRELESS is not set 296 + CONFIG_PSAMPLE=m 297 + CONFIG_NET_IFE=m 300 298 CONFIG_NET_DEVLINK=m 301 299 # CONFIG_UEVENT_HELPER is not set 302 300 CONFIG_DEVTMPFS=y ··· 357 351 CONFIG_MACVLAN=m 358 352 CONFIG_MACVTAP=m 359 353 CONFIG_IPVLAN=m 354 + CONFIG_IPVTAP=m 360 355 CONFIG_VXLAN=m 361 356 CONFIG_GENEVE=m 362 357 CONFIG_GTP=m ··· 368 361 # CONFIG_NET_VENDOR_ALACRITECH is not set 369 362 # CONFIG_NET_VENDOR_AMAZON is not set 370 363 CONFIG_MVME147_NET=y 364 + # CONFIG_NET_VENDOR_AQUANTIA is not set 371 365 # CONFIG_NET_VENDOR_ARC is not set 372 366 # CONFIG_NET_CADENCE is not set 373 367 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 385 377 # CONFIG_NET_VENDOR_SEEQ is not set 386 378 # CONFIG_NET_VENDOR_SOLARFLARE is not set 387 379 # CONFIG_NET_VENDOR_STMICRO is not set 388 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 389 380 # CONFIG_NET_VENDOR_VIA is not set 390 381 # CONFIG_NET_VENDOR_WIZNET is not set 391 382 CONFIG_PPP=m ··· 522 515 CONFIG_DLM=m 523 516 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 524 517 CONFIG_MAGIC_SYSRQ=y 518 + CONFIG_WW_MUTEX_SELFTEST=m 519 + 
CONFIG_ATOMIC64_SELFTEST=m 525 520 CONFIG_ASYNC_RAID6_TEST=m 526 521 CONFIG_TEST_HEXDUMP=m 527 522 CONFIG_TEST_STRING_HELPERS=m ··· 554 545 CONFIG_CRYPTO_LRW=m 555 546 CONFIG_CRYPTO_PCBC=m 556 547 CONFIG_CRYPTO_KEYWRAP=m 548 + CONFIG_CRYPTO_CMAC=m 557 549 CONFIG_CRYPTO_XCBC=m 558 550 CONFIG_CRYPTO_VMAC=m 559 551 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 566 556 CONFIG_CRYPTO_SHA3=m 567 557 CONFIG_CRYPTO_TGR192=m 568 558 CONFIG_CRYPTO_WP512=m 559 + CONFIG_CRYPTO_AES_TI=m 569 560 CONFIG_CRYPTO_ANUBIS=m 570 561 CONFIG_CRYPTO_BLOWFISH=m 571 562 CONFIG_CRYPTO_CAMELLIA=m ··· 591 580 CONFIG_CRYPTO_USER_API_RNG=m 592 581 CONFIG_CRYPTO_USER_API_AEAD=m 593 582 # CONFIG_CRYPTO_HW is not set 583 + CONFIG_CRC32_SELFTEST=m 594 584 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/mvme16x_defconfig
··· 25 25 CONFIG_SUN_PARTITION=y 26 26 # CONFIG_EFI_PARTITION is not set 27 27 CONFIG_IOSCHED_DEADLINE=m 28 + CONFIG_MQ_IOSCHED_DEADLINE=m 28 29 CONFIG_KEXEC=y 29 30 CONFIG_BOOTINFO_PROC=y 30 31 CONFIG_M68040=y ··· 57 56 CONFIG_NET_FOU_IP_TUNNELS=y 58 57 CONFIG_INET_AH=m 59 58 CONFIG_INET_ESP=m 59 + CONFIG_INET_ESP_OFFLOAD=m 60 60 CONFIG_INET_IPCOMP=m 61 61 CONFIG_INET_XFRM_MODE_TRANSPORT=m 62 62 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 69 67 CONFIG_IPV6_ROUTER_PREF=y 70 68 CONFIG_INET6_AH=m 71 69 CONFIG_INET6_ESP=m 70 + CONFIG_INET6_ESP_OFFLOAD=m 72 71 CONFIG_INET6_IPCOMP=m 73 72 CONFIG_IPV6_ILA=m 74 73 CONFIG_IPV6_VTI=m ··· 100 97 CONFIG_NFT_CT=m 101 98 CONFIG_NFT_SET_RBTREE=m 102 99 CONFIG_NFT_SET_HASH=m 100 + CONFIG_NFT_SET_BITMAP=m 103 101 CONFIG_NFT_COUNTER=m 104 102 CONFIG_NFT_LOG=m 105 103 CONFIG_NFT_LIMIT=m ··· 298 294 CONFIG_NET_L3_MASTER_DEV=y 299 295 CONFIG_AF_KCM=m 300 296 # CONFIG_WIRELESS is not set 297 + CONFIG_PSAMPLE=m 298 + CONFIG_NET_IFE=m 301 299 CONFIG_NET_DEVLINK=m 302 300 # CONFIG_UEVENT_HELPER is not set 303 301 CONFIG_DEVTMPFS=y ··· 358 352 CONFIG_MACVLAN=m 359 353 CONFIG_MACVTAP=m 360 354 CONFIG_IPVLAN=m 355 + CONFIG_IPVTAP=m 361 356 CONFIG_VXLAN=m 362 357 CONFIG_GENEVE=m 363 358 CONFIG_GTP=m ··· 368 361 CONFIG_VETH=m 369 362 # CONFIG_NET_VENDOR_ALACRITECH is not set 370 363 # CONFIG_NET_VENDOR_AMAZON is not set 364 + # CONFIG_NET_VENDOR_AQUANTIA is not set 371 365 # CONFIG_NET_VENDOR_ARC is not set 372 366 # CONFIG_NET_CADENCE is not set 373 367 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 385 377 # CONFIG_NET_VENDOR_SEEQ is not set 386 378 # CONFIG_NET_VENDOR_SOLARFLARE is not set 387 379 # CONFIG_NET_VENDOR_STMICRO is not set 388 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 389 380 # CONFIG_NET_VENDOR_VIA is not set 390 381 # CONFIG_NET_VENDOR_WIZNET is not set 391 382 CONFIG_PPP=m ··· 522 515 CONFIG_DLM=m 523 516 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 524 517 CONFIG_MAGIC_SYSRQ=y 518 + CONFIG_WW_MUTEX_SELFTEST=m 519 + 
CONFIG_ATOMIC64_SELFTEST=m 525 520 CONFIG_ASYNC_RAID6_TEST=m 526 521 CONFIG_TEST_HEXDUMP=m 527 522 CONFIG_TEST_STRING_HELPERS=m ··· 554 545 CONFIG_CRYPTO_LRW=m 555 546 CONFIG_CRYPTO_PCBC=m 556 547 CONFIG_CRYPTO_KEYWRAP=m 548 + CONFIG_CRYPTO_CMAC=m 557 549 CONFIG_CRYPTO_XCBC=m 558 550 CONFIG_CRYPTO_VMAC=m 559 551 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 566 556 CONFIG_CRYPTO_SHA3=m 567 557 CONFIG_CRYPTO_TGR192=m 568 558 CONFIG_CRYPTO_WP512=m 559 + CONFIG_CRYPTO_AES_TI=m 569 560 CONFIG_CRYPTO_ANUBIS=m 570 561 CONFIG_CRYPTO_BLOWFISH=m 571 562 CONFIG_CRYPTO_CAMELLIA=m ··· 591 580 CONFIG_CRYPTO_USER_API_RNG=m 592 581 CONFIG_CRYPTO_USER_API_AEAD=m 593 582 # CONFIG_CRYPTO_HW is not set 583 + CONFIG_CRC32_SELFTEST=m 594 584 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/q40_defconfig
··· 26 26 # CONFIG_EFI_PARTITION is not set 27 27 CONFIG_SYSV68_PARTITION=y 28 28 CONFIG_IOSCHED_DEADLINE=m 29 + CONFIG_MQ_IOSCHED_DEADLINE=m 29 30 CONFIG_KEXEC=y 30 31 CONFIG_BOOTINFO_PROC=y 31 32 CONFIG_M68040=y ··· 57 56 CONFIG_NET_FOU_IP_TUNNELS=y 58 57 CONFIG_INET_AH=m 59 58 CONFIG_INET_ESP=m 59 + CONFIG_INET_ESP_OFFLOAD=m 60 60 CONFIG_INET_IPCOMP=m 61 61 CONFIG_INET_XFRM_MODE_TRANSPORT=m 62 62 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 69 67 CONFIG_IPV6_ROUTER_PREF=y 70 68 CONFIG_INET6_AH=m 71 69 CONFIG_INET6_ESP=m 70 + CONFIG_INET6_ESP_OFFLOAD=m 72 71 CONFIG_INET6_IPCOMP=m 73 72 CONFIG_IPV6_ILA=m 74 73 CONFIG_IPV6_VTI=m ··· 100 97 CONFIG_NFT_CT=m 101 98 CONFIG_NFT_SET_RBTREE=m 102 99 CONFIG_NFT_SET_HASH=m 100 + CONFIG_NFT_SET_BITMAP=m 103 101 CONFIG_NFT_COUNTER=m 104 102 CONFIG_NFT_LOG=m 105 103 CONFIG_NFT_LIMIT=m ··· 298 294 CONFIG_NET_L3_MASTER_DEV=y 299 295 CONFIG_AF_KCM=m 300 296 # CONFIG_WIRELESS is not set 297 + CONFIG_PSAMPLE=m 298 + CONFIG_NET_IFE=m 301 299 CONFIG_NET_DEVLINK=m 302 300 # CONFIG_UEVENT_HELPER is not set 303 301 CONFIG_DEVTMPFS=y ··· 364 358 CONFIG_MACVLAN=m 365 359 CONFIG_MACVTAP=m 366 360 CONFIG_IPVLAN=m 361 + CONFIG_IPVTAP=m 367 362 CONFIG_VXLAN=m 368 363 CONFIG_GENEVE=m 369 364 CONFIG_GTP=m ··· 376 369 # CONFIG_NET_VENDOR_ALACRITECH is not set 377 370 # CONFIG_NET_VENDOR_AMAZON is not set 378 371 # CONFIG_NET_VENDOR_AMD is not set 372 + # CONFIG_NET_VENDOR_AQUANTIA is not set 379 373 # CONFIG_NET_VENDOR_ARC is not set 380 374 # CONFIG_NET_CADENCE is not set 381 375 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 396 388 # CONFIG_NET_VENDOR_SOLARFLARE is not set 397 389 # CONFIG_NET_VENDOR_SMSC is not set 398 390 # CONFIG_NET_VENDOR_STMICRO is not set 399 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 400 391 # CONFIG_NET_VENDOR_VIA is not set 401 392 # CONFIG_NET_VENDOR_WIZNET is not set 402 393 CONFIG_PLIP=m ··· 545 538 CONFIG_DLM=m 546 539 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 547 540 CONFIG_MAGIC_SYSRQ=y 541 + CONFIG_WW_MUTEX_SELFTEST=m 
542 + CONFIG_ATOMIC64_SELFTEST=m 548 543 CONFIG_ASYNC_RAID6_TEST=m 549 544 CONFIG_TEST_HEXDUMP=m 550 545 CONFIG_TEST_STRING_HELPERS=m ··· 577 568 CONFIG_CRYPTO_LRW=m 578 569 CONFIG_CRYPTO_PCBC=m 579 570 CONFIG_CRYPTO_KEYWRAP=m 571 + CONFIG_CRYPTO_CMAC=m 580 572 CONFIG_CRYPTO_XCBC=m 581 573 CONFIG_CRYPTO_VMAC=m 582 574 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 589 579 CONFIG_CRYPTO_SHA3=m 590 580 CONFIG_CRYPTO_TGR192=m 591 581 CONFIG_CRYPTO_WP512=m 582 + CONFIG_CRYPTO_AES_TI=m 592 583 CONFIG_CRYPTO_ANUBIS=m 593 584 CONFIG_CRYPTO_BLOWFISH=m 594 585 CONFIG_CRYPTO_CAMELLIA=m ··· 614 603 CONFIG_CRYPTO_USER_API_RNG=m 615 604 CONFIG_CRYPTO_USER_API_AEAD=m 616 605 # CONFIG_CRYPTO_HW is not set 606 + CONFIG_CRC32_SELFTEST=m 617 607 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/sun3_defconfig
··· 25 25 # CONFIG_EFI_PARTITION is not set 26 26 CONFIG_SYSV68_PARTITION=y 27 27 CONFIG_IOSCHED_DEADLINE=m 28 + CONFIG_MQ_IOSCHED_DEADLINE=m 28 29 CONFIG_KEXEC=y 29 30 CONFIG_BOOTINFO_PROC=y 30 31 CONFIG_SUN3=y ··· 54 53 CONFIG_NET_FOU_IP_TUNNELS=y 55 54 CONFIG_INET_AH=m 56 55 CONFIG_INET_ESP=m 56 + CONFIG_INET_ESP_OFFLOAD=m 57 57 CONFIG_INET_IPCOMP=m 58 58 CONFIG_INET_XFRM_MODE_TRANSPORT=m 59 59 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 66 64 CONFIG_IPV6_ROUTER_PREF=y 67 65 CONFIG_INET6_AH=m 68 66 CONFIG_INET6_ESP=m 67 + CONFIG_INET6_ESP_OFFLOAD=m 69 68 CONFIG_INET6_IPCOMP=m 70 69 CONFIG_IPV6_ILA=m 71 70 CONFIG_IPV6_VTI=m ··· 97 94 CONFIG_NFT_CT=m 98 95 CONFIG_NFT_SET_RBTREE=m 99 96 CONFIG_NFT_SET_HASH=m 97 + CONFIG_NFT_SET_BITMAP=m 100 98 CONFIG_NFT_COUNTER=m 101 99 CONFIG_NFT_LOG=m 102 100 CONFIG_NFT_LIMIT=m ··· 295 291 CONFIG_NET_L3_MASTER_DEV=y 296 292 CONFIG_AF_KCM=m 297 293 # CONFIG_WIRELESS is not set 294 + CONFIG_PSAMPLE=m 295 + CONFIG_NET_IFE=m 298 296 CONFIG_NET_DEVLINK=m 299 297 # CONFIG_UEVENT_HELPER is not set 300 298 CONFIG_DEVTMPFS=y ··· 355 349 CONFIG_MACVLAN=m 356 350 CONFIG_MACVTAP=m 357 351 CONFIG_IPVLAN=m 352 + CONFIG_IPVTAP=m 358 353 CONFIG_VXLAN=m 359 354 CONFIG_GENEVE=m 360 355 CONFIG_GTP=m ··· 366 359 # CONFIG_NET_VENDOR_ALACRITECH is not set 367 360 # CONFIG_NET_VENDOR_AMAZON is not set 368 361 CONFIG_SUN3LANCE=y 362 + # CONFIG_NET_VENDOR_AQUANTIA is not set 369 363 # CONFIG_NET_VENDOR_ARC is not set 370 364 # CONFIG_NET_CADENCE is not set 371 365 # CONFIG_NET_VENDOR_EZCHIP is not set ··· 383 375 # CONFIG_NET_VENDOR_SOLARFLARE is not set 384 376 # CONFIG_NET_VENDOR_STMICRO is not set 385 377 # CONFIG_NET_VENDOR_SUN is not set 386 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 387 378 # CONFIG_NET_VENDOR_VIA is not set 388 379 # CONFIG_NET_VENDOR_WIZNET is not set 389 380 CONFIG_PPP=m ··· 524 517 CONFIG_DLM=m 525 518 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 526 519 CONFIG_MAGIC_SYSRQ=y 520 + CONFIG_WW_MUTEX_SELFTEST=m 521 + 
CONFIG_ATOMIC64_SELFTEST=m 527 522 CONFIG_ASYNC_RAID6_TEST=m 528 523 CONFIG_TEST_HEXDUMP=m 529 524 CONFIG_TEST_STRING_HELPERS=m ··· 555 546 CONFIG_CRYPTO_LRW=m 556 547 CONFIG_CRYPTO_PCBC=m 557 548 CONFIG_CRYPTO_KEYWRAP=m 549 + CONFIG_CRYPTO_CMAC=m 558 550 CONFIG_CRYPTO_XCBC=m 559 551 CONFIG_CRYPTO_VMAC=m 560 552 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 567 557 CONFIG_CRYPTO_SHA3=m 568 558 CONFIG_CRYPTO_TGR192=m 569 559 CONFIG_CRYPTO_WP512=m 560 + CONFIG_CRYPTO_AES_TI=m 570 561 CONFIG_CRYPTO_ANUBIS=m 571 562 CONFIG_CRYPTO_BLOWFISH=m 572 563 CONFIG_CRYPTO_CAMELLIA=m ··· 592 581 CONFIG_CRYPTO_USER_API_RNG=m 593 582 CONFIG_CRYPTO_USER_API_AEAD=m 594 583 # CONFIG_CRYPTO_HW is not set 584 + CONFIG_CRC32_SELFTEST=m 595 585 CONFIG_XZ_DEC_TEST=m
+13 -1
arch/m68k/configs/sun3x_defconfig
··· 25 25 # CONFIG_EFI_PARTITION is not set 26 26 CONFIG_SYSV68_PARTITION=y 27 27 CONFIG_IOSCHED_DEADLINE=m 28 + CONFIG_MQ_IOSCHED_DEADLINE=m 28 29 CONFIG_KEXEC=y 29 30 CONFIG_BOOTINFO_PROC=y 30 31 CONFIG_SUN3X=y ··· 54 53 CONFIG_NET_FOU_IP_TUNNELS=y 55 54 CONFIG_INET_AH=m 56 55 CONFIG_INET_ESP=m 56 + CONFIG_INET_ESP_OFFLOAD=m 57 57 CONFIG_INET_IPCOMP=m 58 58 CONFIG_INET_XFRM_MODE_TRANSPORT=m 59 59 CONFIG_INET_XFRM_MODE_TUNNEL=m ··· 66 64 CONFIG_IPV6_ROUTER_PREF=y 67 65 CONFIG_INET6_AH=m 68 66 CONFIG_INET6_ESP=m 67 + CONFIG_INET6_ESP_OFFLOAD=m 69 68 CONFIG_INET6_IPCOMP=m 70 69 CONFIG_IPV6_ILA=m 71 70 CONFIG_IPV6_VTI=m ··· 97 94 CONFIG_NFT_CT=m 98 95 CONFIG_NFT_SET_RBTREE=m 99 96 CONFIG_NFT_SET_HASH=m 97 + CONFIG_NFT_SET_BITMAP=m 100 98 CONFIG_NFT_COUNTER=m 101 99 CONFIG_NFT_LOG=m 102 100 CONFIG_NFT_LIMIT=m ··· 295 291 CONFIG_NET_L3_MASTER_DEV=y 296 292 CONFIG_AF_KCM=m 297 293 # CONFIG_WIRELESS is not set 294 + CONFIG_PSAMPLE=m 295 + CONFIG_NET_IFE=m 298 296 CONFIG_NET_DEVLINK=m 299 297 # CONFIG_UEVENT_HELPER is not set 300 298 CONFIG_DEVTMPFS=y ··· 355 349 CONFIG_MACVLAN=m 356 350 CONFIG_MACVTAP=m 357 351 CONFIG_IPVLAN=m 352 + CONFIG_IPVTAP=m 358 353 CONFIG_VXLAN=m 359 354 CONFIG_GENEVE=m 360 355 CONFIG_GTP=m ··· 366 359 # CONFIG_NET_VENDOR_ALACRITECH is not set 367 360 # CONFIG_NET_VENDOR_AMAZON is not set 368 361 CONFIG_SUN3LANCE=y 362 + # CONFIG_NET_VENDOR_AQUANTIA is not set 369 363 # CONFIG_NET_VENDOR_ARC is not set 370 364 # CONFIG_NET_CADENCE is not set 371 365 # CONFIG_NET_VENDOR_BROADCOM is not set ··· 383 375 # CONFIG_NET_VENDOR_SEEQ is not set 384 376 # CONFIG_NET_VENDOR_SOLARFLARE is not set 385 377 # CONFIG_NET_VENDOR_STMICRO is not set 386 - # CONFIG_NET_VENDOR_SYNOPSYS is not set 387 378 # CONFIG_NET_VENDOR_VIA is not set 388 379 # CONFIG_NET_VENDOR_WIZNET is not set 389 380 CONFIG_PPP=m ··· 524 517 CONFIG_DLM=m 525 518 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 526 519 CONFIG_MAGIC_SYSRQ=y 520 + CONFIG_WW_MUTEX_SELFTEST=m 521 + 
CONFIG_ATOMIC64_SELFTEST=m 527 522 CONFIG_ASYNC_RAID6_TEST=m 528 523 CONFIG_TEST_HEXDUMP=m 529 524 CONFIG_TEST_STRING_HELPERS=m ··· 556 547 CONFIG_CRYPTO_LRW=m 557 548 CONFIG_CRYPTO_PCBC=m 558 549 CONFIG_CRYPTO_KEYWRAP=m 550 + CONFIG_CRYPTO_CMAC=m 559 551 CONFIG_CRYPTO_XCBC=m 560 552 CONFIG_CRYPTO_VMAC=m 561 553 CONFIG_CRYPTO_MICHAEL_MIC=m ··· 568 558 CONFIG_CRYPTO_SHA3=m 569 559 CONFIG_CRYPTO_TGR192=m 570 560 CONFIG_CRYPTO_WP512=m 561 + CONFIG_CRYPTO_AES_TI=m 571 562 CONFIG_CRYPTO_ANUBIS=m 572 563 CONFIG_CRYPTO_BLOWFISH=m 573 564 CONFIG_CRYPTO_CAMELLIA=m ··· 593 582 CONFIG_CRYPTO_USER_API_RNG=m 594 583 CONFIG_CRYPTO_USER_API_AEAD=m 595 584 # CONFIG_CRYPTO_HW is not set 585 + CONFIG_CRC32_SELFTEST=m 596 586 CONFIG_XZ_DEC_TEST=m
+1 -1
arch/m68k/include/asm/bitops.h
··· 148 148 #define __change_bit(nr, vaddr) change_bit(nr, vaddr) 149 149 150 150 151 - static inline int test_bit(int nr, const unsigned long *vaddr) 151 + static inline int test_bit(int nr, const volatile unsigned long *vaddr) 152 152 { 153 153 return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; 154 154 }
+1 -1
arch/m68k/include/asm/unistd.h
··· 4 4 #include <uapi/asm/unistd.h> 5 5 6 6 7 - #define NR_syscalls 379 7 + #define NR_syscalls 380 8 8 9 9 #define __ARCH_WANT_OLD_READDIR 10 10 #define __ARCH_WANT_OLD_STAT
+1
arch/m68k/include/uapi/asm/unistd.h
··· 384 384 #define __NR_copy_file_range 376 385 385 #define __NR_preadv2 377 386 386 #define __NR_pwritev2 378 387 + #define __NR_statx 379 387 388 388 389 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
+1
arch/m68k/kernel/syscalltable.S
··· 399 399 .long sys_copy_file_range 400 400 .long sys_preadv2 401 401 .long sys_pwritev2 402 + .long sys_statx
+15 -4
arch/metag/kernel/ptrace.c
··· 26 26 * user_regset definitions. 27 27 */ 28 28 29 + static unsigned long user_txstatus(const struct pt_regs *regs) 30 + { 31 + unsigned long data = (unsigned long)regs->ctx.Flags; 32 + 33 + if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) 34 + data |= USER_GP_REGS_STATUS_CATCH_BIT; 35 + 36 + return data; 37 + } 38 + 29 39 int metag_gp_regs_copyout(const struct pt_regs *regs, 30 40 unsigned int pos, unsigned int count, 31 41 void *kbuf, void __user *ubuf) ··· 74 64 if (ret) 75 65 goto out; 76 66 /* TXSTATUS */ 77 - data = (unsigned long)regs->ctx.Flags; 78 - if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) 79 - data |= USER_GP_REGS_STATUS_CATCH_BIT; 67 + data = user_txstatus(regs); 80 68 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 81 69 &data, 4*25, 4*26); 82 70 if (ret) ··· 129 121 if (ret) 130 122 goto out; 131 123 /* TXSTATUS */ 124 + data = user_txstatus(regs); 132 125 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 133 126 &data, 4*25, 4*26); 134 127 if (ret) ··· 255 246 unsigned long long *ptr; 256 247 int ret, i; 257 248 249 + if (count < 4*13) 250 + return -EINVAL; 258 251 /* Read the entire pipeline before making any changes */ 259 252 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 260 253 &rp, 0, 4*13); ··· 316 305 const void *kbuf, const void __user *ubuf) 317 306 { 318 307 int ret; 319 - void __user *tls; 308 + void __user *tls = target->thread.tls_ptr; 320 309 321 310 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 322 311 if (ret)
+2 -1
arch/mips/kernel/ptrace.c
··· 456 456 &target->thread.fpu, 457 457 0, sizeof(elf_fpregset_t)); 458 458 459 - for (i = 0; i < NUM_FPU_REGS; i++) { 459 + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); 460 + for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) { 460 461 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 461 462 &fpr_val, i * sizeof(elf_fpreg_t), 462 463 (i + 1) * sizeof(elf_fpreg_t));
+34 -25
arch/parisc/include/asm/uaccess.h
··· 65 65 ".previous\n" 66 66 67 67 /* 68 + * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry 69 + * (with lowest bit set) for which the fault handler in fixup_exception() will 70 + * load -EFAULT into %r8 for a read or write fault, and zeroes the target 71 + * register in case of a read fault in get_user(). 72 + */ 73 + #define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\ 74 + ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1) 75 + 76 + /* 68 77 * The page fault handler stores, in a per-cpu area, the following information 69 78 * if a fixup routine is available. 70 79 */ ··· 100 91 #define __get_user(x, ptr) \ 101 92 ({ \ 102 93 register long __gu_err __asm__ ("r8") = 0; \ 103 - register long __gu_val __asm__ ("r9") = 0; \ 94 + register long __gu_val; \ 104 95 \ 105 96 load_sr2(); \ 106 97 switch (sizeof(*(ptr))) { \ ··· 116 107 }) 117 108 118 109 #define __get_user_asm(ldx, ptr) \ 119 - __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \ 120 - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ 110 + __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \ 111 + "9:\n" \ 112 + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ 121 113 : "=r"(__gu_val), "=r"(__gu_err) \ 122 - : "r"(ptr), "1"(__gu_err) \ 123 - : "r1"); 114 + : "r"(ptr), "1"(__gu_err)); 124 115 125 116 #if !defined(CONFIG_64BIT) 126 117 127 118 #define __get_user_asm64(ptr) \ 128 - __asm__("\n1:\tldw 0(%%sr2,%2),%0" \ 129 - "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \ 130 - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\ 131 - ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\ 119 + __asm__(" copy %%r0,%R0\n" \ 120 + "1: ldw 0(%%sr2,%2),%0\n" \ 121 + "2: ldw 4(%%sr2,%2),%R0\n" \ 122 + "9:\n" \ 123 + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ 124 + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ 132 125 : "=r"(__gu_val), "=r"(__gu_err) \ 133 - : "r"(ptr), "1"(__gu_err) \ 134 - : "r1"); 126 + : "r"(ptr), "1"(__gu_err)); 135 127 136 128 #endif /* !defined(CONFIG_64BIT) */ 137 129 ··· 158 148 * The 
"__put_user/kernel_asm()" macros tell gcc they read from memory 159 149 * instead of writing. This is because they do not write to any memory 160 150 * gcc knows about, so there are no aliasing issues. These macros must 161 - * also be aware that "fixup_put_user_skip_[12]" are executed in the 162 - * context of the fault, and any registers used there must be listed 163 - * as clobbers. In this case only "r1" is used by the current routines. 164 - * r8/r9 are already listed as err/val. 151 + * also be aware that fixups are executed in the context of the fault, 152 + * and any registers used there must be listed as clobbers. 153 + * r8 is already listed as err. 165 154 */ 166 155 167 156 #define __put_user_asm(stx, x, ptr) \ 168 157 __asm__ __volatile__ ( \ 169 - "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \ 170 - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ 158 + "1: " stx " %2,0(%%sr2,%1)\n" \ 159 + "9:\n" \ 160 + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ 171 161 : "=r"(__pu_err) \ 172 - : "r"(ptr), "r"(x), "0"(__pu_err) \ 173 - : "r1") 162 + : "r"(ptr), "r"(x), "0"(__pu_err)) 174 163 175 164 176 165 #if !defined(CONFIG_64BIT) 177 166 178 167 #define __put_user_asm64(__val, ptr) do { \ 179 168 __asm__ __volatile__ ( \ 180 - "\n1:\tstw %2,0(%%sr2,%1)" \ 181 - "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \ 182 - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ 183 - ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ 169 + "1: stw %2,0(%%sr2,%1)\n" \ 170 + "2: stw %R2,4(%%sr2,%1)\n" \ 171 + "9:\n" \ 172 + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ 173 + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ 184 174 : "=r"(__pu_err) \ 185 - : "r"(ptr), "r"(__val), "0"(__pu_err) \ 186 - : "r1"); \ 175 + : "r"(ptr), "r"(__val), "0"(__pu_err)); \ 187 176 } while (0) 188 177 189 178 #endif /* !defined(CONFIG_64BIT) */
-10
arch/parisc/kernel/parisc_ksyms.c
··· 47 47 EXPORT_SYMBOL(lclear_user); 48 48 EXPORT_SYMBOL(lstrnlen_user); 49 49 50 - /* Global fixups - defined as int to avoid creation of function pointers */ 51 - extern int fixup_get_user_skip_1; 52 - extern int fixup_get_user_skip_2; 53 - extern int fixup_put_user_skip_1; 54 - extern int fixup_put_user_skip_2; 55 - EXPORT_SYMBOL(fixup_get_user_skip_1); 56 - EXPORT_SYMBOL(fixup_get_user_skip_2); 57 - EXPORT_SYMBOL(fixup_put_user_skip_1); 58 - EXPORT_SYMBOL(fixup_put_user_skip_2); 59 - 60 50 #ifndef CONFIG_64BIT 61 51 /* Needed so insmod can set dp value */ 62 52 extern int $global$;
+2
arch/parisc/kernel/process.c
··· 143 143 printk(KERN_EMERG "System shut down completed.\n" 144 144 "Please power this system off now."); 145 145 146 + /* prevent soft lockup/stalled CPU messages for endless loop. */ 147 + rcu_sysrq_start(); 146 148 for (;;); 147 149 } 148 150
+1 -1
arch/parisc/lib/Makefile
··· 2 2 # Makefile for parisc-specific library files 3 3 # 4 4 5 - lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ 5 + lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \ 6 6 ucmpdi2.o delay.o 7 7 8 8 obj-y := iomap.o
-98
arch/parisc/lib/fixup.S
··· 1 - /* 2 - * Linux/PA-RISC Project (http://www.parisc-linux.org/) 3 - * 4 - * Copyright (C) 2004 Randolph Chung <tausq@debian.org> 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2, or (at your option) 9 - * any later version. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 - * 16 - * You should have received a copy of the GNU General Public License 17 - * along with this program; if not, write to the Free Software 18 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 - * 20 - * Fixup routines for kernel exception handling. 21 - */ 22 - #include <asm/asm-offsets.h> 23 - #include <asm/assembly.h> 24 - #include <asm/errno.h> 25 - #include <linux/linkage.h> 26 - 27 - #ifdef CONFIG_SMP 28 - .macro get_fault_ip t1 t2 29 - loadgp 30 - addil LT%__per_cpu_offset,%r27 31 - LDREG RT%__per_cpu_offset(%r1),\t1 32 - /* t2 = smp_processor_id() */ 33 - mfctl 30,\t2 34 - ldw TI_CPU(\t2),\t2 35 - #ifdef CONFIG_64BIT 36 - extrd,u \t2,63,32,\t2 37 - #endif 38 - /* t2 = &__per_cpu_offset[smp_processor_id()]; */ 39 - LDREGX \t2(\t1),\t2 40 - addil LT%exception_data,%r27 41 - LDREG RT%exception_data(%r1),\t1 42 - /* t1 = this_cpu_ptr(&exception_data) */ 43 - add,l \t1,\t2,\t1 44 - /* %r27 = t1->fault_gp - restore gp */ 45 - LDREG EXCDATA_GP(\t1), %r27 46 - /* t1 = t1->fault_ip */ 47 - LDREG EXCDATA_IP(\t1), \t1 48 - .endm 49 - #else 50 - .macro get_fault_ip t1 t2 51 - loadgp 52 - /* t1 = this_cpu_ptr(&exception_data) */ 53 - addil LT%exception_data,%r27 54 - LDREG RT%exception_data(%r1),\t2 55 - /* %r27 = t2->fault_gp - restore gp */ 56 - LDREG EXCDATA_GP(\t2), %r27 57 - /* t1 = t2->fault_ip */ 
58 - LDREG EXCDATA_IP(\t2), \t1 59 - .endm 60 - #endif 61 - 62 - .level LEVEL 63 - 64 - .text 65 - .section .fixup, "ax" 66 - 67 - /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */ 68 - ENTRY_CFI(fixup_get_user_skip_1) 69 - get_fault_ip %r1,%r8 70 - ldo 4(%r1), %r1 71 - ldi -EFAULT, %r8 72 - bv %r0(%r1) 73 - copy %r0, %r9 74 - ENDPROC_CFI(fixup_get_user_skip_1) 75 - 76 - ENTRY_CFI(fixup_get_user_skip_2) 77 - get_fault_ip %r1,%r8 78 - ldo 8(%r1), %r1 79 - ldi -EFAULT, %r8 80 - bv %r0(%r1) 81 - copy %r0, %r9 82 - ENDPROC_CFI(fixup_get_user_skip_2) 83 - 84 - /* put_user() fixups, store -EFAULT in r8 */ 85 - ENTRY_CFI(fixup_put_user_skip_1) 86 - get_fault_ip %r1,%r8 87 - ldo 4(%r1), %r1 88 - bv %r0(%r1) 89 - ldi -EFAULT, %r8 90 - ENDPROC_CFI(fixup_put_user_skip_1) 91 - 92 - ENTRY_CFI(fixup_put_user_skip_2) 93 - get_fault_ip %r1,%r8 94 - ldo 8(%r1), %r1 95 - bv %r0(%r1) 96 - ldi -EFAULT, %r8 97 - ENDPROC_CFI(fixup_put_user_skip_2) 98 -
+318
arch/parisc/lib/lusercopy.S
··· 5 5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org> 6 6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr> 7 7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org> 8 + * Copyright (C) 2017 Helge Deller <deller@gmx.de> 9 + * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net> 8 10 * 9 11 * 10 12 * This program is free software; you can redistribute it and/or modify ··· 132 130 ASM_EXCEPTIONTABLE_ENTRY(1b,3b) 133 131 ASM_EXCEPTIONTABLE_ENTRY(2b,3b) 134 132 133 + .procend 134 + 135 + 136 + 137 + /* 138 + * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) 139 + * 140 + * Inputs: 141 + * - sr1 already contains space of source region 142 + * - sr2 already contains space of destination region 143 + * 144 + * Returns: 145 + * - number of bytes that could not be copied. 146 + * On success, this will be zero. 147 + * 148 + * This code is based on a C-implementation of a copy routine written by 149 + * Randolph Chung, which in turn was derived from the glibc. 150 + * 151 + * Several strategies are tried to try to get the best performance for various 152 + * conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes 153 + * at a time using general registers. Unaligned copies are handled either by 154 + * aligning the destination and then using shift-and-write method, or in a few 155 + * cases by falling back to a byte-at-a-time copy. 156 + * 157 + * Testing with various alignments and buffer sizes shows that this code is 158 + * often >10x faster than a simple byte-at-a-time copy, even for strangely 159 + * aligned operands. It is interesting to note that the glibc version of memcpy 160 + * (written in C) is actually quite fast already. This routine is able to beat 161 + * it by 30-40% for aligned copies because of the loop unrolling, but in some 162 + * cases the glibc version is still slightly faster. 
This lends more 163 + * credibility that gcc can generate very good code as long as we are careful. 164 + * 165 + * Possible optimizations: 166 + * - add cache prefetching 167 + * - try not to use the post-increment address modifiers; they may create 168 + * additional interlocks. Assumption is that those were only efficient on old 169 + * machines (pre PA8000 processors) 170 + */ 171 + 172 + dst = arg0 173 + src = arg1 174 + len = arg2 175 + end = arg3 176 + t1 = r19 177 + t2 = r20 178 + t3 = r21 179 + t4 = r22 180 + srcspc = sr1 181 + dstspc = sr2 182 + 183 + t0 = r1 184 + a1 = t1 185 + a2 = t2 186 + a3 = t3 187 + a0 = t4 188 + 189 + save_src = ret0 190 + save_dst = ret1 191 + save_len = r31 192 + 193 + ENTRY_CFI(pa_memcpy) 194 + .proc 195 + .callinfo NO_CALLS 196 + .entry 197 + 198 + /* Last destination address */ 199 + add dst,len,end 200 + 201 + /* short copy with less than 16 bytes? */ 202 + cmpib,>>=,n 15,len,.Lbyte_loop 203 + 204 + /* same alignment? */ 205 + xor src,dst,t0 206 + extru t0,31,2,t1 207 + cmpib,<>,n 0,t1,.Lunaligned_copy 208 + 209 + #ifdef CONFIG_64BIT 210 + /* only do 64-bit copies if we can get aligned. 
*/ 211 + extru t0,31,3,t1 212 + cmpib,<>,n 0,t1,.Lalign_loop32 213 + 214 + /* loop until we are 64-bit aligned */ 215 + .Lalign_loop64: 216 + extru dst,31,3,t1 217 + cmpib,=,n 0,t1,.Lcopy_loop_16 218 + 20: ldb,ma 1(srcspc,src),t1 219 + 21: stb,ma t1,1(dstspc,dst) 220 + b .Lalign_loop64 221 + ldo -1(len),len 222 + 223 + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) 224 + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) 225 + 226 + ldi 31,t0 227 + .Lcopy_loop_16: 228 + cmpb,COND(>>=),n t0,len,.Lword_loop 229 + 230 + 10: ldd 0(srcspc,src),t1 231 + 11: ldd 8(srcspc,src),t2 232 + ldo 16(src),src 233 + 12: std,ma t1,8(dstspc,dst) 234 + 13: std,ma t2,8(dstspc,dst) 235 + 14: ldd 0(srcspc,src),t1 236 + 15: ldd 8(srcspc,src),t2 237 + ldo 16(src),src 238 + 16: std,ma t1,8(dstspc,dst) 239 + 17: std,ma t2,8(dstspc,dst) 240 + 241 + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) 242 + ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault) 243 + ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done) 244 + ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done) 245 + ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done) 246 + ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault) 247 + ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done) 248 + ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done) 249 + 250 + b .Lcopy_loop_16 251 + ldo -32(len),len 252 + 253 + .Lword_loop: 254 + cmpib,COND(>>=),n 3,len,.Lbyte_loop 255 + 20: ldw,ma 4(srcspc,src),t1 256 + 21: stw,ma t1,4(dstspc,dst) 257 + b .Lword_loop 258 + ldo -4(len),len 259 + 260 + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) 261 + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) 262 + 263 + #endif /* CONFIG_64BIT */ 264 + 265 + /* loop until we are 32-bit aligned */ 266 + .Lalign_loop32: 267 + extru dst,31,2,t1 268 + cmpib,=,n 0,t1,.Lcopy_loop_4 269 + 20: ldb,ma 1(srcspc,src),t1 270 + 21: stb,ma t1,1(dstspc,dst) 271 + b .Lalign_loop32 272 + ldo -1(len),len 273 + 274 + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) 275 + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) 276 + 277 + 278 + .Lcopy_loop_4: 279 + cmpib,COND(>>=),n 15,len,.Lbyte_loop 280 
+ 281 + 10: ldw 0(srcspc,src),t1 282 + 11: ldw 4(srcspc,src),t2 283 + 12: stw,ma t1,4(dstspc,dst) 284 + 13: stw,ma t2,4(dstspc,dst) 285 + 14: ldw 8(srcspc,src),t1 286 + 15: ldw 12(srcspc,src),t2 287 + ldo 16(src),src 288 + 16: stw,ma t1,4(dstspc,dst) 289 + 17: stw,ma t2,4(dstspc,dst) 290 + 291 + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) 292 + ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault) 293 + ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done) 294 + ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done) 295 + ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done) 296 + ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault) 297 + ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done) 298 + ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done) 299 + 300 + b .Lcopy_loop_4 301 + ldo -16(len),len 302 + 303 + .Lbyte_loop: 304 + cmpclr,COND(<>) len,%r0,%r0 305 + b,n .Lcopy_done 306 + 20: ldb 0(srcspc,src),t1 307 + ldo 1(src),src 308 + 21: stb,ma t1,1(dstspc,dst) 309 + b .Lbyte_loop 310 + ldo -1(len),len 311 + 312 + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) 313 + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) 314 + 315 + .Lcopy_done: 316 + bv %r0(%r2) 317 + sub end,dst,ret0 318 + 319 + 320 + /* src and dst are not aligned the same way. */ 321 + /* need to go the hard way */ 322 + .Lunaligned_copy: 323 + /* align until dst is 32bit-word-aligned */ 324 + extru dst,31,2,t1 325 + cmpib,COND(=),n 0,t1,.Lcopy_dstaligned 326 + 20: ldb 0(srcspc,src),t1 327 + ldo 1(src),src 328 + 21: stb,ma t1,1(dstspc,dst) 329 + b .Lunaligned_copy 330 + ldo -1(len),len 331 + 332 + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) 333 + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) 334 + 335 + .Lcopy_dstaligned: 336 + 337 + /* store src, dst and len in safe place */ 338 + copy src,save_src 339 + copy dst,save_dst 340 + copy len,save_len 341 + 342 + /* len now needs give number of words to copy */ 343 + SHRREG len,2,len 344 + 345 + /* 346 + * Copy from a not-aligned src to an aligned dst using shifts. 347 + * Handles 4 words per loop. 
348 + */ 349 + 350 + depw,z src,28,2,t0 351 + subi 32,t0,t0 352 + mtsar t0 353 + extru len,31,2,t0 354 + cmpib,= 2,t0,.Lcase2 355 + /* Make src aligned by rounding it down. */ 356 + depi 0,31,2,src 357 + 358 + cmpiclr,<> 3,t0,%r0 359 + b,n .Lcase3 360 + cmpiclr,<> 1,t0,%r0 361 + b,n .Lcase1 362 + .Lcase0: 363 + cmpb,= %r0,len,.Lcda_finish 364 + nop 365 + 366 + 1: ldw,ma 4(srcspc,src), a3 367 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 368 + 1: ldw,ma 4(srcspc,src), a0 369 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 370 + b,n .Ldo3 371 + .Lcase1: 372 + 1: ldw,ma 4(srcspc,src), a2 373 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 374 + 1: ldw,ma 4(srcspc,src), a3 375 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 376 + ldo -1(len),len 377 + cmpb,=,n %r0,len,.Ldo0 378 + .Ldo4: 379 + 1: ldw,ma 4(srcspc,src), a0 380 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 381 + shrpw a2, a3, %sar, t0 382 + 1: stw,ma t0, 4(dstspc,dst) 383 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) 384 + .Ldo3: 385 + 1: ldw,ma 4(srcspc,src), a1 386 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 387 + shrpw a3, a0, %sar, t0 388 + 1: stw,ma t0, 4(dstspc,dst) 389 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) 390 + .Ldo2: 391 + 1: ldw,ma 4(srcspc,src), a2 392 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 393 + shrpw a0, a1, %sar, t0 394 + 1: stw,ma t0, 4(dstspc,dst) 395 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) 396 + .Ldo1: 397 + 1: ldw,ma 4(srcspc,src), a3 398 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 399 + shrpw a1, a2, %sar, t0 400 + 1: stw,ma t0, 4(dstspc,dst) 401 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) 402 + ldo -4(len),len 403 + cmpb,<> %r0,len,.Ldo4 404 + nop 405 + .Ldo0: 406 + shrpw a2, a3, %sar, t0 407 + 1: stw,ma t0, 4(dstspc,dst) 408 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) 409 + 410 + .Lcda_rdfault: 411 + .Lcda_finish: 412 + /* calculate new src, dst and len and jump to byte-copy loop */ 413 + sub dst,save_dst,t0 414 + add save_src,t0,src 415 + b .Lbyte_loop 416 + sub save_len,t0,len 417 + 
418 + .Lcase3: 419 + 1: ldw,ma 4(srcspc,src), a0 420 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 421 + 1: ldw,ma 4(srcspc,src), a1 422 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 423 + b .Ldo2 424 + ldo 1(len),len 425 + .Lcase2: 426 + 1: ldw,ma 4(srcspc,src), a1 427 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 428 + 1: ldw,ma 4(srcspc,src), a2 429 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 430 + b .Ldo1 431 + ldo 2(len),len 432 + 433 + 434 + /* fault exception fixup handlers: */ 435 + #ifdef CONFIG_64BIT 436 + .Lcopy16_fault: 437 + 10: b .Lcopy_done 438 + std,ma t1,8(dstspc,dst) 439 + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) 440 + #endif 441 + 442 + .Lcopy8_fault: 443 + 10: b .Lcopy_done 444 + stw,ma t1,4(dstspc,dst) 445 + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) 446 + 447 + .exit 448 + ENDPROC_CFI(pa_memcpy) 135 449 .procend 136 450 137 451 .end
+3 -458
arch/parisc/lib/memcpy.c
··· 2 2 * Optimized memory copy routines. 3 3 * 4 4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org> 5 - * Copyright (C) 2013 Helge Deller <deller@gmx.de> 5 + * Copyright (C) 2013-2017 Helge Deller <deller@gmx.de> 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify 8 8 * it under the terms of the GNU General Public License as published by ··· 21 21 * Portions derived from the GNU C Library 22 22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc. 23 23 * 24 - * Several strategies are tried to try to get the best performance for various 25 - * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using 26 - * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using 27 - * general registers. Unaligned copies are handled either by aligning the 28 - * destination and then using shift-and-write method, or in a few cases by 29 - * falling back to a byte-at-a-time copy. 30 - * 31 - * I chose to implement this in C because it is easier to maintain and debug, 32 - * and in my experiments it appears that the C code generated by gcc (3.3/3.4 33 - * at the time of writing) is fairly optimal. Unfortunately some of the 34 - * semantics of the copy routine (exception handling) is difficult to express 35 - * in C, so we have to play some tricks to get it to work. 36 - * 37 - * All the loads and stores are done via explicit asm() code in order to use 38 - * the right space registers. 39 - * 40 - * Testing with various alignments and buffer sizes shows that this code is 41 - * often >10x faster than a simple byte-at-a-time copy, even for strangely 42 - * aligned operands. It is interesting to note that the glibc version 43 - * of memcpy (written in C) is actually quite fast already. This routine is 44 - * able to beat it by 30-40% for aligned copies because of the loop unrolling, 45 - * but in some cases the glibc version is still slightly faster. 
This lends 46 - * more credibility that gcc can generate very good code as long as we are 47 - * careful. 48 - * 49 - * TODO: 50 - * - cache prefetching needs more experimentation to get optimal settings 51 - * - try not to use the post-increment address modifiers; they create additional 52 - * interlocks 53 - * - replace byte-copy loops with stybs sequences 54 24 */ 55 25 56 - #ifdef __KERNEL__ 57 26 #include <linux/module.h> 58 27 #include <linux/compiler.h> 59 28 #include <linux/uaccess.h> 60 - #define s_space "%%sr1" 61 - #define d_space "%%sr2" 62 - #else 63 - #include "memcpy.h" 64 - #define s_space "%%sr0" 65 - #define d_space "%%sr0" 66 - #define pa_memcpy new2_copy 67 - #endif 68 29 69 30 DECLARE_PER_CPU(struct exception_data, exception_data); 70 - 71 - #define preserve_branch(label) do { \ 72 - volatile int dummy = 0; \ 73 - /* The following branch is never taken, it's just here to */ \ 74 - /* prevent gcc from optimizing away our exception code. */ \ 75 - if (unlikely(dummy != dummy)) \ 76 - goto label; \ 77 - } while (0) 78 31 79 32 #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) 80 33 #define get_kernel_space() (0) 81 34 82 - #define MERGE(w0, sh_1, w1, sh_2) ({ \ 83 - unsigned int _r; \ 84 - asm volatile ( \ 85 - "mtsar %3\n" \ 86 - "shrpw %1, %2, %%sar, %0\n" \ 87 - : "=r"(_r) \ 88 - : "r"(w0), "r"(w1), "r"(sh_2) \ 89 - ); \ 90 - _r; \ 91 - }) 92 - #define THRESHOLD 16 93 - 94 - #ifdef DEBUG_MEMCPY 95 - #define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0) 96 - #else 97 - #define DPRINTF(fmt, args...) 
98 - #endif 99 - 100 - #define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ 101 - __asm__ __volatile__ ( \ 102 - "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \ 103 - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ 104 - : _tt(_t), "+r"(_a) \ 105 - : \ 106 - : "r8") 107 - 108 - #define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ 109 - __asm__ __volatile__ ( \ 110 - "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \ 111 - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ 112 - : "+r"(_a) \ 113 - : _tt(_t) \ 114 - : "r8") 115 - 116 - #define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e) 117 - #define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e) 118 - #define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e) 119 - #define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e) 120 - #define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e) 121 - #define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e) 122 - 123 - #define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \ 124 - __asm__ __volatile__ ( \ 125 - "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \ 126 - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ 127 - : _tt(_t) \ 128 - : "r"(_a) \ 129 - : "r8") 130 - 131 - #define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \ 132 - __asm__ __volatile__ ( \ 133 - "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \ 134 - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ 135 - : \ 136 - : _tt(_t), "r"(_a) \ 137 - : "r8") 138 - 139 - #define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e) 140 - #define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e) 141 - 142 - #ifdef CONFIG_PREFETCH 143 - static inline void prefetch_src(const void *addr) 144 - { 145 - __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr)); 146 - } 147 - 148 - static inline void prefetch_dst(const void *addr) 149 - { 150 - __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr)); 151 - } 152 - #else 153 - #define prefetch_src(addr) do { } while(0) 154 - #define prefetch_dst(addr) 
do { } while(0) 155 - #endif 156 - 157 - #define PA_MEMCPY_OK 0 158 - #define PA_MEMCPY_LOAD_ERROR 1 159 - #define PA_MEMCPY_STORE_ERROR 2 160 - 161 - /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words 162 - * per loop. This code is derived from glibc. 163 - */ 164 - static noinline unsigned long copy_dstaligned(unsigned long dst, 165 - unsigned long src, unsigned long len) 166 - { 167 - /* gcc complains that a2 and a3 may be uninitialized, but actually 168 - * they cannot be. Initialize a2/a3 to shut gcc up. 169 - */ 170 - register unsigned int a0, a1, a2 = 0, a3 = 0; 171 - int sh_1, sh_2; 172 - 173 - /* prefetch_src((const void *)src); */ 174 - 175 - /* Calculate how to shift a word read at the memory operation 176 - aligned srcp to make it aligned for copy. */ 177 - sh_1 = 8 * (src % sizeof(unsigned int)); 178 - sh_2 = 8 * sizeof(unsigned int) - sh_1; 179 - 180 - /* Make src aligned by rounding it down. */ 181 - src &= -sizeof(unsigned int); 182 - 183 - switch (len % 4) 184 - { 185 - case 2: 186 - /* a1 = ((unsigned int *) src)[0]; 187 - a2 = ((unsigned int *) src)[1]; */ 188 - ldw(s_space, 0, src, a1, cda_ldw_exc); 189 - ldw(s_space, 4, src, a2, cda_ldw_exc); 190 - src -= 1 * sizeof(unsigned int); 191 - dst -= 3 * sizeof(unsigned int); 192 - len += 2; 193 - goto do1; 194 - case 3: 195 - /* a0 = ((unsigned int *) src)[0]; 196 - a1 = ((unsigned int *) src)[1]; */ 197 - ldw(s_space, 0, src, a0, cda_ldw_exc); 198 - ldw(s_space, 4, src, a1, cda_ldw_exc); 199 - src -= 0 * sizeof(unsigned int); 200 - dst -= 2 * sizeof(unsigned int); 201 - len += 1; 202 - goto do2; 203 - case 0: 204 - if (len == 0) 205 - return PA_MEMCPY_OK; 206 - /* a3 = ((unsigned int *) src)[0]; 207 - a0 = ((unsigned int *) src)[1]; */ 208 - ldw(s_space, 0, src, a3, cda_ldw_exc); 209 - ldw(s_space, 4, src, a0, cda_ldw_exc); 210 - src -=-1 * sizeof(unsigned int); 211 - dst -= 1 * sizeof(unsigned int); 212 - len += 0; 213 - goto do3; 214 - case 1: 215 - /* a2 = ((unsigned 
int *) src)[0]; 216 - a3 = ((unsigned int *) src)[1]; */ 217 - ldw(s_space, 0, src, a2, cda_ldw_exc); 218 - ldw(s_space, 4, src, a3, cda_ldw_exc); 219 - src -=-2 * sizeof(unsigned int); 220 - dst -= 0 * sizeof(unsigned int); 221 - len -= 1; 222 - if (len == 0) 223 - goto do0; 224 - goto do4; /* No-op. */ 225 - } 226 - 227 - do 228 - { 229 - /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */ 230 - do4: 231 - /* a0 = ((unsigned int *) src)[0]; */ 232 - ldw(s_space, 0, src, a0, cda_ldw_exc); 233 - /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ 234 - stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); 235 - do3: 236 - /* a1 = ((unsigned int *) src)[1]; */ 237 - ldw(s_space, 4, src, a1, cda_ldw_exc); 238 - /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */ 239 - stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc); 240 - do2: 241 - /* a2 = ((unsigned int *) src)[2]; */ 242 - ldw(s_space, 8, src, a2, cda_ldw_exc); 243 - /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */ 244 - stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc); 245 - do1: 246 - /* a3 = ((unsigned int *) src)[3]; */ 247 - ldw(s_space, 12, src, a3, cda_ldw_exc); 248 - /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */ 249 - stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc); 250 - 251 - src += 4 * sizeof(unsigned int); 252 - dst += 4 * sizeof(unsigned int); 253 - len -= 4; 254 - } 255 - while (len != 0); 256 - 257 - do0: 258 - /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ 259 - stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); 260 - 261 - preserve_branch(handle_load_error); 262 - preserve_branch(handle_store_error); 263 - 264 - return PA_MEMCPY_OK; 265 - 266 - handle_load_error: 267 - __asm__ __volatile__ ("cda_ldw_exc:\n"); 268 - return PA_MEMCPY_LOAD_ERROR; 269 - 270 - handle_store_error: 271 - __asm__ __volatile__ ("cda_stw_exc:\n"); 272 - return PA_MEMCPY_STORE_ERROR; 
273 - } 274 - 275 - 276 - /* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. 277 - * In case of an access fault the faulty address can be read from the per_cpu 278 - * exception data struct. */ 279 - static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp, 280 - unsigned long len) 281 - { 282 - register unsigned long src, dst, t1, t2, t3; 283 - register unsigned char *pcs, *pcd; 284 - register unsigned int *pws, *pwd; 285 - register double *pds, *pdd; 286 - unsigned long ret; 287 - 288 - src = (unsigned long)srcp; 289 - dst = (unsigned long)dstp; 290 - pcs = (unsigned char *)srcp; 291 - pcd = (unsigned char *)dstp; 292 - 293 - /* prefetch_src((const void *)srcp); */ 294 - 295 - if (len < THRESHOLD) 296 - goto byte_copy; 297 - 298 - /* Check alignment */ 299 - t1 = (src ^ dst); 300 - if (unlikely(t1 & (sizeof(double)-1))) 301 - goto unaligned_copy; 302 - 303 - /* src and dst have same alignment. */ 304 - 305 - /* Copy bytes till we are double-aligned. 
*/ 306 - t2 = src & (sizeof(double) - 1); 307 - if (unlikely(t2 != 0)) { 308 - t2 = sizeof(double) - t2; 309 - while (t2 && len) { 310 - /* *pcd++ = *pcs++; */ 311 - ldbma(s_space, pcs, t3, pmc_load_exc); 312 - len--; 313 - stbma(d_space, t3, pcd, pmc_store_exc); 314 - t2--; 315 - } 316 - } 317 - 318 - pds = (double *)pcs; 319 - pdd = (double *)pcd; 320 - 321 - #if 0 322 - /* Copy 8 doubles at a time */ 323 - while (len >= 8*sizeof(double)) { 324 - register double r1, r2, r3, r4, r5, r6, r7, r8; 325 - /* prefetch_src((char *)pds + L1_CACHE_BYTES); */ 326 - flddma(s_space, pds, r1, pmc_load_exc); 327 - flddma(s_space, pds, r2, pmc_load_exc); 328 - flddma(s_space, pds, r3, pmc_load_exc); 329 - flddma(s_space, pds, r4, pmc_load_exc); 330 - fstdma(d_space, r1, pdd, pmc_store_exc); 331 - fstdma(d_space, r2, pdd, pmc_store_exc); 332 - fstdma(d_space, r3, pdd, pmc_store_exc); 333 - fstdma(d_space, r4, pdd, pmc_store_exc); 334 - 335 - #if 0 336 - if (L1_CACHE_BYTES <= 32) 337 - prefetch_src((char *)pds + L1_CACHE_BYTES); 338 - #endif 339 - flddma(s_space, pds, r5, pmc_load_exc); 340 - flddma(s_space, pds, r6, pmc_load_exc); 341 - flddma(s_space, pds, r7, pmc_load_exc); 342 - flddma(s_space, pds, r8, pmc_load_exc); 343 - fstdma(d_space, r5, pdd, pmc_store_exc); 344 - fstdma(d_space, r6, pdd, pmc_store_exc); 345 - fstdma(d_space, r7, pdd, pmc_store_exc); 346 - fstdma(d_space, r8, pdd, pmc_store_exc); 347 - len -= 8*sizeof(double); 348 - } 349 - #endif 350 - 351 - pws = (unsigned int *)pds; 352 - pwd = (unsigned int *)pdd; 353 - 354 - word_copy: 355 - while (len >= 8*sizeof(unsigned int)) { 356 - register unsigned int r1,r2,r3,r4,r5,r6,r7,r8; 357 - /* prefetch_src((char *)pws + L1_CACHE_BYTES); */ 358 - ldwma(s_space, pws, r1, pmc_load_exc); 359 - ldwma(s_space, pws, r2, pmc_load_exc); 360 - ldwma(s_space, pws, r3, pmc_load_exc); 361 - ldwma(s_space, pws, r4, pmc_load_exc); 362 - stwma(d_space, r1, pwd, pmc_store_exc); 363 - stwma(d_space, r2, pwd, pmc_store_exc); 364 - 
stwma(d_space, r3, pwd, pmc_store_exc); 365 - stwma(d_space, r4, pwd, pmc_store_exc); 366 - 367 - ldwma(s_space, pws, r5, pmc_load_exc); 368 - ldwma(s_space, pws, r6, pmc_load_exc); 369 - ldwma(s_space, pws, r7, pmc_load_exc); 370 - ldwma(s_space, pws, r8, pmc_load_exc); 371 - stwma(d_space, r5, pwd, pmc_store_exc); 372 - stwma(d_space, r6, pwd, pmc_store_exc); 373 - stwma(d_space, r7, pwd, pmc_store_exc); 374 - stwma(d_space, r8, pwd, pmc_store_exc); 375 - len -= 8*sizeof(unsigned int); 376 - } 377 - 378 - while (len >= 4*sizeof(unsigned int)) { 379 - register unsigned int r1,r2,r3,r4; 380 - ldwma(s_space, pws, r1, pmc_load_exc); 381 - ldwma(s_space, pws, r2, pmc_load_exc); 382 - ldwma(s_space, pws, r3, pmc_load_exc); 383 - ldwma(s_space, pws, r4, pmc_load_exc); 384 - stwma(d_space, r1, pwd, pmc_store_exc); 385 - stwma(d_space, r2, pwd, pmc_store_exc); 386 - stwma(d_space, r3, pwd, pmc_store_exc); 387 - stwma(d_space, r4, pwd, pmc_store_exc); 388 - len -= 4*sizeof(unsigned int); 389 - } 390 - 391 - pcs = (unsigned char *)pws; 392 - pcd = (unsigned char *)pwd; 393 - 394 - byte_copy: 395 - while (len) { 396 - /* *pcd++ = *pcs++; */ 397 - ldbma(s_space, pcs, t3, pmc_load_exc); 398 - stbma(d_space, t3, pcd, pmc_store_exc); 399 - len--; 400 - } 401 - 402 - return PA_MEMCPY_OK; 403 - 404 - unaligned_copy: 405 - /* possibly we are aligned on a word, but not on a double... */ 406 - if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) { 407 - t2 = src & (sizeof(unsigned int) - 1); 408 - 409 - if (unlikely(t2 != 0)) { 410 - t2 = sizeof(unsigned int) - t2; 411 - while (t2) { 412 - /* *pcd++ = *pcs++; */ 413 - ldbma(s_space, pcs, t3, pmc_load_exc); 414 - stbma(d_space, t3, pcd, pmc_store_exc); 415 - len--; 416 - t2--; 417 - } 418 - } 419 - 420 - pws = (unsigned int *)pcs; 421 - pwd = (unsigned int *)pcd; 422 - goto word_copy; 423 - } 424 - 425 - /* Align the destination. 
*/ 426 - if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) { 427 - t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1)); 428 - while (t2) { 429 - /* *pcd++ = *pcs++; */ 430 - ldbma(s_space, pcs, t3, pmc_load_exc); 431 - stbma(d_space, t3, pcd, pmc_store_exc); 432 - len--; 433 - t2--; 434 - } 435 - dst = (unsigned long)pcd; 436 - src = (unsigned long)pcs; 437 - } 438 - 439 - ret = copy_dstaligned(dst, src, len / sizeof(unsigned int)); 440 - if (ret) 441 - return ret; 442 - 443 - pcs += (len & -sizeof(unsigned int)); 444 - pcd += (len & -sizeof(unsigned int)); 445 - len %= sizeof(unsigned int); 446 - 447 - preserve_branch(handle_load_error); 448 - preserve_branch(handle_store_error); 449 - 450 - goto byte_copy; 451 - 452 - handle_load_error: 453 - __asm__ __volatile__ ("pmc_load_exc:\n"); 454 - return PA_MEMCPY_LOAD_ERROR; 455 - 456 - handle_store_error: 457 - __asm__ __volatile__ ("pmc_store_exc:\n"); 458 - return PA_MEMCPY_STORE_ERROR; 459 - } 460 - 461 - 462 35 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ 463 - static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) 464 - { 465 - unsigned long ret, fault_addr, reference; 466 - struct exception_data *d; 36 + extern unsigned long pa_memcpy(void *dst, const void *src, 37 + unsigned long len); 467 38 468 - ret = pa_memcpy_internal(dstp, srcp, len); 469 - if (likely(ret == PA_MEMCPY_OK)) 470 - return 0; 471 - 472 - /* if a load or store fault occured we can get the faulty addr */ 473 - d = this_cpu_ptr(&exception_data); 474 - fault_addr = d->fault_addr; 475 - 476 - /* error in load or store? 
*/ 477 - if (ret == PA_MEMCPY_LOAD_ERROR) 478 - reference = (unsigned long) srcp; 479 - else 480 - reference = (unsigned long) dstp; 481 - 482 - DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n", 483 - ret, len, fault_addr, reference); 484 - 485 - if (fault_addr >= reference) 486 - return len - (fault_addr - reference); 487 - else 488 - return len; 489 - } 490 - 491 - #ifdef __KERNEL__ 492 39 unsigned long __copy_to_user(void __user *dst, const void *src, 493 40 unsigned long len) 494 41 { ··· 84 537 85 538 return __probe_kernel_read(dst, src, size); 86 539 } 87 - 88 - #endif
+17
arch/parisc/mm/fault.c
··· 150 150 d->fault_space = regs->isr; 151 151 d->fault_addr = regs->ior; 152 152 153 + /* 154 + * Fix up get_user() and put_user(). 155 + * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant 156 + * bit in the relative address of the fixup routine to indicate 157 + * that %r8 should be loaded with -EFAULT to report a userspace 158 + * access error. 159 + */ 160 + if (fix->fixup & 1) { 161 + regs->gr[8] = -EFAULT; 162 + 163 + /* zero target register for get_user() */ 164 + if (parisc_acctyp(0, regs->iir) == VM_READ) { 165 + int treg = regs->iir & 0x1f; 166 + regs->gr[treg] = 0; 167 + } 168 + } 169 + 153 170 regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup; 154 171 regs->iaoq[0] &= ~3; 155 172 /*
-1
arch/s390/include/asm/sections.h
··· 4 4 #include <asm-generic/sections.h> 5 5 6 6 extern char _eshared[], _ehead[]; 7 - extern char __start_ro_after_init[], __end_ro_after_init[]; 8 7 9 8 #endif
-2
arch/s390/kernel/vmlinux.lds.S
··· 63 63 64 64 . = ALIGN(PAGE_SIZE); 65 65 __start_ro_after_init = .; 66 - __start_data_ro_after_init = .; 67 66 .data..ro_after_init : { 68 67 *(.data..ro_after_init) 69 68 } 70 - __end_data_ro_after_init = .; 71 69 EXCEPTION_TABLE(16) 72 70 . = ALIGN(PAGE_SIZE); 73 71 __end_ro_after_init = .;
+1 -1
arch/sparc/kernel/ptrace_64.c
··· 351 351 } 352 352 353 353 if (!ret) { 354 - unsigned long y; 354 + unsigned long y = regs->y; 355 355 356 356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 357 357 &y,
+31 -4
arch/x86/Makefile
··· 120 120 # -funit-at-a-time shrinks the kernel .text considerably 121 121 # unfortunately it makes reading oopses harder. 122 122 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) 123 - 124 - # this works around some issues with generating unwind tables in older gccs 125 - # newer gccs do it by default 126 - KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args) 127 123 endif 128 124 129 125 ifdef CONFIG_X86_X32 ··· 141 145 # Don't unroll struct assignments with kmemcheck enabled 142 146 ifeq ($(CONFIG_KMEMCHECK),y) 143 147 KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) 148 + endif 149 + 150 + # 151 + # If the function graph tracer is used with mcount instead of fentry, 152 + # '-maccumulate-outgoing-args' is needed to prevent a GCC bug 153 + # (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42109) 154 + # 155 + ifdef CONFIG_FUNCTION_GRAPH_TRACER 156 + ifndef CONFIG_HAVE_FENTRY 157 + ACCUMULATE_OUTGOING_ARGS := 1 158 + else 159 + ifeq ($(call cc-option-yn, -mfentry), n) 160 + ACCUMULATE_OUTGOING_ARGS := 1 161 + endif 162 + endif 163 + endif 164 + 165 + # 166 + # Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a 167 + # GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way 168 + # to test for this bug at compile-time because the test case needs to execute, 169 + # which is a no-go for cross compilers. So check the GCC version instead. 170 + # 171 + ifdef CONFIG_JUMP_LABEL 172 + ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1) 173 + ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1) 174 + endif 175 + endif 176 + 177 + ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) 178 + KBUILD_CFLAGS += -maccumulate-outgoing-args 144 179 endif 145 180 146 181 # Stackpointer is addressed different for 32 bit and 64 bit x86
-18
arch/x86/Makefile_32.cpu
··· 45 45 # cpu entries 46 46 cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686)) 47 47 48 - # Work around the pentium-mmx code generator madness of gcc4.4.x which 49 - # does stack alignment by generating horrible code _before_ the mcount 50 - # prologue (push %ebp, mov %esp, %ebp) which breaks the function graph 51 - # tracer assumptions. For i686, generic, core2 this is set by the 52 - # compiler anyway 53 - ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y) 54 - ADD_ACCUMULATE_OUTGOING_ARGS := y 55 - endif 56 - 57 - # Work around to a bug with asm goto with first implementations of it 58 - # in gcc causing gcc to mess up the push and pop of the stack in some 59 - # uses of asm goto. 60 - ifeq ($(CONFIG_JUMP_LABEL), y) 61 - ADD_ACCUMULATE_OUTGOING_ARGS := y 62 - endif 63 - 64 - cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args) 65 - 66 48 # Bug fix for binutils: this option is required in order to keep 67 49 # binutils from generating NOPL instructions against our will. 68 50 ifneq ($(CONFIG_X86_P6_NOP),y)
+1
arch/x86/boot/compressed/error.c
··· 4 4 * memcpy() and memmove() are defined for the compressed boot environment. 5 5 */ 6 6 #include "misc.h" 7 + #include "error.h" 7 8 8 9 void warn(char *m) 9 10 {
+6 -3
arch/x86/events/core.c
··· 2256 2256 struct perf_event_mmap_page *userpg, u64 now) 2257 2257 { 2258 2258 struct cyc2ns_data *data; 2259 + u64 offset; 2259 2260 2260 2261 userpg->cap_user_time = 0; 2261 2262 userpg->cap_user_time_zero = 0; ··· 2264 2263 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); 2265 2264 userpg->pmc_width = x86_pmu.cntval_bits; 2266 2265 2267 - if (!sched_clock_stable()) 2266 + if (!using_native_sched_clock() || !sched_clock_stable()) 2268 2267 return; 2269 2268 2270 2269 data = cyc2ns_read_begin(); 2270 + 2271 + offset = data->cyc2ns_offset + __sched_clock_offset; 2271 2272 2272 2273 /* 2273 2274 * Internal timekeeping for enabled/running/stopped times ··· 2278 2275 userpg->cap_user_time = 1; 2279 2276 userpg->time_mult = data->cyc2ns_mul; 2280 2277 userpg->time_shift = data->cyc2ns_shift; 2281 - userpg->time_offset = data->cyc2ns_offset - now; 2278 + userpg->time_offset = offset - now; 2282 2279 2283 2280 /* 2284 2281 * cap_user_time_zero doesn't make sense when we're using a different ··· 2286 2283 */ 2287 2284 if (!event->attr.use_clockid) { 2288 2285 userpg->cap_user_time_zero = 1; 2289 - userpg->time_zero = data->cyc2ns_offset; 2286 + userpg->time_zero = offset; 2290 2287 } 2291 2288 2292 2289 cyc2ns_read_end(data);
+1
arch/x86/include/asm/kvm_page_track.h
··· 46 46 }; 47 47 48 48 void kvm_page_track_init(struct kvm *kvm); 49 + void kvm_page_track_cleanup(struct kvm *kvm); 49 50 50 51 void kvm_page_track_free_memslot(struct kvm_memory_slot *free, 51 52 struct kvm_memory_slot *dont);
+2
arch/x86/include/asm/timer.h
··· 12 12 13 13 extern int no_timer_check; 14 14 15 + extern bool using_native_sched_clock(void); 16 + 15 17 /* 16 18 * We use the full linear equation: f(x) = a + b*x, in order to allow 17 19 * a continuous function in the face of dynamic freq changes.
+5 -3
arch/x86/include/asm/uv/uv_hub.h
··· 485 485 486 486 if (paddr < uv_hub_info->lowmem_remap_top) 487 487 paddr |= uv_hub_info->lowmem_remap_base; 488 - paddr |= uv_hub_info->gnode_upper; 489 - if (m_val) 488 + 489 + if (m_val) { 490 + paddr |= uv_hub_info->gnode_upper; 490 491 paddr = ((paddr << uv_hub_info->m_shift) 491 492 >> uv_hub_info->m_shift) | 492 493 ((paddr >> uv_hub_info->m_val) 493 494 << uv_hub_info->n_lshift); 494 - else 495 + } else { 495 496 paddr |= uv_soc_phys_ram_to_nasid(paddr) 496 497 << uv_hub_info->gpa_shift; 498 + } 497 499 return paddr; 498 500 } 499 501
+2 -1
arch/x86/kernel/apic/x2apic_uv_x.c
··· 1105 1105 node_id.v = uv_read_local_mmr(UVH_NODE_ID); 1106 1106 uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val); 1107 1107 hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1; 1108 - hi->gnode_upper = (unsigned long)hi->gnode_extra << mn.m_val; 1108 + if (mn.m_val) 1109 + hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val; 1109 1110 1110 1111 if (uv_gp_table) { 1111 1112 hi->global_mmr_base = uv_gp_table->mmr_base;
+1 -1
arch/x86/kernel/cpu/mcheck/mce_amd.c
··· 60 60 "load_store", 61 61 "insn_fetch", 62 62 "combined_unit", 63 - "", 63 + "decode_unit", 64 64 "northbridge", 65 65 "execution_unit", 66 66 };
+6
arch/x86/kernel/ftrace.c
··· 29 29 #include <asm/ftrace.h> 30 30 #include <asm/nops.h> 31 31 32 + #if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \ 33 + !defined(CC_USING_FENTRY) && \ 34 + !defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE) 35 + # error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE 36 + #endif 37 + 32 38 #ifdef CONFIG_DYNAMIC_FTRACE 33 39 34 40 int ftrace_arch_code_modify_prepare(void)
+2 -2
arch/x86/kernel/tsc.c
··· 328 328 return paravirt_sched_clock(); 329 329 } 330 330 331 - static inline bool using_native_sched_clock(void) 331 + bool using_native_sched_clock(void) 332 332 { 333 333 return pv_time_ops.sched_clock == native_sched_clock; 334 334 } ··· 336 336 unsigned long long 337 337 sched_clock(void) __attribute__((alias("native_sched_clock"))); 338 338 339 - static inline bool using_native_sched_clock(void) { return true; } 339 + bool using_native_sched_clock(void) { return true; } 340 340 #endif 341 341 342 342 int check_tsc_unstable(void)
+3
arch/x86/kvm/i8259.c
··· 657 657 { 658 658 struct kvm_pic *vpic = kvm->arch.vpic; 659 659 660 + if (!vpic) 661 + return; 662 + 660 663 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); 661 664 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); 662 665 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
+3
arch/x86/kvm/ioapic.c
··· 635 635 { 636 636 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 637 637 638 + if (!ioapic) 639 + return; 640 + 638 641 cancel_delayed_work_sync(&ioapic->eoi_inject); 639 642 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); 640 643 kvm->arch.vioapic = NULL;
+8
arch/x86/kvm/page_track.c
··· 160 160 return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); 161 161 } 162 162 163 + void kvm_page_track_cleanup(struct kvm *kvm) 164 + { 165 + struct kvm_page_track_notifier_head *head; 166 + 167 + head = &kvm->arch.track_notifier_head; 168 + cleanup_srcu_struct(&head->track_srcu); 169 + } 170 + 163 171 void kvm_page_track_init(struct kvm *kvm) 164 172 { 165 173 struct kvm_page_track_notifier_head *head;
+3
arch/x86/kvm/svm.c
··· 1379 1379 unsigned long flags; 1380 1380 struct kvm_arch *vm_data = &kvm->arch; 1381 1381 1382 + if (!avic) 1383 + return; 1384 + 1382 1385 avic_free_vm_id(vm_data->avic_vm_id); 1383 1386 1384 1387 if (vm_data->avic_logical_id_table_page)
+32 -12
arch/x86/kvm/vmx.c
··· 1239 1239 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; 1240 1240 } 1241 1241 1242 + static inline bool cpu_has_vmx_invvpid(void) 1243 + { 1244 + return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; 1245 + } 1246 + 1242 1247 static inline bool cpu_has_vmx_ept(void) 1243 1248 { 1244 1249 return vmcs_config.cpu_based_2nd_exec_ctrl & ··· 2758 2753 SECONDARY_EXEC_RDTSCP | 2759 2754 SECONDARY_EXEC_DESC | 2760 2755 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2761 - SECONDARY_EXEC_ENABLE_VPID | 2762 2756 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2763 2757 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2764 2758 SECONDARY_EXEC_WBINVD_EXITING | ··· 2785 2781 * though it is treated as global context. The alternative is 2786 2782 * not failing the single-context invvpid, and it is worse. 2787 2783 */ 2788 - if (enable_vpid) 2784 + if (enable_vpid) { 2785 + vmx->nested.nested_vmx_secondary_ctls_high |= 2786 + SECONDARY_EXEC_ENABLE_VPID; 2789 2787 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | 2790 2788 VMX_VPID_EXTENT_SUPPORTED_MASK; 2791 - else 2789 + } else 2792 2790 vmx->nested.nested_vmx_vpid_caps = 0; 2793 2791 2794 2792 if (enable_unrestricted_guest) ··· 4028 4022 static void vmx_flush_tlb(struct kvm_vcpu *vcpu) 4029 4023 { 4030 4024 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); 4025 + } 4026 + 4027 + static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) 4028 + { 4029 + if (enable_ept) 4030 + vmx_flush_tlb(vcpu); 4031 4031 } 4032 4032 4033 4033 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) ··· 6529 6517 if (boot_cpu_has(X86_FEATURE_NX)) 6530 6518 kvm_enable_efer_bits(EFER_NX); 6531 6519 6532 - if (!cpu_has_vmx_vpid()) 6520 + if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || 6521 + !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) 6533 6522 enable_vpid = 0; 6523 + 6534 6524 if (!cpu_has_vmx_shadow_vmcs()) 6535 6525 enable_shadow_vmcs = 0; 6536 6526 if (enable_shadow_vmcs) ··· 8515 8501 && kvm_vmx_exit_handlers[exit_reason]) 
8516 8502 return kvm_vmx_exit_handlers[exit_reason](vcpu); 8517 8503 else { 8518 - WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); 8504 + vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", 8505 + exit_reason); 8519 8506 kvm_queue_exception(vcpu, UD_VECTOR); 8520 8507 return 1; 8521 8508 } ··· 8562 8547 } else { 8563 8548 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 8564 8549 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 8550 + vmx_flush_tlb_ept_only(vcpu); 8565 8551 } 8566 8552 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); 8567 8553 ··· 8588 8572 */ 8589 8573 if (!is_guest_mode(vcpu) || 8590 8574 !nested_cpu_has2(get_vmcs12(&vmx->vcpu), 8591 - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 8575 + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 8592 8576 vmcs_write64(APIC_ACCESS_ADDR, hpa); 8577 + vmx_flush_tlb_ept_only(vcpu); 8578 + } 8593 8579 } 8594 8580 8595 8581 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) ··· 9992 9974 { 9993 9975 struct vcpu_vmx *vmx = to_vmx(vcpu); 9994 9976 u32 exec_control; 9995 - bool nested_ept_enabled = false; 9996 9977 9997 9978 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 9998 9979 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); ··· 10138 10121 vmcs12->guest_intr_status); 10139 10122 } 10140 10123 10141 - nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0; 10142 - 10143 10124 /* 10144 10125 * Write an illegal value to APIC_ACCESS_ADDR. 
Later, 10145 10126 * nested_get_vmcs12_pages will either fix it up or ··· 10270 10255 if (nested_cpu_has_ept(vmcs12)) { 10271 10256 kvm_mmu_unload(vcpu); 10272 10257 nested_ept_init_mmu_context(vcpu); 10258 + } else if (nested_cpu_has2(vmcs12, 10259 + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 10260 + vmx_flush_tlb_ept_only(vcpu); 10273 10261 } 10274 10262 10275 10263 /* ··· 10300 10282 vmx_set_efer(vcpu, vcpu->arch.efer); 10301 10283 10302 10284 /* Shadow page tables on either EPT or shadow page tables. */ 10303 - if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled, 10285 + if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), 10304 10286 entry_failure_code)) 10305 10287 return 1; 10306 - 10307 - kvm_mmu_reset_context(vcpu); 10308 10288 10309 10289 if (!enable_ept) 10310 10290 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; ··· 11072 11056 vmx->nested.change_vmcs01_virtual_x2apic_mode = false; 11073 11057 vmx_set_virtual_x2apic_mode(vcpu, 11074 11058 vcpu->arch.apic_base & X2APIC_ENABLE); 11059 + } else if (!nested_cpu_has_ept(vmcs12) && 11060 + nested_cpu_has2(vmcs12, 11061 + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 11062 + vmx_flush_tlb_ept_only(vcpu); 11075 11063 } 11076 11064 11077 11065 /* This is needed for same reason as it was needed in prepare_vmcs02 */
+4 -3
arch/x86/kvm/x86.c
··· 8153 8153 if (kvm_x86_ops->vm_destroy) 8154 8154 kvm_x86_ops->vm_destroy(kvm); 8155 8155 kvm_iommu_unmap_guest(kvm); 8156 - kfree(kvm->arch.vpic); 8157 - kfree(kvm->arch.vioapic); 8156 + kvm_pic_destroy(kvm); 8157 + kvm_ioapic_destroy(kvm); 8158 8158 kvm_free_vcpus(kvm); 8159 8159 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 8160 8160 kvm_mmu_uninit_vm(kvm); 8161 + kvm_page_track_cleanup(kvm); 8161 8162 } 8162 8163 8163 8164 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, ··· 8567 8566 { 8568 8567 struct x86_exception fault; 8569 8568 8570 - trace_kvm_async_pf_ready(work->arch.token, work->gva); 8571 8569 if (work->wakeup_all) 8572 8570 work->arch.token = ~0; /* broadcast wakeup */ 8573 8571 else 8574 8572 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 8573 + trace_kvm_async_pf_ready(work->arch.token, work->gva); 8575 8574 8576 8575 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && 8577 8576 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+1 -1
arch/x86/lib/memcpy_64.S
··· 290 290 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) 291 291 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) 292 292 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) 293 - _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) 293 + _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail) 294 294 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) 295 295 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) 296 296 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
+2 -2
arch/x86/mm/kaslr.c
··· 48 48 #if defined(CONFIG_X86_ESPFIX64) 49 49 static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; 50 50 #elif defined(CONFIG_EFI) 51 - static const unsigned long vaddr_end = EFI_VA_START; 51 + static const unsigned long vaddr_end = EFI_VA_END; 52 52 #else 53 53 static const unsigned long vaddr_end = __START_KERNEL_map; 54 54 #endif ··· 105 105 */ 106 106 BUILD_BUG_ON(vaddr_start >= vaddr_end); 107 107 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && 108 - vaddr_end >= EFI_VA_START); 108 + vaddr_end >= EFI_VA_END); 109 109 BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || 110 110 IS_ENABLED(CONFIG_EFI)) && 111 111 vaddr_end >= __START_KERNEL_map);
+1
arch/x86/purgatory/Makefile
··· 8 8 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib 9 9 targets += purgatory.ro 10 10 11 + KASAN_SANITIZE := n 11 12 KCOV_INSTRUMENT := n 12 13 13 14 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
+4 -3
block/blk-mq.c
··· 969 969 struct request *rq; 970 970 LIST_HEAD(driver_list); 971 971 struct list_head *dptr; 972 - int queued, ret = BLK_MQ_RQ_QUEUE_OK; 972 + int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK; 973 973 974 974 /* 975 975 * Start off with dptr being NULL, so we start the first request ··· 980 980 /* 981 981 * Now process all the entries, sending them to the driver. 982 982 */ 983 - queued = 0; 983 + errors = queued = 0; 984 984 while (!list_empty(list)) { 985 985 struct blk_mq_queue_data bd; 986 986 ··· 1037 1037 default: 1038 1038 pr_err("blk-mq: bad return on queue: %d\n", ret); 1039 1039 case BLK_MQ_RQ_QUEUE_ERROR: 1040 + errors++; 1040 1041 rq->errors = -EIO; 1041 1042 blk_mq_end_request(rq, rq->errors); 1042 1043 break; ··· 1089 1088 blk_mq_run_hw_queue(hctx, true); 1090 1089 } 1091 1090 1092 - return queued != 0; 1091 + return (queued + errors) != 0; 1093 1092 } 1094 1093 1095 1094 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+5 -2
crypto/lrw.c
··· 286 286 287 287 subreq->cryptlen = LRW_BUFFER_SIZE; 288 288 if (req->cryptlen > LRW_BUFFER_SIZE) { 289 - subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 290 - rctx->ext = kmalloc(subreq->cryptlen, gfp); 289 + unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE); 290 + 291 + rctx->ext = kmalloc(n, gfp); 292 + if (rctx->ext) 293 + subreq->cryptlen = n; 291 294 } 292 295 293 296 rctx->src = req->src;
+5 -2
crypto/xts.c
··· 230 230 231 231 subreq->cryptlen = XTS_BUFFER_SIZE; 232 232 if (req->cryptlen > XTS_BUFFER_SIZE) { 233 - subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 234 - rctx->ext = kmalloc(subreq->cryptlen, gfp); 233 + unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE); 234 + 235 + rctx->ext = kmalloc(n, gfp); 236 + if (rctx->ext) 237 + subreq->cryptlen = n; 235 238 } 236 239 237 240 rctx->src = req->src;
-1
drivers/acpi/Makefile
··· 2 2 # Makefile for the Linux ACPI interpreter 3 3 # 4 4 5 - ccflags-y := -Os 6 5 ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT 7 6 8 7 #
+5 -3
drivers/acpi/acpi_platform.c
··· 25 25 ACPI_MODULE_NAME("platform"); 26 26 27 27 static const struct acpi_device_id forbidden_id_list[] = { 28 - {"PNP0000", 0}, /* PIC */ 29 - {"PNP0100", 0}, /* Timer */ 30 - {"PNP0200", 0}, /* AT DMA Controller */ 28 + {"PNP0000", 0}, /* PIC */ 29 + {"PNP0100", 0}, /* Timer */ 30 + {"PNP0200", 0}, /* AT DMA Controller */ 31 + {"ACPI0009", 0}, /* IOxAPIC */ 32 + {"ACPI000A", 0}, /* IOAPIC */ 31 33 {"", 0}, 32 34 }; 33 35
+1
drivers/acpi/apei/ghes.c
··· 1073 1073 if (list_empty(&ghes_sci)) 1074 1074 unregister_acpi_hed_notifier(&ghes_notifier_sci); 1075 1075 mutex_unlock(&ghes_list_mutex); 1076 + synchronize_rcu(); 1076 1077 break; 1077 1078 case ACPI_HEST_NOTIFY_NMI: 1078 1079 ghes_nmi_remove(ghes);
+6
drivers/acpi/ioapic.c
··· 45 45 struct resource *res = data; 46 46 struct resource_win win; 47 47 48 + /* 49 + * We might assign this to 'res' later, make sure all pointers are 50 + * cleared before the resource is added to the global list 51 + */ 52 + memset(&win, 0, sizeof(win)); 53 + 48 54 res->flags = 0; 49 55 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM)) 50 56 return AE_OK;
+103 -33
drivers/block/nbd.c
··· 47 47 struct nbd_sock { 48 48 struct socket *sock; 49 49 struct mutex tx_lock; 50 + struct request *pending; 51 + int sent; 50 52 }; 51 53 52 54 #define NBD_TIMEDOUT 0 ··· 126 124 127 125 static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev) 128 126 { 129 - bd_set_size(bdev, 0); 127 + if (bdev->bd_openers <= 1) 128 + bd_set_size(bdev, 0); 130 129 set_capacity(nbd->disk, 0); 131 130 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 132 131 ··· 193 190 194 191 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); 195 192 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); 196 - req->errors++; 193 + req->errors = -EIO; 197 194 198 195 mutex_lock(&nbd->config_lock); 199 196 sock_shutdown(nbd); ··· 205 202 * Send or receive packet. 206 203 */ 207 204 static int sock_xmit(struct nbd_device *nbd, int index, int send, 208 - struct iov_iter *iter, int msg_flags) 205 + struct iov_iter *iter, int msg_flags, int *sent) 209 206 { 210 207 struct socket *sock = nbd->socks[index]->sock; 211 208 int result; ··· 240 237 result = -EPIPE; /* short read */ 241 238 break; 242 239 } 240 + if (sent) 241 + *sent += result; 243 242 } while (msg_data_left(&msg)); 244 243 245 244 tsk_restore_flags(current, pflags, PF_MEMALLOC); ··· 253 248 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 254 249 { 255 250 struct request *req = blk_mq_rq_from_pdu(cmd); 251 + struct nbd_sock *nsock = nbd->socks[index]; 256 252 int result; 257 253 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; 258 254 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; ··· 262 256 struct bio *bio; 263 257 u32 type; 264 258 u32 tag = blk_mq_unique_tag(req); 259 + int sent = nsock->sent, skip = 0; 265 260 266 261 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 267 262 ··· 290 283 return -EIO; 291 284 } 292 285 286 + /* We did a partial send previously, and we at least sent the whole 287 + * request 
struct, so just go and send the rest of the pages in the 288 + * request. 289 + */ 290 + if (sent) { 291 + if (sent >= sizeof(request)) { 292 + skip = sent - sizeof(request); 293 + goto send_pages; 294 + } 295 + iov_iter_advance(&from, sent); 296 + } 293 297 request.type = htonl(type); 294 298 if (type != NBD_CMD_FLUSH) { 295 299 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); ··· 312 294 cmd, nbdcmd_to_ascii(type), 313 295 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); 314 296 result = sock_xmit(nbd, index, 1, &from, 315 - (type == NBD_CMD_WRITE) ? MSG_MORE : 0); 297 + (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); 316 298 if (result <= 0) { 299 + if (result == -ERESTARTSYS) { 300 + /* If we havne't sent anything we can just return BUSY, 301 + * however if we have sent something we need to make 302 + * sure we only allow this req to be sent until we are 303 + * completely done. 304 + */ 305 + if (sent) { 306 + nsock->pending = req; 307 + nsock->sent = sent; 308 + } 309 + return BLK_MQ_RQ_QUEUE_BUSY; 310 + } 317 311 dev_err_ratelimited(disk_to_dev(nbd->disk), 318 312 "Send control failed (result %d)\n", result); 319 313 return -EIO; 320 314 } 321 - 315 + send_pages: 322 316 if (type != NBD_CMD_WRITE) 323 - return 0; 317 + goto out; 324 318 325 319 bio = req->bio; 326 320 while (bio) { ··· 348 318 cmd, bvec.bv_len); 349 319 iov_iter_bvec(&from, ITER_BVEC | WRITE, 350 320 &bvec, 1, bvec.bv_len); 351 - result = sock_xmit(nbd, index, 1, &from, flags); 321 + if (skip) { 322 + if (skip >= iov_iter_count(&from)) { 323 + skip -= iov_iter_count(&from); 324 + continue; 325 + } 326 + iov_iter_advance(&from, skip); 327 + skip = 0; 328 + } 329 + result = sock_xmit(nbd, index, 1, &from, flags, &sent); 352 330 if (result <= 0) { 331 + if (result == -ERESTARTSYS) { 332 + /* We've already sent the header, we 333 + * have no choice but to set pending and 334 + * return BUSY. 
335 + */ 336 + nsock->pending = req; 337 + nsock->sent = sent; 338 + return BLK_MQ_RQ_QUEUE_BUSY; 339 + } 353 340 dev_err(disk_to_dev(nbd->disk), 354 341 "Send data failed (result %d)\n", 355 342 result); ··· 383 336 } 384 337 bio = next; 385 338 } 339 + out: 340 + nsock->pending = NULL; 341 + nsock->sent = 0; 386 342 return 0; 387 343 } 388 344 ··· 403 353 404 354 reply.magic = 0; 405 355 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 406 - result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 356 + result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 407 357 if (result <= 0) { 408 358 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && 409 359 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) ··· 433 383 if (ntohl(reply.error)) { 434 384 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 435 385 ntohl(reply.error)); 436 - req->errors++; 386 + req->errors = -EIO; 437 387 return cmd; 438 388 } 439 389 ··· 445 395 rq_for_each_segment(bvec, req, iter) { 446 396 iov_iter_bvec(&to, ITER_BVEC | READ, 447 397 &bvec, 1, bvec.bv_len); 448 - result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 398 + result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 449 399 if (result <= 0) { 450 400 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 451 401 result); 452 - req->errors++; 402 + req->errors = -EIO; 453 403 return cmd; 454 404 } 455 405 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", ··· 519 469 if (!blk_mq_request_started(req)) 520 470 return; 521 471 cmd = blk_mq_rq_to_pdu(req); 522 - req->errors++; 472 + req->errors = -EIO; 523 473 nbd_end_request(cmd); 524 474 } 525 475 ··· 532 482 } 533 483 534 484 535 - static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) 485 + static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) 536 486 { 537 487 struct request *req = blk_mq_rq_from_pdu(cmd); 538 488 struct nbd_device *nbd = cmd->nbd; 539 489 struct nbd_sock *nsock; 490 + int ret; 540 491 
541 492 if (index >= nbd->num_connections) { 542 493 dev_err_ratelimited(disk_to_dev(nbd->disk), 543 494 "Attempted send on invalid socket\n"); 544 - goto error_out; 495 + return -EINVAL; 545 496 } 546 497 547 498 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) { 548 499 dev_err_ratelimited(disk_to_dev(nbd->disk), 549 500 "Attempted send on closed socket\n"); 550 - goto error_out; 501 + return -EINVAL; 551 502 } 552 503 553 504 req->errors = 0; ··· 559 508 mutex_unlock(&nsock->tx_lock); 560 509 dev_err_ratelimited(disk_to_dev(nbd->disk), 561 510 "Attempted send on closed socket\n"); 562 - goto error_out; 511 + return -EINVAL; 563 512 } 564 513 565 - if (nbd_send_cmd(nbd, cmd, index) != 0) { 566 - dev_err_ratelimited(disk_to_dev(nbd->disk), 567 - "Request send failed\n"); 568 - req->errors++; 569 - nbd_end_request(cmd); 514 + /* Handle the case that we have a pending request that was partially 515 + * transmitted that _has_ to be serviced first. We need to call requeue 516 + * here so that it gets put _after_ the request that is already on the 517 + * dispatch list. 
518 + */ 519 + if (unlikely(nsock->pending && nsock->pending != req)) { 520 + blk_mq_requeue_request(req, true); 521 + ret = 0; 522 + goto out; 570 523 } 571 - 524 + ret = nbd_send_cmd(nbd, cmd, index); 525 + out: 572 526 mutex_unlock(&nsock->tx_lock); 573 - 574 - return; 575 - 576 - error_out: 577 - req->errors++; 578 - nbd_end_request(cmd); 527 + return ret; 579 528 } 580 529 581 530 static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, 582 531 const struct blk_mq_queue_data *bd) 583 532 { 584 533 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 534 + int ret; 585 535 586 536 /* 587 537 * Since we look at the bio's to send the request over the network we ··· 595 543 */ 596 544 init_completion(&cmd->send_complete); 597 545 blk_mq_start_request(bd->rq); 598 - nbd_handle_cmd(cmd, hctx->queue_num); 546 + 547 + /* We can be called directly from the user space process, which means we 548 + * could possibly have signals pending so our sendmsg will fail. In 549 + * this case we need to return that we are busy, otherwise error out as 550 + * appropriate. 
551 + */ 552 + ret = nbd_handle_cmd(cmd, hctx->queue_num); 553 + if (ret < 0) 554 + ret = BLK_MQ_RQ_QUEUE_ERROR; 555 + if (!ret) 556 + ret = BLK_MQ_RQ_QUEUE_OK; 599 557 complete(&cmd->send_complete); 600 558 601 - return BLK_MQ_RQ_QUEUE_OK; 559 + return ret; 602 560 } 603 561 604 562 static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev, ··· 643 581 644 582 mutex_init(&nsock->tx_lock); 645 583 nsock->sock = sock; 584 + nsock->pending = NULL; 585 + nsock->sent = 0; 646 586 socks[nbd->num_connections++] = nsock; 647 587 648 588 if (max_part) ··· 666 602 667 603 static void nbd_bdev_reset(struct block_device *bdev) 668 604 { 605 + if (bdev->bd_openers > 1) 606 + return; 669 607 set_device_ro(bdev, false); 670 608 bdev->bd_inode->i_size = 0; 671 609 if (max_part > 0) { ··· 700 634 701 635 for (i = 0; i < nbd->num_connections; i++) { 702 636 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 703 - ret = sock_xmit(nbd, i, 1, &from, 0); 637 + ret = sock_xmit(nbd, i, 1, &from, 0, NULL); 704 638 if (ret <= 0) 705 639 dev_err(disk_to_dev(nbd->disk), 706 640 "Send disconnect failed %d\n", ret); ··· 731 665 { 732 666 sock_shutdown(nbd); 733 667 nbd_clear_que(nbd); 734 - kill_bdev(bdev); 668 + 669 + __invalidate_device(bdev, true); 735 670 nbd_bdev_reset(bdev); 736 671 /* 737 672 * We want to give the run thread a chance to wait for everybody ··· 848 781 nbd_size_set(nbd, bdev, nbd->blksize, arg); 849 782 return 0; 850 783 case NBD_SET_TIMEOUT: 851 - nbd->tag_set.timeout = arg * HZ; 784 + if (arg) { 785 + nbd->tag_set.timeout = arg * HZ; 786 + blk_queue_rq_timeout(nbd->disk->queue, arg * HZ); 787 + } 852 788 return 0; 853 789 854 790 case NBD_SET_FLAGS:
+1 -1
drivers/clocksource/clkevt-probe.c
··· 17 17 18 18 #include <linux/init.h> 19 19 #include <linux/of.h> 20 - #include <linux/clockchip.h> 20 + #include <linux/clockchips.h> 21 21 22 22 extern struct of_device_id __clkevt_of_table[]; 23 23
+21 -17
drivers/cpufreq/cpufreq.c
··· 918 918 .release = cpufreq_sysfs_release, 919 919 }; 920 920 921 - static int add_cpu_dev_symlink(struct cpufreq_policy *policy, 922 - struct device *dev) 921 + static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu) 923 922 { 923 + struct device *dev = get_cpu_device(cpu); 924 + 925 + if (!dev) 926 + return; 927 + 928 + if (cpumask_test_and_set_cpu(cpu, policy->real_cpus)) 929 + return; 930 + 924 931 dev_dbg(dev, "%s: Adding symlink\n", __func__); 925 - return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 932 + if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq")) 933 + dev_err(dev, "cpufreq symlink creation failed\n"); 926 934 } 927 935 928 936 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, ··· 1188 1180 policy->user_policy.min = policy->min; 1189 1181 policy->user_policy.max = policy->max; 1190 1182 1191 - write_lock_irqsave(&cpufreq_driver_lock, flags); 1192 - for_each_cpu(j, policy->related_cpus) 1183 + for_each_cpu(j, policy->related_cpus) { 1193 1184 per_cpu(cpufreq_cpu_data, j) = policy; 1194 - write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1185 + add_cpu_dev_symlink(policy, j); 1186 + } 1195 1187 } else { 1196 1188 policy->min = policy->user_policy.min; 1197 1189 policy->max = policy->user_policy.max; ··· 1283 1275 1284 1276 if (cpufreq_driver->exit) 1285 1277 cpufreq_driver->exit(policy); 1278 + 1279 + for_each_cpu(j, policy->real_cpus) 1280 + remove_cpu_dev_symlink(policy, get_cpu_device(j)); 1281 + 1286 1282 out_free_policy: 1287 1283 cpufreq_policy_free(policy); 1288 1284 return ret; 1289 1285 } 1290 - 1291 - static int cpufreq_offline(unsigned int cpu); 1292 1286 1293 1287 /** 1294 1288 * cpufreq_add_dev - the cpufreq interface for a CPU device. 
··· 1313 1303 1314 1304 /* Create sysfs link on CPU registration */ 1315 1305 policy = per_cpu(cpufreq_cpu_data, cpu); 1316 - if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) 1317 - return 0; 1306 + if (policy) 1307 + add_cpu_dev_symlink(policy, cpu); 1318 1308 1319 - ret = add_cpu_dev_symlink(policy, dev); 1320 - if (ret) { 1321 - cpumask_clear_cpu(cpu, policy->real_cpus); 1322 - cpufreq_offline(cpu); 1323 - } 1324 - 1325 - return ret; 1309 + return 0; 1326 1310 } 1327 1311 1328 1312 static int cpufreq_offline(unsigned int cpu)
+18
drivers/cpuidle/cpuidle-powernv.c
··· 175 175 drv->state_count += 1; 176 176 } 177 177 178 + /* 179 + * On the PowerNV platform cpu_present may be less than cpu_possible in 180 + * cases when firmware detects the CPU, but it is not available to the 181 + * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at 182 + * run time and hence cpu_devices are not created for those CPUs by the 183 + * generic topology_init(). 184 + * 185 + * drv->cpumask defaults to cpu_possible_mask in 186 + * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where 187 + * cpu_devices are not created for CPUs in cpu_possible_mask that 188 + * cannot be hot-added later at run time. 189 + * 190 + * Trying cpuidle_register_device() on a CPU without a cpu_device is 191 + * incorrect, so pass a correct CPU mask to the generic cpuidle driver. 192 + */ 193 + 194 + drv->cpumask = (struct cpumask *)cpu_present_mask; 195 + 178 196 return 0; 179 197 } 180 198
+1
drivers/crypto/ccp/ccp-dev-v5.c
··· 1015 1015 1016 1016 const struct ccp_vdata ccpv5b = { 1017 1017 .version = CCP_VERSION(5, 0), 1018 + .dma_chan_attr = DMA_PRIVATE, 1018 1019 .setup = ccp5other_config, 1019 1020 .perform = &ccp5_actions, 1020 1021 .bar = 2,
+5
drivers/crypto/ccp/ccp-dev.h
··· 179 179 180 180 /* ------------------------ General CCP Defines ------------------------ */ 181 181 182 + #define CCP_DMA_DFLT 0x0 183 + #define CCP_DMA_PRIV 0x1 184 + #define CCP_DMA_PUB 0x2 185 + 182 186 #define CCP_DMAPOOL_MAX_SIZE 64 183 187 #define CCP_DMAPOOL_ALIGN BIT(5) 184 188 ··· 640 636 /* Structure to hold CCP version-specific values */ 641 637 struct ccp_vdata { 642 638 const unsigned int version; 639 + const unsigned int dma_chan_attr; 643 640 void (*setup)(struct ccp_device *); 644 641 const struct ccp_actions *perform; 645 642 const unsigned int bar;
+41
drivers/crypto/ccp/ccp-dmaengine.c
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #include <linux/module.h> 13 14 #include <linux/kernel.h> 14 15 #include <linux/dmaengine.h> 15 16 #include <linux/spinlock.h> ··· 25 24 u64 mask = _mask + 1; \ 26 25 (mask == 0) ? 64 : fls64(mask); \ 27 26 }) 27 + 28 + /* The CCP as a DMA provider can be configured for public or private 29 + * channels. Default is specified in the vdata for the device (PCI ID). 30 + * This module parameter will override for all channels on all devices: 31 + * dma_chan_attr = 0x2 to force all channels public 32 + * = 0x1 to force all channels private 33 + * = 0x0 to defer to the vdata setting 34 + * = any other value: warning, revert to 0x0 35 + */ 36 + static unsigned int dma_chan_attr = CCP_DMA_DFLT; 37 + module_param(dma_chan_attr, uint, 0444); 38 + MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public"); 39 + 40 + unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp) 41 + { 42 + switch (dma_chan_attr) { 43 + case CCP_DMA_DFLT: 44 + return ccp->vdata->dma_chan_attr; 45 + 46 + case CCP_DMA_PRIV: 47 + return DMA_PRIVATE; 48 + 49 + case CCP_DMA_PUB: 50 + return 0; 51 + 52 + default: 53 + dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n", 54 + dma_chan_attr); 55 + return ccp->vdata->dma_chan_attr; 56 + } 57 + } 28 58 29 59 static void ccp_free_cmd_resources(struct ccp_device *ccp, 30 60 struct list_head *list) ··· 706 674 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); 707 675 dma_cap_set(DMA_SG, dma_dev->cap_mask); 708 676 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); 677 + 678 + /* The DMA channels for this device can be set to public or private, 679 + * and overridden by the module parameter dma_chan_attr. 
680 + * Default: according to the value in vdata (dma_chan_attr=0) 681 + * dma_chan_attr=0x1: all channels private (override vdata) 682 + * dma_chan_attr=0x2: all channels public (override vdata) 683 + */ 684 + if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE) 685 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); 709 686 710 687 INIT_LIST_HEAD(&dma_dev->channels); 711 688 for (i = 0; i < ccp->cmd_q_count; i++) {
+4 -1
drivers/dma/bcm2835-dma.c
··· 251 251 */ 252 252 253 253 /* have we filled in period_length yet? */ 254 - if (*total_len + control_block->length < period_len) 254 + if (*total_len + control_block->length < period_len) { 255 + /* update number of bytes in this period so far */ 256 + *total_len += control_block->length; 255 257 return; 258 + } 256 259 257 260 /* calculate the length that remains to reach period_length */ 258 261 control_block->length = period_len - *total_len;
+2
drivers/dma/dmaengine.c
··· 1108 1108 switch (order) { 1109 1109 case 0 ... 1: 1110 1110 return &unmap_pool[0]; 1111 + #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) 1111 1112 case 2 ... 4: 1112 1113 return &unmap_pool[1]; 1113 1114 case 5 ... 7: 1114 1115 return &unmap_pool[2]; 1115 1116 case 8: 1116 1117 return &unmap_pool[3]; 1118 + #endif 1117 1119 default: 1118 1120 BUG(); 1119 1121 return NULL;
+10
drivers/edac/Kconfig
··· 43 43 44 44 config EDAC_DEBUG 45 45 bool "Debugging" 46 + select DEBUG_FS 46 47 help 47 48 This turns on debugging information for the entire EDAC subsystem. 48 49 You do so by inserting edac_module with "edac_debug_level=x." Valid ··· 259 258 help 260 259 Support for error detection and correction the Intel 261 260 Skylake server Integrated Memory Controllers. 261 + 262 + config EDAC_PND2 263 + tristate "Intel Pondicherry2" 264 + depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL 265 + help 266 + Support for error detection and correction on the Intel 267 + Pondicherry2 Integrated Memory Controller. This SoC IP is 268 + first used on the Apollo Lake platform and Denverton 269 + micro-server but may appear on others in the future. 262 270 263 271 config EDAC_MPC85XX 264 272 tristate "Freescale MPC83xx / MPC85xx"
+1
drivers/edac/Makefile
··· 32 32 obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o 33 33 obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o 34 34 obj-$(CONFIG_EDAC_SKX) += skx_edac.o 35 + obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o 35 36 obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o 36 37 obj-$(CONFIG_EDAC_E752X) += e752x_edac.o 37 38 obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
+1 -1
drivers/edac/i5000_edac.c
··· 1293 1293 dimm->mtype = MEM_FB_DDR2; 1294 1294 1295 1295 /* ask what device type on this row */ 1296 - if (MTR_DRAM_WIDTH(mtr)) 1296 + if (MTR_DRAM_WIDTH(mtr) == 8) 1297 1297 dimm->dtype = DEV_X8; 1298 1298 else 1299 1299 dimm->dtype = DEV_X4;
+3 -2
drivers/edac/i5400_edac.c
··· 1207 1207 1208 1208 dimm->nr_pages = size_mb << 8; 1209 1209 dimm->grain = 8; 1210 - dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; 1210 + dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ? 1211 + DEV_X8 : DEV_X4; 1211 1212 dimm->mtype = MEM_FB_DDR2; 1212 1213 /* 1213 1214 * The eccc mechanism is SDDC (aka SECC), with 1214 1215 * is similar to Chipkill. 1215 1216 */ 1216 - dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? 1217 + dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ? 1217 1218 EDAC_S8ECD8ED : EDAC_S4ECD4ED; 1218 1219 ndimms++; 1219 1220 }
+1546
drivers/edac/pnd2_edac.c
··· 1 + /* 2 + * Driver for Pondicherry2 memory controller. 3 + * 4 + * Copyright (c) 2016, Intel Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * [Derived from sb_edac.c] 16 + * 17 + * Translation of system physical addresses to DIMM addresses 18 + * is a two stage process: 19 + * 20 + * First the Pondicherry 2 memory controller handles slice and channel interleaving 21 + * in "sys2pmi()". This is (almost) completley common between platforms. 22 + * 23 + * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM, 24 + * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters. 
25 + */ 26 + 27 + #include <linux/module.h> 28 + #include <linux/init.h> 29 + #include <linux/pci.h> 30 + #include <linux/pci_ids.h> 31 + #include <linux/slab.h> 32 + #include <linux/delay.h> 33 + #include <linux/edac.h> 34 + #include <linux/mmzone.h> 35 + #include <linux/smp.h> 36 + #include <linux/bitmap.h> 37 + #include <linux/math64.h> 38 + #include <linux/mod_devicetable.h> 39 + #include <asm/cpu_device_id.h> 40 + #include <asm/intel-family.h> 41 + #include <asm/processor.h> 42 + #include <asm/mce.h> 43 + 44 + #include "edac_mc.h" 45 + #include "edac_module.h" 46 + #include "pnd2_edac.h" 47 + 48 + #define APL_NUM_CHANNELS 4 49 + #define DNV_NUM_CHANNELS 2 50 + #define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */ 51 + 52 + enum type { 53 + APL, 54 + DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */ 55 + }; 56 + 57 + struct dram_addr { 58 + int chan; 59 + int dimm; 60 + int rank; 61 + int bank; 62 + int row; 63 + int col; 64 + }; 65 + 66 + struct pnd2_pvt { 67 + int dimm_geom[APL_NUM_CHANNELS]; 68 + u64 tolm, tohm; 69 + }; 70 + 71 + /* 72 + * System address space is divided into multiple regions with 73 + * different interleave rules in each. The as0/as1 regions 74 + * have no interleaving at all. The as2 region is interleaved 75 + * between two channels. The mot region is magic and may overlap 76 + * other regions, with its interleave rules taking precedence. 77 + * Addresses not in any of these regions are interleaved across 78 + * all four channels. 
79 + */ 80 + static struct region { 81 + u64 base; 82 + u64 limit; 83 + u8 enabled; 84 + } mot, as0, as1, as2; 85 + 86 + static struct dunit_ops { 87 + char *name; 88 + enum type type; 89 + int pmiaddr_shift; 90 + int pmiidx_shift; 91 + int channels; 92 + int dimms_per_channel; 93 + int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name); 94 + int (*get_registers)(void); 95 + int (*check_ecc)(void); 96 + void (*mk_region)(char *name, struct region *rp, void *asym); 97 + void (*get_dimm_config)(struct mem_ctl_info *mci); 98 + int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, 99 + struct dram_addr *daddr, char *msg); 100 + } *ops; 101 + 102 + static struct mem_ctl_info *pnd2_mci; 103 + 104 + #define PND2_MSG_SIZE 256 105 + 106 + /* Debug macros */ 107 + #define pnd2_printk(level, fmt, arg...) \ 108 + edac_printk(level, "pnd2", fmt, ##arg) 109 + 110 + #define pnd2_mc_printk(mci, level, fmt, arg...) \ 111 + edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg) 112 + 113 + #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12 114 + #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13 115 + #define SELECTOR_DISABLED (-1) 116 + #define _4GB (1ul << 32) 117 + 118 + #define PMI_ADDRESS_WIDTH 31 119 + #define PND_MAX_PHYS_BIT 39 120 + 121 + #define APL_ASYMSHIFT 28 122 + #define DNV_ASYMSHIFT 31 123 + #define CH_HASH_MASK_LSB 6 124 + #define SLICE_HASH_MASK_LSB 6 125 + #define MOT_SLC_INTLV_BIT 12 126 + #define LOG2_PMI_ADDR_GRANULARITY 5 127 + #define MOT_SHIFT 24 128 + 129 + #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo)) 130 + #define U64_LSHIFT(val, s) ((u64)(val) << (s)) 131 + 132 + #ifdef CONFIG_X86_INTEL_SBI_APL 133 + #include "linux/platform_data/sbi_apl.h" 134 + int sbi_send(int port, int off, int op, u32 *data) 135 + { 136 + struct sbi_apl_message sbi_arg; 137 + int ret, read = 0; 138 + 139 + memset(&sbi_arg, 0, sizeof(sbi_arg)); 140 + 141 + if (op == 0 || op == 4 || op == 6) 142 + read = 1; 143 + else 144 + sbi_arg.data = *data; 145 + 
146 + sbi_arg.opcode = op; 147 + sbi_arg.port_address = port; 148 + sbi_arg.register_offset = off; 149 + ret = sbi_apl_commit(&sbi_arg); 150 + if (ret || sbi_arg.status) 151 + edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n", 152 + sbi_arg.status, ret, sbi_arg.data); 153 + 154 + if (ret == 0) 155 + ret = sbi_arg.status; 156 + 157 + if (ret == 0 && read) 158 + *data = sbi_arg.data; 159 + 160 + return ret; 161 + } 162 + #else 163 + int sbi_send(int port, int off, int op, u32 *data) 164 + { 165 + return -EUNATCH; 166 + } 167 + #endif 168 + 169 + static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) 170 + { 171 + int ret = 0; 172 + 173 + edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op); 174 + switch (sz) { 175 + case 8: 176 + ret = sbi_send(port, off + 4, op, (u32 *)(data + 4)); 177 + case 4: 178 + ret = sbi_send(port, off, op, (u32 *)data); 179 + pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name, 180 + sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret); 181 + break; 182 + } 183 + 184 + return ret; 185 + } 186 + 187 + static u64 get_mem_ctrl_hub_base_addr(void) 188 + { 189 + struct b_cr_mchbar_lo_pci lo; 190 + struct b_cr_mchbar_hi_pci hi; 191 + struct pci_dev *pdev; 192 + 193 + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); 194 + if (pdev) { 195 + pci_read_config_dword(pdev, 0x48, (u32 *)&lo); 196 + pci_read_config_dword(pdev, 0x4c, (u32 *)&hi); 197 + pci_dev_put(pdev); 198 + } else { 199 + return 0; 200 + } 201 + 202 + if (!lo.enable) { 203 + edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n"); 204 + return 0; 205 + } 206 + 207 + return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15); 208 + } 209 + 210 + static u64 get_sideband_reg_base_addr(void) 211 + { 212 + struct pci_dev *pdev; 213 + u32 hi, lo; 214 + 215 + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL); 216 + if (pdev) { 217 + pci_read_config_dword(pdev, 0x10, &lo); 218 + pci_read_config_dword(pdev, 0x14, &hi); 
219 + pci_dev_put(pdev); 220 + return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0)); 221 + } else { 222 + return 0xfd000000; 223 + } 224 + } 225 + 226 + static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) 227 + { 228 + struct pci_dev *pdev; 229 + char *base; 230 + u64 addr; 231 + 232 + if (op == 4) { 233 + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); 234 + if (!pdev) 235 + return -ENODEV; 236 + 237 + pci_read_config_dword(pdev, off, data); 238 + pci_dev_put(pdev); 239 + } else { 240 + /* MMIO via memory controller hub base address */ 241 + if (op == 0 && port == 0x4c) { 242 + addr = get_mem_ctrl_hub_base_addr(); 243 + if (!addr) 244 + return -ENODEV; 245 + } else { 246 + /* MMIO via sideband register base address */ 247 + addr = get_sideband_reg_base_addr(); 248 + if (!addr) 249 + return -ENODEV; 250 + addr += (port << 16); 251 + } 252 + 253 + base = ioremap((resource_size_t)addr, 0x10000); 254 + if (!base) 255 + return -ENODEV; 256 + 257 + if (sz == 8) 258 + *(u32 *)(data + 4) = *(u32 *)(base + off + 4); 259 + *(u32 *)data = *(u32 *)(base + off); 260 + 261 + iounmap(base); 262 + } 263 + 264 + edac_dbg(2, "Read %s=%.8x_%.8x\n", name, 265 + (sz == 8) ? 
*(u32 *)(data + 4) : 0, *(u32 *)data); 266 + 267 + return 0; 268 + } 269 + 270 + #define RD_REGP(regp, regname, port) \ 271 + ops->rd_reg(port, \ 272 + regname##_offset, \ 273 + regname##_r_opcode, \ 274 + regp, sizeof(struct regname), \ 275 + #regname) 276 + 277 + #define RD_REG(regp, regname) \ 278 + ops->rd_reg(regname ## _port, \ 279 + regname##_offset, \ 280 + regname##_r_opcode, \ 281 + regp, sizeof(struct regname), \ 282 + #regname) 283 + 284 + static u64 top_lm, top_hm; 285 + static bool two_slices; 286 + static bool two_channels; /* Both PMI channels in one slice enabled */ 287 + 288 + static u8 sym_chan_mask; 289 + static u8 asym_chan_mask; 290 + static u8 chan_mask; 291 + 292 + static int slice_selector = -1; 293 + static int chan_selector = -1; 294 + static u64 slice_hash_mask; 295 + static u64 chan_hash_mask; 296 + 297 + static void mk_region(char *name, struct region *rp, u64 base, u64 limit) 298 + { 299 + rp->enabled = 1; 300 + rp->base = base; 301 + rp->limit = limit; 302 + edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit); 303 + } 304 + 305 + static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask) 306 + { 307 + if (mask == 0) { 308 + pr_info(FW_BUG "MOT mask cannot be zero\n"); 309 + return; 310 + } 311 + if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) { 312 + pr_info(FW_BUG "MOT mask not power of two\n"); 313 + return; 314 + } 315 + if (base & ~mask) { 316 + pr_info(FW_BUG "MOT region base/mask alignment error\n"); 317 + return; 318 + } 319 + rp->base = base; 320 + rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0); 321 + rp->enabled = 1; 322 + edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit); 323 + } 324 + 325 + static bool in_region(struct region *rp, u64 addr) 326 + { 327 + if (!rp->enabled) 328 + return false; 329 + 330 + return rp->base <= addr && addr <= rp->limit; 331 + } 332 + 333 + static int gen_sym_mask(struct b_cr_slice_channel_hash *p) 334 + { 335 + int mask = 0; 336 + 
337 + if (!p->slice_0_mem_disabled) 338 + mask |= p->sym_slice0_channel_enabled; 339 + 340 + if (!p->slice_1_disabled) 341 + mask |= p->sym_slice1_channel_enabled << 2; 342 + 343 + if (p->ch_1_disabled || p->enable_pmi_dual_data_mode) 344 + mask &= 0x5; 345 + 346 + return mask; 347 + } 348 + 349 + static int gen_asym_mask(struct b_cr_slice_channel_hash *p, 350 + struct b_cr_asym_mem_region0_mchbar *as0, 351 + struct b_cr_asym_mem_region1_mchbar *as1, 352 + struct b_cr_asym_2way_mem_region_mchbar *as2way) 353 + { 354 + const int intlv[] = { 0x5, 0xA, 0x3, 0xC }; 355 + int mask = 0; 356 + 357 + if (as2way->asym_2way_interleave_enable) 358 + mask = intlv[as2way->asym_2way_intlv_mode]; 359 + if (as0->slice0_asym_enable) 360 + mask |= (1 << as0->slice0_asym_channel_select); 361 + if (as1->slice1_asym_enable) 362 + mask |= (4 << as1->slice1_asym_channel_select); 363 + if (p->slice_0_mem_disabled) 364 + mask &= 0xc; 365 + if (p->slice_1_disabled) 366 + mask &= 0x3; 367 + if (p->ch_1_disabled || p->enable_pmi_dual_data_mode) 368 + mask &= 0x5; 369 + 370 + return mask; 371 + } 372 + 373 + static struct b_cr_tolud_pci tolud; 374 + static struct b_cr_touud_lo_pci touud_lo; 375 + static struct b_cr_touud_hi_pci touud_hi; 376 + static struct b_cr_asym_mem_region0_mchbar asym0; 377 + static struct b_cr_asym_mem_region1_mchbar asym1; 378 + static struct b_cr_asym_2way_mem_region_mchbar asym_2way; 379 + static struct b_cr_mot_out_base_mchbar mot_base; 380 + static struct b_cr_mot_out_mask_mchbar mot_mask; 381 + static struct b_cr_slice_channel_hash chash; 382 + 383 + /* Apollo Lake dunit */ 384 + /* 385 + * Validated on board with just two DIMMs in the [0] and [2] positions 386 + * in this array. Other port number matches documentation, but caution 387 + * advised. 
388 + */ 389 + static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 }; 390 + static struct d_cr_drp0 drp0[APL_NUM_CHANNELS]; 391 + 392 + /* Denverton dunit */ 393 + static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 }; 394 + static struct d_cr_dsch dsch; 395 + static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS]; 396 + static struct d_cr_drp drp[DNV_NUM_CHANNELS]; 397 + static struct d_cr_dmap dmap[DNV_NUM_CHANNELS]; 398 + static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS]; 399 + static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS]; 400 + static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS]; 401 + static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS]; 402 + static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS]; 403 + 404 + static void apl_mk_region(char *name, struct region *rp, void *asym) 405 + { 406 + struct b_cr_asym_mem_region0_mchbar *a = asym; 407 + 408 + mk_region(name, rp, 409 + U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT), 410 + U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) + 411 + GENMASK_ULL(APL_ASYMSHIFT - 1, 0)); 412 + } 413 + 414 + static void dnv_mk_region(char *name, struct region *rp, void *asym) 415 + { 416 + struct b_cr_asym_mem_region_denverton *a = asym; 417 + 418 + mk_region(name, rp, 419 + U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT), 420 + U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) + 421 + GENMASK_ULL(DNV_ASYMSHIFT - 1, 0)); 422 + } 423 + 424 + static int apl_get_registers(void) 425 + { 426 + int i; 427 + 428 + if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar)) 429 + return -ENODEV; 430 + 431 + for (i = 0; i < APL_NUM_CHANNELS; i++) 432 + if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i])) 433 + return -ENODEV; 434 + 435 + return 0; 436 + } 437 + 438 + static int dnv_get_registers(void) 439 + { 440 + int i; 441 + 442 + if (RD_REG(&dsch, d_cr_dsch)) 443 + return -ENODEV; 444 + 445 + for (i = 0; i < DNV_NUM_CHANNELS; i++) 446 + if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) || 447 + RD_REGP(&drp[i], d_cr_drp, 
dnv_dports[i]) || 448 + RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) || 449 + RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) || 450 + RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) || 451 + RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) || 452 + RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) || 453 + RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i])) 454 + return -ENODEV; 455 + 456 + return 0; 457 + } 458 + 459 + /* 460 + * Read all the h/w config registers once here (they don't 461 + * change at run time. Figure out which address ranges have 462 + * which interleave characteristics. 463 + */ 464 + static int get_registers(void) 465 + { 466 + const int intlv[] = { 10, 11, 12, 12 }; 467 + 468 + if (RD_REG(&tolud, b_cr_tolud_pci) || 469 + RD_REG(&touud_lo, b_cr_touud_lo_pci) || 470 + RD_REG(&touud_hi, b_cr_touud_hi_pci) || 471 + RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) || 472 + RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) || 473 + RD_REG(&mot_base, b_cr_mot_out_base_mchbar) || 474 + RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) || 475 + RD_REG(&chash, b_cr_slice_channel_hash)) 476 + return -ENODEV; 477 + 478 + if (ops->get_registers()) 479 + return -ENODEV; 480 + 481 + if (ops->type == DNV) { 482 + /* PMI channel idx (always 0) for asymmetric region */ 483 + asym0.slice0_asym_channel_select = 0; 484 + asym1.slice1_asym_channel_select = 0; 485 + /* PMI channel bitmap (always 1) for symmetric region */ 486 + chash.sym_slice0_channel_enabled = 0x1; 487 + chash.sym_slice1_channel_enabled = 0x1; 488 + } 489 + 490 + if (asym0.slice0_asym_enable) 491 + ops->mk_region("as0", &as0, &asym0); 492 + 493 + if (asym1.slice1_asym_enable) 494 + ops->mk_region("as1", &as1, &asym1); 495 + 496 + if (asym_2way.asym_2way_interleave_enable) { 497 + mk_region("as2way", &as2, 498 + U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT), 499 + U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) + 500 + GENMASK_ULL(APL_ASYMSHIFT - 1, 0)); 501 + } 502 + 503 + if (mot_base.imr_en) { 504 + 
mk_region_mask("mot", &mot, 505 + U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT), 506 + U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT)); 507 + } 508 + 509 + top_lm = U64_LSHIFT(tolud.tolud, 20); 510 + top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20); 511 + 512 + two_slices = !chash.slice_1_disabled && 513 + !chash.slice_0_mem_disabled && 514 + (chash.sym_slice0_channel_enabled != 0) && 515 + (chash.sym_slice1_channel_enabled != 0); 516 + two_channels = !chash.ch_1_disabled && 517 + !chash.enable_pmi_dual_data_mode && 518 + ((chash.sym_slice0_channel_enabled == 3) || 519 + (chash.sym_slice1_channel_enabled == 3)); 520 + 521 + sym_chan_mask = gen_sym_mask(&chash); 522 + asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way); 523 + chan_mask = sym_chan_mask | asym_chan_mask; 524 + 525 + if (two_slices && !two_channels) { 526 + if (chash.hvm_mode) 527 + slice_selector = 29; 528 + else 529 + slice_selector = intlv[chash.interleave_mode]; 530 + } else if (!two_slices && two_channels) { 531 + if (chash.hvm_mode) 532 + chan_selector = 29; 533 + else 534 + chan_selector = intlv[chash.interleave_mode]; 535 + } else if (two_slices && two_channels) { 536 + if (chash.hvm_mode) { 537 + slice_selector = 29; 538 + chan_selector = 30; 539 + } else { 540 + slice_selector = intlv[chash.interleave_mode]; 541 + chan_selector = intlv[chash.interleave_mode] + 1; 542 + } 543 + } 544 + 545 + if (two_slices) { 546 + if (!chash.hvm_mode) 547 + slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB; 548 + if (!two_channels) 549 + slice_hash_mask |= BIT_ULL(slice_selector); 550 + } 551 + 552 + if (two_channels) { 553 + if (!chash.hvm_mode) 554 + chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB; 555 + if (!two_slices) 556 + chan_hash_mask |= BIT_ULL(chan_selector); 557 + } 558 + 559 + return 0; 560 + } 561 + 562 + /* Get a contiguous memory address (remove the MMIO gap) */ 563 + static u64 remove_mmio_gap(u64 sys) 564 + { 565 + return (sys < _4GB) 
? sys : sys - (_4GB - top_lm); 566 + } 567 + 568 + /* Squeeze out one address bit, shift upper part down to fill gap */ 569 + static void remove_addr_bit(u64 *addr, int bitidx) 570 + { 571 + u64 mask; 572 + 573 + if (bitidx == -1) 574 + return; 575 + 576 + mask = (1ull << bitidx) - 1; 577 + *addr = ((*addr >> 1) & ~mask) | (*addr & mask); 578 + } 579 + 580 + /* XOR all the bits from addr specified in mask */ 581 + static int hash_by_mask(u64 addr, u64 mask) 582 + { 583 + u64 result = addr & mask; 584 + 585 + result = (result >> 32) ^ result; 586 + result = (result >> 16) ^ result; 587 + result = (result >> 8) ^ result; 588 + result = (result >> 4) ^ result; 589 + result = (result >> 2) ^ result; 590 + result = (result >> 1) ^ result; 591 + 592 + return (int)result & 1; 593 + } 594 + 595 + /* 596 + * First stage decode. Take the system address and figure out which 597 + * second stage will deal with it based on interleave modes. 598 + */ 599 + static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg) 600 + { 601 + u64 contig_addr, contig_base, contig_offset, contig_base_adj; 602 + int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH : 603 + MOT_CHAN_INTLV_BIT_1SLC_2CH; 604 + int slice_intlv_bit_rm = SELECTOR_DISABLED; 605 + int chan_intlv_bit_rm = SELECTOR_DISABLED; 606 + /* Determine if address is in the MOT region. */ 607 + bool mot_hit = in_region(&mot, addr); 608 + /* Calculate the number of symmetric regions enabled. */ 609 + int sym_channels = hweight8(sym_chan_mask); 610 + 611 + /* 612 + * The amount we need to shift the asym base can be determined by the 613 + * number of enabled symmetric channels. 614 + * NOTE: This can only work because symmetric memory is not supposed 615 + * to do a 3-way interleave. 
616 + */ 617 + int sym_chan_shift = sym_channels >> 1; 618 + 619 + /* Give up if address is out of range, or in MMIO gap */ 620 + if (addr >= (1ul << PND_MAX_PHYS_BIT) || 621 + (addr >= top_lm && addr < _4GB) || addr >= top_hm) { 622 + snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr); 623 + return -EINVAL; 624 + } 625 + 626 + /* Get a contiguous memory address (remove the MMIO gap) */ 627 + contig_addr = remove_mmio_gap(addr); 628 + 629 + if (in_region(&as0, addr)) { 630 + *pmiidx = asym0.slice0_asym_channel_select; 631 + 632 + contig_base = remove_mmio_gap(as0.base); 633 + contig_offset = contig_addr - contig_base; 634 + contig_base_adj = (contig_base >> sym_chan_shift) * 635 + ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1); 636 + contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull); 637 + } else if (in_region(&as1, addr)) { 638 + *pmiidx = 2u + asym1.slice1_asym_channel_select; 639 + 640 + contig_base = remove_mmio_gap(as1.base); 641 + contig_offset = contig_addr - contig_base; 642 + contig_base_adj = (contig_base >> sym_chan_shift) * 643 + ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1); 644 + contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull); 645 + } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) { 646 + bool channel1; 647 + 648 + mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH; 649 + *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1; 650 + channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) : 651 + hash_by_mask(contig_addr, chan_hash_mask); 652 + *pmiidx |= (u32)channel1; 653 + 654 + contig_base = remove_mmio_gap(as2.base); 655 + chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector; 656 + contig_offset = contig_addr - contig_base; 657 + remove_addr_bit(&contig_offset, chan_intlv_bit_rm); 658 + contig_addr = (contig_base >> sym_chan_shift) + contig_offset; 659 + } else { 660 + /* Otherwise we're in normal, boring symmetric mode. 
*/ 661 + *pmiidx = 0u; 662 + 663 + if (two_slices) { 664 + bool slice1; 665 + 666 + if (mot_hit) { 667 + slice_intlv_bit_rm = MOT_SLC_INTLV_BIT; 668 + slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1; 669 + } else { 670 + slice_intlv_bit_rm = slice_selector; 671 + slice1 = hash_by_mask(addr, slice_hash_mask); 672 + } 673 + 674 + *pmiidx = (u32)slice1 << 1; 675 + } 676 + 677 + if (two_channels) { 678 + bool channel1; 679 + 680 + mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH : 681 + MOT_CHAN_INTLV_BIT_1SLC_2CH; 682 + 683 + if (mot_hit) { 684 + chan_intlv_bit_rm = mot_intlv_bit; 685 + channel1 = (addr >> mot_intlv_bit) & 1; 686 + } else { 687 + chan_intlv_bit_rm = chan_selector; 688 + channel1 = hash_by_mask(contig_addr, chan_hash_mask); 689 + } 690 + 691 + *pmiidx |= (u32)channel1; 692 + } 693 + } 694 + 695 + /* Remove the chan_selector bit first */ 696 + remove_addr_bit(&contig_addr, chan_intlv_bit_rm); 697 + /* Remove the slice bit (we remove it second because it must be lower */ 698 + remove_addr_bit(&contig_addr, slice_intlv_bit_rm); 699 + *pmiaddr = contig_addr; 700 + 701 + return 0; 702 + } 703 + 704 + /* Translate PMI address to memory (rank, row, bank, column) */ 705 + #define C(n) (0x10 | (n)) /* column */ 706 + #define B(n) (0x20 | (n)) /* bank */ 707 + #define R(n) (0x40 | (n)) /* row */ 708 + #define RS (0x80) /* rank */ 709 + 710 + /* addrdec values */ 711 + #define AMAP_1KB 0 712 + #define AMAP_2KB 1 713 + #define AMAP_4KB 2 714 + #define AMAP_RSVD 3 715 + 716 + /* dden values */ 717 + #define DEN_4Gb 0 718 + #define DEN_8Gb 2 719 + 720 + /* dwid values */ 721 + #define X8 0 722 + #define X16 1 723 + 724 + static struct dimm_geometry { 725 + u8 addrdec; 726 + u8 dden; 727 + u8 dwid; 728 + u8 rowbits, colbits; 729 + u16 bits[PMI_ADDRESS_WIDTH]; 730 + } dimms[] = { 731 + { 732 + .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16, 733 + .rowbits = 15, .colbits = 10, 734 + .bits = { 735 + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), 736 + R(1), 
R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), 737 + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), 738 + 0, 0, 0, 0 739 + } 740 + }, 741 + { 742 + .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8, 743 + .rowbits = 16, .colbits = 10, 744 + .bits = { 745 + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), 746 + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), 747 + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), 748 + R(15), 0, 0, 0 749 + } 750 + }, 751 + { 752 + .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16, 753 + .rowbits = 16, .colbits = 10, 754 + .bits = { 755 + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), 756 + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), 757 + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), 758 + R(15), 0, 0, 0 759 + } 760 + }, 761 + { 762 + .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8, 763 + .rowbits = 16, .colbits = 11, 764 + .bits = { 765 + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), 766 + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), 767 + R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13), 768 + R(14), R(15), 0, 0 769 + } 770 + }, 771 + { 772 + .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16, 773 + .rowbits = 15, .colbits = 10, 774 + .bits = { 775 + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), 776 + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), 777 + R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), 778 + 0, 0, 0, 0 779 + } 780 + }, 781 + { 782 + .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8, 783 + .rowbits = 16, .colbits = 10, 784 + .bits = { 785 + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), 786 + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), 787 + R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), 788 + R(15), 0, 0, 0 789 + } 790 + }, 791 + { 792 + .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16, 793 + .rowbits = 16, .colbits = 10, 794 + .bits = { 795 + C(2), C(3), C(4), C(5), C(6), C(7), B(0), 
B(1), B(2), 796 + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), 797 + R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), 798 + R(15), 0, 0, 0 799 + } 800 + }, 801 + { 802 + .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8, 803 + .rowbits = 16, .colbits = 11, 804 + .bits = { 805 + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), 806 + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), 807 + R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13), 808 + R(14), R(15), 0, 0 809 + } 810 + }, 811 + { 812 + .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16, 813 + .rowbits = 15, .colbits = 10, 814 + .bits = { 815 + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), 816 + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), 817 + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), 818 + 0, 0, 0, 0 819 + } 820 + }, 821 + { 822 + .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8, 823 + .rowbits = 16, .colbits = 10, 824 + .bits = { 825 + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), 826 + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), 827 + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), 828 + R(15), 0, 0, 0 829 + } 830 + }, 831 + { 832 + .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16, 833 + .rowbits = 16, .colbits = 10, 834 + .bits = { 835 + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), 836 + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), 837 + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), 838 + R(15), 0, 0, 0 839 + } 840 + }, 841 + { 842 + .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8, 843 + .rowbits = 16, .colbits = 11, 844 + .bits = { 845 + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), 846 + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), 847 + R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13), 848 + R(14), R(15), 0, 0 849 + } 850 + } 851 + }; 852 + 853 + static int bank_hash(u64 pmiaddr, int idx, int shft) 854 + { 855 + int bhash = 0; 856 + 857 + switch (idx) { 858 + 
case 0: 859 + bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1; 860 + break; 861 + case 1: 862 + bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1; 863 + bhash ^= ((pmiaddr >> 22) & 1) << 1; 864 + break; 865 + case 2: 866 + bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2; 867 + break; 868 + } 869 + 870 + return bhash; 871 + } 872 + 873 + static int rank_hash(u64 pmiaddr) 874 + { 875 + return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1; 876 + } 877 + 878 + /* Second stage decode. Compute rank, bank, row & column. */ 879 + static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, 880 + struct dram_addr *daddr, char *msg) 881 + { 882 + struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx]; 883 + struct pnd2_pvt *pvt = mci->pvt_info; 884 + int g = pvt->dimm_geom[pmiidx]; 885 + struct dimm_geometry *d = &dimms[g]; 886 + int column = 0, bank = 0, row = 0, rank = 0; 887 + int i, idx, type, skiprs = 0; 888 + 889 + for (i = 0; i < PMI_ADDRESS_WIDTH; i++) { 890 + int bit = (pmiaddr >> i) & 1; 891 + 892 + if (i + skiprs >= PMI_ADDRESS_WIDTH) { 893 + snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n"); 894 + return -EINVAL; 895 + } 896 + 897 + type = d->bits[i + skiprs] & ~0xf; 898 + idx = d->bits[i + skiprs] & 0xf; 899 + 900 + /* 901 + * On single rank DIMMs ignore the rank select bit 902 + * and shift remainder of "bits[]" down one place. 
903 + */ 904 + if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) { 905 + skiprs = 1; 906 + type = d->bits[i + skiprs] & ~0xf; 907 + idx = d->bits[i + skiprs] & 0xf; 908 + } 909 + 910 + switch (type) { 911 + case C(0): 912 + column |= (bit << idx); 913 + break; 914 + case B(0): 915 + bank |= (bit << idx); 916 + if (cr_drp0->bahen) 917 + bank ^= bank_hash(pmiaddr, idx, d->addrdec); 918 + break; 919 + case R(0): 920 + row |= (bit << idx); 921 + break; 922 + case RS: 923 + rank = bit; 924 + if (cr_drp0->rsien) 925 + rank ^= rank_hash(pmiaddr); 926 + break; 927 + default: 928 + if (bit) { 929 + snprintf(msg, PND2_MSG_SIZE, "Bad translation\n"); 930 + return -EINVAL; 931 + } 932 + goto done; 933 + } 934 + } 935 + 936 + done: 937 + daddr->col = column; 938 + daddr->bank = bank; 939 + daddr->row = row; 940 + daddr->rank = rank; 941 + daddr->dimm = 0; 942 + 943 + return 0; 944 + } 945 + 946 + /* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */ 947 + #define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out)) 948 + 949 + static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, 950 + struct dram_addr *daddr, char *msg) 951 + { 952 + /* Rank 0 or 1 */ 953 + daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0); 954 + /* Rank 2 or 3 */ 955 + daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1); 956 + 957 + /* 958 + * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we 959 + * flip them if DIMM1 is larger than DIMM0. 
960 + */ 961 + daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip; 962 + 963 + daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0); 964 + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1); 965 + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2); 966 + if (dsch.ddr4en) 967 + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3); 968 + if (dmap1[pmiidx].bxor) { 969 + if (dsch.ddr4en) { 970 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0); 971 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1); 972 + if (dsch.chan_width == 0) 973 + /* 64/72 bit dram channel width */ 974 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2); 975 + else 976 + /* 32/40 bit dram channel width */ 977 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2); 978 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3); 979 + } else { 980 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0); 981 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1); 982 + if (dsch.chan_width == 0) 983 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2); 984 + else 985 + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2); 986 + } 987 + } 988 + 989 + daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0); 990 + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1); 991 + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2); 992 + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3); 993 + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4); 994 + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5); 995 + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6); 996 + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7); 997 + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8); 998 + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9); 999 + daddr->row |= dnv_get_bit(pmiaddr, 
dmap3[pmiidx].row10 + 6, 10); 1000 + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11); 1001 + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12); 1002 + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13); 1003 + if (dmap4[pmiidx].row14 != 31) 1004 + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14); 1005 + if (dmap4[pmiidx].row15 != 31) 1006 + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15); 1007 + if (dmap4[pmiidx].row16 != 31) 1008 + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16); 1009 + if (dmap4[pmiidx].row17 != 31) 1010 + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17); 1011 + 1012 + daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3); 1013 + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4); 1014 + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5); 1015 + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6); 1016 + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7); 1017 + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8); 1018 + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9); 1019 + if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f) 1020 + daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11); 1021 + 1022 + return 0; 1023 + } 1024 + 1025 + static int check_channel(int ch) 1026 + { 1027 + if (drp0[ch].dramtype != 0) { 1028 + pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch); 1029 + return 1; 1030 + } else if (drp0[ch].eccen == 0) { 1031 + pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch); 1032 + return 1; 1033 + } 1034 + return 0; 1035 + } 1036 + 1037 + static int apl_check_ecc_active(void) 1038 + { 1039 + int i, ret = 0; 1040 + 1041 + /* Check dramtype and ECC mode for each present DIMM */ 1042 + for (i = 0; i < APL_NUM_CHANNELS; i++) 1043 + if (chan_mask & BIT(i)) 1044 + ret += check_channel(i); 1045 + return ret ? 
-EINVAL : 0; 1046 + } 1047 + 1048 + #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3) 1049 + 1050 + static int check_unit(int ch) 1051 + { 1052 + struct d_cr_drp *d = &drp[ch]; 1053 + 1054 + if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) { 1055 + pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch); 1056 + return 1; 1057 + } 1058 + return 0; 1059 + } 1060 + 1061 + static int dnv_check_ecc_active(void) 1062 + { 1063 + int i, ret = 0; 1064 + 1065 + for (i = 0; i < DNV_NUM_CHANNELS; i++) 1066 + ret += check_unit(i); 1067 + return ret ? -EINVAL : 0; 1068 + } 1069 + 1070 + static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr, 1071 + struct dram_addr *daddr, char *msg) 1072 + { 1073 + u64 pmiaddr; 1074 + u32 pmiidx; 1075 + int ret; 1076 + 1077 + ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg); 1078 + if (ret) 1079 + return ret; 1080 + 1081 + pmiaddr >>= ops->pmiaddr_shift; 1082 + /* pmi channel idx to dimm channel idx */ 1083 + pmiidx >>= ops->pmiidx_shift; 1084 + daddr->chan = pmiidx; 1085 + 1086 + ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg); 1087 + if (ret) 1088 + return ret; 1089 + 1090 + edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n", 1091 + addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col); 1092 + 1093 + return 0; 1094 + } 1095 + 1096 + static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, 1097 + struct dram_addr *daddr) 1098 + { 1099 + enum hw_event_mc_err_type tp_event; 1100 + char *optype, msg[PND2_MSG_SIZE]; 1101 + bool ripv = m->mcgstatus & MCG_STATUS_RIPV; 1102 + bool overflow = m->status & MCI_STATUS_OVER; 1103 + bool uc_err = m->status & MCI_STATUS_UC; 1104 + bool recov = m->status & MCI_STATUS_S; 1105 + u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); 1106 + u32 mscod = GET_BITFIELD(m->status, 16, 31); 1107 + u32 errcode = GET_BITFIELD(m->status, 0, 15); 1108 + u32 optypenum = 
GET_BITFIELD(m->status, 4, 6); 1109 + int rc; 1110 + 1111 + tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) : 1112 + HW_EVENT_ERR_CORRECTED; 1113 + 1114 + /* 1115 + * According with Table 15-9 of the Intel Architecture spec vol 3A, 1116 + * memory errors should fit in this mask: 1117 + * 000f 0000 1mmm cccc (binary) 1118 + * where: 1119 + * f = Correction Report Filtering Bit. If 1, subsequent errors 1120 + * won't be shown 1121 + * mmm = error type 1122 + * cccc = channel 1123 + * If the mask doesn't match, report an error to the parsing logic 1124 + */ 1125 + if (!((errcode & 0xef80) == 0x80)) { 1126 + optype = "Can't parse: it is not a mem"; 1127 + } else { 1128 + switch (optypenum) { 1129 + case 0: 1130 + optype = "generic undef request error"; 1131 + break; 1132 + case 1: 1133 + optype = "memory read error"; 1134 + break; 1135 + case 2: 1136 + optype = "memory write error"; 1137 + break; 1138 + case 3: 1139 + optype = "addr/cmd error"; 1140 + break; 1141 + case 4: 1142 + optype = "memory scrubbing error"; 1143 + break; 1144 + default: 1145 + optype = "reserved"; 1146 + break; 1147 + } 1148 + } 1149 + 1150 + /* Only decode errors with an valid address (ADDRV) */ 1151 + if (!(m->status & MCI_STATUS_ADDRV)) 1152 + return; 1153 + 1154 + rc = get_memory_error_data(mci, m->addr, daddr, msg); 1155 + if (rc) 1156 + goto address_error; 1157 + 1158 + snprintf(msg, sizeof(msg), 1159 + "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d", 1160 + overflow ? " OVERFLOW" : "", (uc_err && recov) ? 
" recoverable" : "", mscod, 1161 + errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col); 1162 + 1163 + edac_dbg(0, "%s\n", msg); 1164 + 1165 + /* Call the helper to output message */ 1166 + edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, 1167 + m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg); 1168 + 1169 + return; 1170 + 1171 + address_error: 1172 + edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, ""); 1173 + } 1174 + 1175 + static void apl_get_dimm_config(struct mem_ctl_info *mci) 1176 + { 1177 + struct pnd2_pvt *pvt = mci->pvt_info; 1178 + struct dimm_info *dimm; 1179 + struct d_cr_drp0 *d; 1180 + u64 capacity; 1181 + int i, g; 1182 + 1183 + for (i = 0; i < APL_NUM_CHANNELS; i++) { 1184 + if (!(chan_mask & BIT(i))) 1185 + continue; 1186 + 1187 + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0); 1188 + if (!dimm) { 1189 + edac_dbg(0, "No allocated DIMM for channel %d\n", i); 1190 + continue; 1191 + } 1192 + 1193 + d = &drp0[i]; 1194 + for (g = 0; g < ARRAY_SIZE(dimms); g++) 1195 + if (dimms[g].addrdec == d->addrdec && 1196 + dimms[g].dden == d->dden && 1197 + dimms[g].dwid == d->dwid) 1198 + break; 1199 + 1200 + if (g == ARRAY_SIZE(dimms)) { 1201 + edac_dbg(0, "Channel %d: unrecognized DIMM\n", i); 1202 + continue; 1203 + } 1204 + 1205 + pvt->dimm_geom[i] = g; 1206 + capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) * 1207 + (1ul << dimms[g].colbits); 1208 + edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3)); 1209 + dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); 1210 + dimm->grain = 32; 1211 + dimm->dtype = (d->dwid == 0) ? 
DEV_X8 : DEV_X16; 1212 + dimm->mtype = MEM_DDR3; 1213 + dimm->edac_mode = EDAC_SECDED; 1214 + snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2); 1215 + } 1216 + } 1217 + 1218 + static const int dnv_dtypes[] = { 1219 + DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN 1220 + }; 1221 + 1222 + static void dnv_get_dimm_config(struct mem_ctl_info *mci) 1223 + { 1224 + int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype; 1225 + struct dimm_info *dimm; 1226 + struct d_cr_drp *d; 1227 + u64 capacity; 1228 + 1229 + if (dsch.ddr4en) { 1230 + memtype = MEM_DDR4; 1231 + banks = 16; 1232 + colbits = 10; 1233 + } else { 1234 + memtype = MEM_DDR3; 1235 + banks = 8; 1236 + } 1237 + 1238 + for (i = 0; i < DNV_NUM_CHANNELS; i++) { 1239 + if (dmap4[i].row14 == 31) 1240 + rowbits = 14; 1241 + else if (dmap4[i].row15 == 31) 1242 + rowbits = 15; 1243 + else if (dmap4[i].row16 == 31) 1244 + rowbits = 16; 1245 + else if (dmap4[i].row17 == 31) 1246 + rowbits = 17; 1247 + else 1248 + rowbits = 18; 1249 + 1250 + if (memtype == MEM_DDR3) { 1251 + if (dmap1[i].ca11 != 0x3f) 1252 + colbits = 12; 1253 + else 1254 + colbits = 10; 1255 + } 1256 + 1257 + d = &drp[i]; 1258 + /* DIMM0 is present if rank0 and/or rank1 is enabled */ 1259 + ranks_of_dimm[0] = d->rken0 + d->rken1; 1260 + /* DIMM1 is present if rank2 and/or rank3 is enabled */ 1261 + ranks_of_dimm[1] = d->rken2 + d->rken3; 1262 + 1263 + for (j = 0; j < DNV_MAX_DIMMS; j++) { 1264 + if (!ranks_of_dimm[j]) 1265 + continue; 1266 + 1267 + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); 1268 + if (!dimm) { 1269 + edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j); 1270 + continue; 1271 + } 1272 + 1273 + capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits); 1274 + edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3)); 1275 + dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); 1276 + dimm->grain = 32; 1277 + dimm->dtype = 
dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1]; 1278 + dimm->mtype = memtype; 1279 + dimm->edac_mode = EDAC_SECDED; 1280 + snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j); 1281 + } 1282 + } 1283 + } 1284 + 1285 + static int pnd2_register_mci(struct mem_ctl_info **ppmci) 1286 + { 1287 + struct edac_mc_layer layers[2]; 1288 + struct mem_ctl_info *mci; 1289 + struct pnd2_pvt *pvt; 1290 + int rc; 1291 + 1292 + rc = ops->check_ecc(); 1293 + if (rc < 0) 1294 + return rc; 1295 + 1296 + /* Allocate a new MC control structure */ 1297 + layers[0].type = EDAC_MC_LAYER_CHANNEL; 1298 + layers[0].size = ops->channels; 1299 + layers[0].is_virt_csrow = false; 1300 + layers[1].type = EDAC_MC_LAYER_SLOT; 1301 + layers[1].size = ops->dimms_per_channel; 1302 + layers[1].is_virt_csrow = true; 1303 + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); 1304 + if (!mci) 1305 + return -ENOMEM; 1306 + 1307 + pvt = mci->pvt_info; 1308 + memset(pvt, 0, sizeof(*pvt)); 1309 + 1310 + mci->mod_name = "pnd2_edac.c"; 1311 + mci->dev_name = ops->name; 1312 + mci->ctl_name = "Pondicherry2"; 1313 + 1314 + /* Get dimm basic config and the memory layout */ 1315 + ops->get_dimm_config(mci); 1316 + 1317 + if (edac_mc_add_mc(mci)) { 1318 + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); 1319 + edac_mc_free(mci); 1320 + return -EINVAL; 1321 + } 1322 + 1323 + *ppmci = mci; 1324 + 1325 + return 0; 1326 + } 1327 + 1328 + static void pnd2_unregister_mci(struct mem_ctl_info *mci) 1329 + { 1330 + if (unlikely(!mci || !mci->pvt_info)) { 1331 + pnd2_printk(KERN_ERR, "Couldn't find mci handler\n"); 1332 + return; 1333 + } 1334 + 1335 + /* Remove MC sysfs nodes */ 1336 + edac_mc_del_mc(NULL); 1337 + edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); 1338 + edac_mc_free(mci); 1339 + } 1340 + 1341 + /* 1342 + * Callback function registered with core kernel mce code. 1343 + * Called once for each logged error. 
1344 + */ 1345 + static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data) 1346 + { 1347 + struct mce *mce = (struct mce *)data; 1348 + struct mem_ctl_info *mci; 1349 + struct dram_addr daddr; 1350 + char *type; 1351 + 1352 + if (get_edac_report_status() == EDAC_REPORTING_DISABLED) 1353 + return NOTIFY_DONE; 1354 + 1355 + mci = pnd2_mci; 1356 + if (!mci) 1357 + return NOTIFY_DONE; 1358 + 1359 + /* 1360 + * Just let mcelog handle it if the error is 1361 + * outside the memory controller. A memory error 1362 + * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0. 1363 + * bit 12 has an special meaning. 1364 + */ 1365 + if ((mce->status & 0xefff) >> 7 != 1) 1366 + return NOTIFY_DONE; 1367 + 1368 + if (mce->mcgstatus & MCG_STATUS_MCIP) 1369 + type = "Exception"; 1370 + else 1371 + type = "Event"; 1372 + 1373 + pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n"); 1374 + pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n", 1375 + mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status); 1376 + pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc); 1377 + pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr); 1378 + pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc); 1379 + pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", 1380 + mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid); 1381 + 1382 + pnd2_mce_output_error(mci, mce, &daddr); 1383 + 1384 + /* Advice mcelog that the error were handled */ 1385 + return NOTIFY_STOP; 1386 + } 1387 + 1388 + static struct notifier_block pnd2_mce_dec = { 1389 + .notifier_call = pnd2_mce_check_error, 1390 + }; 1391 + 1392 + #ifdef CONFIG_EDAC_DEBUG 1393 + /* 1394 + * Write an address to this file to exercise the address decode 1395 + * logic in this driver. 
1396 + */ 1397 + static u64 pnd2_fake_addr; 1398 + #define PND2_BLOB_SIZE 1024 1399 + static char pnd2_result[PND2_BLOB_SIZE]; 1400 + static struct dentry *pnd2_test; 1401 + static struct debugfs_blob_wrapper pnd2_blob = { 1402 + .data = pnd2_result, 1403 + .size = 0 1404 + }; 1405 + 1406 + static int debugfs_u64_set(void *data, u64 val) 1407 + { 1408 + struct dram_addr daddr; 1409 + struct mce m; 1410 + 1411 + *(u64 *)data = val; 1412 + m.mcgstatus = 0; 1413 + /* ADDRV + MemRd + Unknown channel */ 1414 + m.status = MCI_STATUS_ADDRV + 0x9f; 1415 + m.addr = val; 1416 + pnd2_mce_output_error(pnd2_mci, &m, &daddr); 1417 + snprintf(pnd2_blob.data, PND2_BLOB_SIZE, 1418 + "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n", 1419 + m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col); 1420 + pnd2_blob.size = strlen(pnd2_blob.data); 1421 + 1422 + return 0; 1423 + } 1424 + DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); 1425 + 1426 + static void setup_pnd2_debug(void) 1427 + { 1428 + pnd2_test = edac_debugfs_create_dir("pnd2_test"); 1429 + edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test, 1430 + &pnd2_fake_addr, &fops_u64_wo); 1431 + debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob); 1432 + } 1433 + 1434 + static void teardown_pnd2_debug(void) 1435 + { 1436 + debugfs_remove_recursive(pnd2_test); 1437 + } 1438 + #else 1439 + static void setup_pnd2_debug(void) {} 1440 + static void teardown_pnd2_debug(void) {} 1441 + #endif /* CONFIG_EDAC_DEBUG */ 1442 + 1443 + 1444 + static int pnd2_probe(void) 1445 + { 1446 + int rc; 1447 + 1448 + edac_dbg(2, "\n"); 1449 + rc = get_registers(); 1450 + if (rc) 1451 + return rc; 1452 + 1453 + return pnd2_register_mci(&pnd2_mci); 1454 + } 1455 + 1456 + static void pnd2_remove(void) 1457 + { 1458 + edac_dbg(0, "\n"); 1459 + pnd2_unregister_mci(pnd2_mci); 1460 + } 1461 + 1462 + static struct dunit_ops apl_ops = { 1463 + .name = "pnd2/apl", 1464 + 
.type = APL, 1465 + .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY, 1466 + .pmiidx_shift = 0, 1467 + .channels = APL_NUM_CHANNELS, 1468 + .dimms_per_channel = 1, 1469 + .rd_reg = apl_rd_reg, 1470 + .get_registers = apl_get_registers, 1471 + .check_ecc = apl_check_ecc_active, 1472 + .mk_region = apl_mk_region, 1473 + .get_dimm_config = apl_get_dimm_config, 1474 + .pmi2mem = apl_pmi2mem, 1475 + }; 1476 + 1477 + static struct dunit_ops dnv_ops = { 1478 + .name = "pnd2/dnv", 1479 + .type = DNV, 1480 + .pmiaddr_shift = 0, 1481 + .pmiidx_shift = 1, 1482 + .channels = DNV_NUM_CHANNELS, 1483 + .dimms_per_channel = 2, 1484 + .rd_reg = dnv_rd_reg, 1485 + .get_registers = dnv_get_registers, 1486 + .check_ecc = dnv_check_ecc_active, 1487 + .mk_region = dnv_mk_region, 1488 + .get_dimm_config = dnv_get_dimm_config, 1489 + .pmi2mem = dnv_pmi2mem, 1490 + }; 1491 + 1492 + static const struct x86_cpu_id pnd2_cpuids[] = { 1493 + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops }, 1494 + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops }, 1495 + { } 1496 + }; 1497 + MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); 1498 + 1499 + static int __init pnd2_init(void) 1500 + { 1501 + const struct x86_cpu_id *id; 1502 + int rc; 1503 + 1504 + edac_dbg(2, "\n"); 1505 + 1506 + id = x86_match_cpu(pnd2_cpuids); 1507 + if (!id) 1508 + return -ENODEV; 1509 + 1510 + ops = (struct dunit_ops *)id->driver_data; 1511 + 1512 + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1513 + opstate_init(); 1514 + 1515 + rc = pnd2_probe(); 1516 + if (rc < 0) { 1517 + pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc); 1518 + return rc; 1519 + } 1520 + 1521 + if (!pnd2_mci) 1522 + return -ENODEV; 1523 + 1524 + mce_register_decode_chain(&pnd2_mce_dec); 1525 + setup_pnd2_debug(); 1526 + 1527 + return 0; 1528 + } 1529 + 1530 + static void __exit pnd2_exit(void) 1531 + { 1532 + edac_dbg(2, "\n"); 1533 + teardown_pnd2_debug(); 1534 + 
mce_unregister_decode_chain(&pnd2_mce_dec); 1535 + pnd2_remove(); 1536 + } 1537 + 1538 + module_init(pnd2_init); 1539 + module_exit(pnd2_exit); 1540 + 1541 + module_param(edac_op_state, int, 0444); 1542 + MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1543 + 1544 + MODULE_LICENSE("GPL v2"); 1545 + MODULE_AUTHOR("Tony Luck"); 1546 + MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
+301
drivers/edac/pnd2_edac.h
··· 1 + /* 2 + * Register bitfield descriptions for Pondicherry2 memory controller. 3 + * 4 + * Copyright (c) 2016, Intel Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + */ 15 + 16 + #ifndef _PND2_REGS_H 17 + #define _PND2_REGS_H 18 + 19 + struct b_cr_touud_lo_pci { 20 + u32 lock : 1; 21 + u32 reserved_1 : 19; 22 + u32 touud : 12; 23 + }; 24 + 25 + #define b_cr_touud_lo_pci_port 0x4c 26 + #define b_cr_touud_lo_pci_offset 0xa8 27 + #define b_cr_touud_lo_pci_r_opcode 0x04 28 + 29 + struct b_cr_touud_hi_pci { 30 + u32 touud : 7; 31 + u32 reserved_0 : 25; 32 + }; 33 + 34 + #define b_cr_touud_hi_pci_port 0x4c 35 + #define b_cr_touud_hi_pci_offset 0xac 36 + #define b_cr_touud_hi_pci_r_opcode 0x04 37 + 38 + struct b_cr_tolud_pci { 39 + u32 lock : 1; 40 + u32 reserved_0 : 19; 41 + u32 tolud : 12; 42 + }; 43 + 44 + #define b_cr_tolud_pci_port 0x4c 45 + #define b_cr_tolud_pci_offset 0xbc 46 + #define b_cr_tolud_pci_r_opcode 0x04 47 + 48 + struct b_cr_mchbar_lo_pci { 49 + u32 enable : 1; 50 + u32 pad_3_1 : 3; 51 + u32 pad_14_4: 11; 52 + u32 base: 17; 53 + }; 54 + 55 + struct b_cr_mchbar_hi_pci { 56 + u32 base : 7; 57 + u32 pad_31_7 : 25; 58 + }; 59 + 60 + /* Symmetric region */ 61 + struct b_cr_slice_channel_hash { 62 + u64 slice_1_disabled : 1; 63 + u64 hvm_mode : 1; 64 + u64 interleave_mode : 2; 65 + u64 slice_0_mem_disabled : 1; 66 + u64 reserved_0 : 1; 67 + u64 slice_hash_mask : 14; 68 + u64 reserved_1 : 11; 69 + u64 enable_pmi_dual_data_mode : 1; 70 + u64 ch_1_disabled : 1; 71 + u64 reserved_2 : 1; 72 + u64 
sym_slice0_channel_enabled : 2; 73 + u64 sym_slice1_channel_enabled : 2; 74 + u64 ch_hash_mask : 14; 75 + u64 reserved_3 : 11; 76 + u64 lock : 1; 77 + }; 78 + 79 + #define b_cr_slice_channel_hash_port 0x4c 80 + #define b_cr_slice_channel_hash_offset 0x4c58 81 + #define b_cr_slice_channel_hash_r_opcode 0x06 82 + 83 + struct b_cr_mot_out_base_mchbar { 84 + u32 reserved_0 : 14; 85 + u32 mot_out_base : 15; 86 + u32 reserved_1 : 1; 87 + u32 tr_en : 1; 88 + u32 imr_en : 1; 89 + }; 90 + 91 + #define b_cr_mot_out_base_mchbar_port 0x4c 92 + #define b_cr_mot_out_base_mchbar_offset 0x6af0 93 + #define b_cr_mot_out_base_mchbar_r_opcode 0x00 94 + 95 + struct b_cr_mot_out_mask_mchbar { 96 + u32 reserved_0 : 14; 97 + u32 mot_out_mask : 15; 98 + u32 reserved_1 : 1; 99 + u32 ia_iwb_en : 1; 100 + u32 gt_iwb_en : 1; 101 + }; 102 + 103 + #define b_cr_mot_out_mask_mchbar_port 0x4c 104 + #define b_cr_mot_out_mask_mchbar_offset 0x6af4 105 + #define b_cr_mot_out_mask_mchbar_r_opcode 0x00 106 + 107 + struct b_cr_asym_mem_region0_mchbar { 108 + u32 pad : 4; 109 + u32 slice0_asym_base : 11; 110 + u32 pad_18_15 : 4; 111 + u32 slice0_asym_limit : 11; 112 + u32 slice0_asym_channel_select : 1; 113 + u32 slice0_asym_enable : 1; 114 + }; 115 + 116 + #define b_cr_asym_mem_region0_mchbar_port 0x4c 117 + #define b_cr_asym_mem_region0_mchbar_offset 0x6e40 118 + #define b_cr_asym_mem_region0_mchbar_r_opcode 0x00 119 + 120 + struct b_cr_asym_mem_region1_mchbar { 121 + u32 pad : 4; 122 + u32 slice1_asym_base : 11; 123 + u32 pad_18_15 : 4; 124 + u32 slice1_asym_limit : 11; 125 + u32 slice1_asym_channel_select : 1; 126 + u32 slice1_asym_enable : 1; 127 + }; 128 + 129 + #define b_cr_asym_mem_region1_mchbar_port 0x4c 130 + #define b_cr_asym_mem_region1_mchbar_offset 0x6e44 131 + #define b_cr_asym_mem_region1_mchbar_r_opcode 0x00 132 + 133 + /* Some bit fields moved in above two structs on Denverton */ 134 + struct b_cr_asym_mem_region_denverton { 135 + u32 pad : 4; 136 + u32 slice_asym_base : 8; 137 + u32 
pad_19_12 : 8; 138 + u32 slice_asym_limit : 8; 139 + u32 pad_28_30 : 3; 140 + u32 slice_asym_enable : 1; 141 + }; 142 + 143 + struct b_cr_asym_2way_mem_region_mchbar { 144 + u32 pad : 2; 145 + u32 asym_2way_intlv_mode : 2; 146 + u32 asym_2way_base : 11; 147 + u32 pad_16_15 : 2; 148 + u32 asym_2way_limit : 11; 149 + u32 pad_30_28 : 3; 150 + u32 asym_2way_interleave_enable : 1; 151 + }; 152 + 153 + #define b_cr_asym_2way_mem_region_mchbar_port 0x4c 154 + #define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50 155 + #define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00 156 + 157 + /* Apollo Lake d-unit */ 158 + 159 + struct d_cr_drp0 { 160 + u32 rken0 : 1; 161 + u32 rken1 : 1; 162 + u32 ddmen : 1; 163 + u32 rsvd3 : 1; 164 + u32 dwid : 2; 165 + u32 dden : 3; 166 + u32 rsvd13_9 : 5; 167 + u32 rsien : 1; 168 + u32 bahen : 1; 169 + u32 rsvd18_16 : 3; 170 + u32 caswizzle : 2; 171 + u32 eccen : 1; 172 + u32 dramtype : 3; 173 + u32 blmode : 3; 174 + u32 addrdec : 2; 175 + u32 dramdevice_pr : 2; 176 + }; 177 + 178 + #define d_cr_drp0_offset 0x1400 179 + #define d_cr_drp0_r_opcode 0x00 180 + 181 + /* Denverton d-unit */ 182 + 183 + struct d_cr_dsch { 184 + u32 ch0en : 1; 185 + u32 ch1en : 1; 186 + u32 ddr4en : 1; 187 + u32 coldwake : 1; 188 + u32 newbypdis : 1; 189 + u32 chan_width : 1; 190 + u32 rsvd6_6 : 1; 191 + u32 ooodis : 1; 192 + u32 rsvd18_8 : 11; 193 + u32 ic : 1; 194 + u32 rsvd31_20 : 12; 195 + }; 196 + 197 + #define d_cr_dsch_port 0x16 198 + #define d_cr_dsch_offset 0x0 199 + #define d_cr_dsch_r_opcode 0x0 200 + 201 + struct d_cr_ecc_ctrl { 202 + u32 eccen : 1; 203 + u32 rsvd31_1 : 31; 204 + }; 205 + 206 + #define d_cr_ecc_ctrl_offset 0x180 207 + #define d_cr_ecc_ctrl_r_opcode 0x0 208 + 209 + struct d_cr_drp { 210 + u32 rken0 : 1; 211 + u32 rken1 : 1; 212 + u32 rken2 : 1; 213 + u32 rken3 : 1; 214 + u32 dimmdwid0 : 2; 215 + u32 dimmdden0 : 2; 216 + u32 dimmdwid1 : 2; 217 + u32 dimmdden1 : 2; 218 + u32 rsvd15_12 : 4; 219 + u32 dimmflip : 1; 220 + u32 rsvd31_17 : 15; 
221 + }; 222 + 223 + #define d_cr_drp_offset 0x158 224 + #define d_cr_drp_r_opcode 0x0 225 + 226 + struct d_cr_dmap { 227 + u32 ba0 : 5; 228 + u32 ba1 : 5; 229 + u32 bg0 : 5; /* if ddr3, ba2 = bg0 */ 230 + u32 bg1 : 5; /* if ddr3, ba3 = bg1 */ 231 + u32 rs0 : 5; 232 + u32 rs1 : 5; 233 + u32 rsvd : 2; 234 + }; 235 + 236 + #define d_cr_dmap_offset 0x174 237 + #define d_cr_dmap_r_opcode 0x0 238 + 239 + struct d_cr_dmap1 { 240 + u32 ca11 : 6; 241 + u32 bxor : 1; 242 + u32 rsvd : 25; 243 + }; 244 + 245 + #define d_cr_dmap1_offset 0xb4 246 + #define d_cr_dmap1_r_opcode 0x0 247 + 248 + struct d_cr_dmap2 { 249 + u32 row0 : 5; 250 + u32 row1 : 5; 251 + u32 row2 : 5; 252 + u32 row3 : 5; 253 + u32 row4 : 5; 254 + u32 row5 : 5; 255 + u32 rsvd : 2; 256 + }; 257 + 258 + #define d_cr_dmap2_offset 0x148 259 + #define d_cr_dmap2_r_opcode 0x0 260 + 261 + struct d_cr_dmap3 { 262 + u32 row6 : 5; 263 + u32 row7 : 5; 264 + u32 row8 : 5; 265 + u32 row9 : 5; 266 + u32 row10 : 5; 267 + u32 row11 : 5; 268 + u32 rsvd : 2; 269 + }; 270 + 271 + #define d_cr_dmap3_offset 0x14c 272 + #define d_cr_dmap3_r_opcode 0x0 273 + 274 + struct d_cr_dmap4 { 275 + u32 row12 : 5; 276 + u32 row13 : 5; 277 + u32 row14 : 5; 278 + u32 row15 : 5; 279 + u32 row16 : 5; 280 + u32 row17 : 5; 281 + u32 rsvd : 2; 282 + }; 283 + 284 + #define d_cr_dmap4_offset 0x150 285 + #define d_cr_dmap4_r_opcode 0x0 286 + 287 + struct d_cr_dmap5 { 288 + u32 ca3 : 4; 289 + u32 ca4 : 4; 290 + u32 ca5 : 4; 291 + u32 ca6 : 4; 292 + u32 ca7 : 4; 293 + u32 ca8 : 4; 294 + u32 ca9 : 4; 295 + u32 rsvd : 4; 296 + }; 297 + 298 + #define d_cr_dmap5_offset 0x154 299 + #define d_cr_dmap5_r_opcode 0x0 300 + 301 + #endif /* _PND2_REGS_H */
+1 -1
drivers/edac/xgene_edac.c
··· 1596 1596 reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS); 1597 1597 if (!reg) 1598 1598 goto chk_iob_axi0; 1599 - dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n"); 1599 + dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n"); 1600 1600 if (reg & IOBPA_RDATA_CORRUPT_MASK) 1601 1601 dev_err(edac_dev->dev, "IOB PA read data RAM error\n"); 1602 1602 if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
-1
drivers/firmware/efi/efi.c
··· 389 389 return 0; 390 390 } 391 391 } 392 - pr_err_once("requested map not found.\n"); 393 392 return -ENOENT; 394 393 } 395 394
+1 -1
drivers/firmware/efi/esrt.c
··· 254 254 255 255 rc = efi_mem_desc_lookup(efi.esrt, &md); 256 256 if (rc < 0) { 257 - pr_err("ESRT header is not in the memory map.\n"); 257 + pr_warn("ESRT header is not in the memory map.\n"); 258 258 return; 259 259 } 260 260
+2 -2
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 1311 1311 goto out_pm_put; 1312 1312 } 1313 1313 1314 + mutex_lock(&gpu->lock); 1315 + 1314 1316 fence = etnaviv_gpu_fence_alloc(gpu); 1315 1317 if (!fence) { 1316 1318 event_free(gpu, event); 1317 1319 ret = -ENOMEM; 1318 1320 goto out_pm_put; 1319 1321 } 1320 - 1321 - mutex_lock(&gpu->lock); 1322 1322 1323 1323 gpu->event[event].fence = fence; 1324 1324 submit->fence = fence->seqno;
+2 -1
drivers/gpu/drm/i915/gvt/edid.c
··· 495 495 unsigned char val = edid_get_byte(vgpu); 496 496 497 497 aux_data_for_write = (val << 16); 498 - } 498 + } else 499 + aux_data_for_write = (0xff << 16); 499 500 } 500 501 /* write the return value in AUX_CH_DATA reg which includes: 501 502 * ACK of I2C_WRITE
+6 -2
drivers/gpu/drm/i915/gvt/gtt.c
··· 1837 1837 ret = gtt_entry_p2m(vgpu, &e, &m); 1838 1838 if (ret) { 1839 1839 gvt_vgpu_err("fail to translate guest gtt entry\n"); 1840 - return ret; 1840 + /* guest driver may read/write the entry when partial 1841 + * update the entry in this situation p2m will fail 1842 + * settting the shadow entry to point to a scratch page 1843 + */ 1844 + ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn); 1841 1845 } 1842 1846 } else { 1843 1847 m = e; 1844 - m.val64 = 0; 1848 + ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn); 1845 1849 } 1846 1850 1847 1851 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+9 -1
drivers/gpu/drm/i915/gvt/handlers.c
··· 970 970 return 0; 971 971 } 972 972 973 + static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset, 974 + void *p_data, unsigned int bytes) 975 + { 976 + *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH); 977 + write_vreg(vgpu, offset, p_data, bytes); 978 + return 0; 979 + } 980 + 973 981 static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 974 982 void *p_data, unsigned int bytes) 975 983 { ··· 2246 2238 MMIO_D(0x7180, D_ALL); 2247 2239 MMIO_D(0x7408, D_ALL); 2248 2240 MMIO_D(0x7c00, D_ALL); 2249 - MMIO_D(GEN6_MBCTL, D_ALL); 2241 + MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write); 2250 2242 MMIO_D(0x911c, D_ALL); 2251 2243 MMIO_D(0x9120, D_ALL); 2252 2244 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
+2
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 1326 1326 vgpu->handle = (unsigned long)info; 1327 1327 info->vgpu = vgpu; 1328 1328 info->kvm = kvm; 1329 + kvm_get_kvm(info->kvm); 1329 1330 1330 1331 kvmgt_protect_table_init(info); 1331 1332 gvt_cache_init(vgpu); ··· 1348 1347 } 1349 1348 1350 1349 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1350 + kvm_put_kvm(info->kvm); 1351 1351 kvmgt_protect_table_destroy(info); 1352 1352 gvt_cache_destroy(info->vgpu); 1353 1353 vfree(info);
+1 -1
drivers/gpu/drm/i915/gvt/render.c
··· 207 207 l3_offset.reg = 0xb020; 208 208 for (i = 0; i < 32; i++) { 209 209 gen9_render_mocs_L3[i] = I915_READ(l3_offset); 210 - I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset)); 210 + I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset)); 211 211 POSTING_READ(l3_offset); 212 212 l3_offset.reg += 4; 213 213 }
+6 -1
drivers/gpu/drm/i915/gvt/scheduler.c
··· 127 127 return 0; 128 128 } 129 129 130 + static inline bool is_gvt_request(struct drm_i915_gem_request *req) 131 + { 132 + return i915_gem_context_force_single_submission(req->ctx); 133 + } 134 + 130 135 static int shadow_context_status_change(struct notifier_block *nb, 131 136 unsigned long action, void *data) 132 137 { ··· 142 137 struct intel_vgpu_workload *workload = 143 138 scheduler->current_workload[req->engine->id]; 144 139 145 - if (unlikely(!workload)) 140 + if (!is_gvt_request(req) || unlikely(!workload)) 146 141 return NOTIFY_OK; 147 142 148 143 switch (action) {
+2
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 2024 2024 ret = context_pin(ctx, flags); 2025 2025 if (ret) 2026 2026 goto error; 2027 + 2028 + ce->state->obj->mm.dirty = true; 2027 2029 } 2028 2030 2029 2031 /* The kernel context is only used as a placeholder for flushing the
+2 -2
drivers/gpu/drm/radeon/radeon_ttm.c
··· 213 213 rbo->placement.num_busy_placement = 0; 214 214 for (i = 0; i < rbo->placement.num_placement; i++) { 215 215 if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { 216 - if (rbo->placements[0].fpfn < fpfn) 217 - rbo->placements[0].fpfn = fpfn; 216 + if (rbo->placements[i].fpfn < fpfn) 217 + rbo->placements[i].fpfn = fpfn; 218 218 } else { 219 219 rbo->placement.busy_placement = 220 220 &rbo->placements[i];
+12 -1
drivers/gpu/drm/vc4/vc4_crtc.c
··· 846 846 drm_atomic_helper_crtc_destroy_state(crtc, state); 847 847 } 848 848 849 + static void 850 + vc4_crtc_reset(struct drm_crtc *crtc) 851 + { 852 + if (crtc->state) 853 + __drm_atomic_helper_crtc_destroy_state(crtc->state); 854 + 855 + crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); 856 + if (crtc->state) 857 + crtc->state->crtc = crtc; 858 + } 859 + 849 860 static const struct drm_crtc_funcs vc4_crtc_funcs = { 850 861 .set_config = drm_atomic_helper_set_config, 851 862 .destroy = vc4_crtc_destroy, ··· 864 853 .set_property = NULL, 865 854 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 866 855 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 867 - .reset = drm_atomic_helper_crtc_reset, 856 + .reset = vc4_crtc_reset, 868 857 .atomic_duplicate_state = vc4_crtc_duplicate_state, 869 858 .atomic_destroy_state = vc4_crtc_destroy_state, 870 859 .gamma_set = vc4_crtc_gamma_set,
+1
drivers/hid/hid-core.c
··· 2112 2112 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, 2113 2113 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2114 2114 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 2115 + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, 2115 2116 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 2116 2117 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 2117 2118 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+1
drivers/hid/hid-ids.h
··· 1082 1082 1083 1083 #define USB_VENDOR_ID_XIN_MO 0x16c0 1084 1084 #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1 1085 + #define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1 1085 1086 1086 1087 #define USB_VENDOR_ID_XIROKU 0x1477 1087 1088 #define USB_DEVICE_ID_XIROKU_SPX 0x1006
+1
drivers/hid/hid-xinmo.c
··· 46 46 47 47 static const struct hid_device_id xinmo_devices[] = { 48 48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 49 + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, 49 50 { } 50 51 }; 51 52
+10 -8
drivers/hid/wacom_sys.c
··· 2165 2165 2166 2166 wacom_update_name(wacom, wireless ? " (WL)" : ""); 2167 2167 2168 + /* pen only Bamboo neither support touch nor pad */ 2169 + if ((features->type == BAMBOO_PEN) && 2170 + ((features->device_type & WACOM_DEVICETYPE_TOUCH) || 2171 + (features->device_type & WACOM_DEVICETYPE_PAD))) { 2172 + error = -ENODEV; 2173 + goto fail; 2174 + } 2175 + 2168 2176 error = wacom_add_shared_data(hdev); 2169 2177 if (error) 2170 2178 goto fail; ··· 2216 2208 /* touch only Bamboo doesn't support pen */ 2217 2209 if ((features->type == BAMBOO_TOUCH) && 2218 2210 (features->device_type & WACOM_DEVICETYPE_PEN)) { 2219 - error = -ENODEV; 2220 - goto fail_quirks; 2221 - } 2222 - 2223 - /* pen only Bamboo neither support touch nor pad */ 2224 - if ((features->type == BAMBOO_PEN) && 2225 - ((features->device_type & WACOM_DEVICETYPE_TOUCH) || 2226 - (features->device_type & WACOM_DEVICETYPE_PAD))) { 2211 + cancel_delayed_work_sync(&wacom->init_work); 2212 + _wacom_query_tablet_data(wacom); 2227 2213 error = -ENODEV; 2228 2214 goto fail_quirks; 2229 2215 }
+6 -28
drivers/i2c/muxes/i2c-mux-pca954x.c
··· 35 35 * warranty of any kind, whether express or implied. 36 36 */ 37 37 38 - #include <linux/acpi.h> 39 38 #include <linux/device.h> 40 39 #include <linux/gpio/consumer.h> 41 40 #include <linux/i2c.h> ··· 116 117 .has_irq = 1, 117 118 .muxtype = pca954x_isswi, 118 119 }, 120 + [pca_9546] = { 121 + .nchans = 4, 122 + .muxtype = pca954x_isswi, 123 + }, 119 124 [pca_9547] = { 120 125 .nchans = 8, 121 126 .enable = 0x8, ··· 137 134 { "pca9543", pca_9543 }, 138 135 { "pca9544", pca_9544 }, 139 136 { "pca9545", pca_9545 }, 140 - { "pca9546", pca_9545 }, 137 + { "pca9546", pca_9546 }, 141 138 { "pca9547", pca_9547 }, 142 139 { "pca9548", pca_9548 }, 143 140 { } 144 141 }; 145 142 MODULE_DEVICE_TABLE(i2c, pca954x_id); 146 - 147 - #ifdef CONFIG_ACPI 148 - static const struct acpi_device_id pca954x_acpi_ids[] = { 149 - { .id = "PCA9540", .driver_data = pca_9540 }, 150 - { .id = "PCA9542", .driver_data = pca_9542 }, 151 - { .id = "PCA9543", .driver_data = pca_9543 }, 152 - { .id = "PCA9544", .driver_data = pca_9544 }, 153 - { .id = "PCA9545", .driver_data = pca_9545 }, 154 - { .id = "PCA9546", .driver_data = pca_9545 }, 155 - { .id = "PCA9547", .driver_data = pca_9547 }, 156 - { .id = "PCA9548", .driver_data = pca_9548 }, 157 - { } 158 - }; 159 - MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids); 160 - #endif 161 143 162 144 #ifdef CONFIG_OF 163 145 static const struct of_device_id pca954x_of_match[] = { ··· 381 393 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev); 382 394 if (match) 383 395 data->chip = of_device_get_match_data(&client->dev); 384 - else if (id) 396 + else 385 397 data->chip = &chips[id->driver_data]; 386 - else { 387 - const struct acpi_device_id *acpi_id; 388 - 389 - acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids), 390 - &client->dev); 391 - if (!acpi_id) 392 - return -ENODEV; 393 - data->chip = &chips[acpi_id->driver_data]; 394 - } 395 398 396 399 data->last_chan = 0; /* force the first selection */ 397 400 ··· 471 492 .name = 
"pca954x", 472 493 .pm = &pca954x_pm, 473 494 .of_match_table = of_match_ptr(pca954x_of_match), 474 - .acpi_match_table = ACPI_PTR(pca954x_acpi_ids), 475 495 }, 476 496 .probe = pca954x_probe, 477 497 .remove = pca954x_remove,
+1
drivers/irqchip/Kconfig
··· 262 262 263 263 config MVEBU_ODMI 264 264 bool 265 + select GENERIC_MSI_IRQ_DOMAIN 265 266 266 267 config MVEBU_PIC 267 268 bool
+4
drivers/irqchip/irq-mips-gic.c
··· 991 991 992 992 static void __init gic_map_interrupts(struct device_node *node) 993 993 { 994 + gic_map_single_int(node, GIC_LOCAL_INT_WD); 995 + gic_map_single_int(node, GIC_LOCAL_INT_COMPARE); 994 996 gic_map_single_int(node, GIC_LOCAL_INT_TIMER); 995 997 gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR); 998 + gic_map_single_int(node, GIC_LOCAL_INT_SWINT0); 999 + gic_map_single_int(node, GIC_LOCAL_INT_SWINT1); 996 1000 gic_map_single_int(node, GIC_LOCAL_INT_FDC); 997 1001 } 998 1002
+10 -1
drivers/mmc/host/sdhci-of-at91.c
··· 29 29 30 30 #include "sdhci-pltfm.h" 31 31 32 + #define SDMMC_MC1R 0x204 33 + #define SDMMC_MC1R_DDR BIT(3) 32 34 #define SDMMC_CACR 0x230 33 35 #define SDMMC_CACR_CAPWREN BIT(0) 34 36 #define SDMMC_CACR_KEY (0x46 << 8) ··· 105 103 sdhci_set_power_noreg(host, mode, vdd); 106 104 } 107 105 106 + void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) 107 + { 108 + if (timing == MMC_TIMING_MMC_DDR52) 109 + sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R); 110 + sdhci_set_uhs_signaling(host, timing); 111 + } 112 + 108 113 static const struct sdhci_ops sdhci_at91_sama5d2_ops = { 109 114 .set_clock = sdhci_at91_set_clock, 110 115 .set_bus_width = sdhci_set_bus_width, 111 116 .reset = sdhci_reset, 112 - .set_uhs_signaling = sdhci_set_uhs_signaling, 117 + .set_uhs_signaling = sdhci_at91_set_uhs_signaling, 113 118 .set_power = sdhci_at91_set_power, 114 119 }; 115 120
+6
drivers/mmc/host/sdhci.c
··· 1830 1830 struct sdhci_host *host = mmc_priv(mmc); 1831 1831 unsigned long flags; 1832 1832 1833 + if (enable) 1834 + pm_runtime_get_noresume(host->mmc->parent); 1835 + 1833 1836 spin_lock_irqsave(&host->lock, flags); 1834 1837 if (enable) 1835 1838 host->flags |= SDHCI_SDIO_IRQ_ENABLED; ··· 1841 1838 1842 1839 sdhci_enable_sdio_irq_nolock(host, enable); 1843 1840 spin_unlock_irqrestore(&host->lock, flags); 1841 + 1842 + if (!enable) 1843 + pm_runtime_put_noidle(host->mmc->parent); 1844 1844 } 1845 1845 1846 1846 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+55 -3
drivers/pci/host/pci-thunder-pem.c
··· 14 14 * Copyright (C) 2015 - 2016 Cavium, Inc. 15 15 */ 16 16 17 + #include <linux/bitfield.h> 17 18 #include <linux/kernel.h> 18 19 #include <linux/init.h> 19 20 #include <linux/of_address.h> ··· 335 334 336 335 #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) 337 336 337 + #define PEM_RES_BASE 0x87e0c0000000UL 338 + #define PEM_NODE_MASK GENMASK(45, 44) 339 + #define PEM_INDX_MASK GENMASK(26, 24) 340 + #define PEM_MIN_DOM_IN_NODE 4 341 + #define PEM_MAX_DOM_IN_NODE 10 342 + 343 + static void thunder_pem_reserve_range(struct device *dev, int seg, 344 + struct resource *r) 345 + { 346 + resource_size_t start = r->start, end = r->end; 347 + struct resource *res; 348 + const char *regionid; 349 + 350 + regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg); 351 + if (!regionid) 352 + return; 353 + 354 + res = request_mem_region(start, end - start + 1, regionid); 355 + if (res) 356 + res->flags &= ~IORESOURCE_BUSY; 357 + else 358 + kfree(regionid); 359 + 360 + dev_info(dev, "%pR %s reserved\n", r, 361 + res ? 
"has been" : "could not be"); 362 + } 363 + 364 + static void thunder_pem_legacy_fw(struct acpi_pci_root *root, 365 + struct resource *res_pem) 366 + { 367 + int node = acpi_get_node(root->device->handle); 368 + int index; 369 + 370 + if (node == NUMA_NO_NODE) 371 + node = 0; 372 + 373 + index = root->segment - PEM_MIN_DOM_IN_NODE; 374 + index -= node * PEM_MAX_DOM_IN_NODE; 375 + res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | 376 + FIELD_PREP(PEM_INDX_MASK, index); 377 + res_pem->end = res_pem->start + SZ_16M - 1; 378 + res_pem->flags = IORESOURCE_MEM; 379 + } 380 + 338 381 static int thunder_pem_acpi_init(struct pci_config_window *cfg) 339 382 { 340 383 struct device *dev = cfg->parent; ··· 391 346 if (!res_pem) 392 347 return -ENOMEM; 393 348 394 - ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem); 349 + ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem); 350 + 351 + /* 352 + * If we fail to gather resources it means that we run with old 353 + * FW where we need to calculate PEM-specific resources manually. 354 + */ 395 355 if (ret) { 396 - dev_err(dev, "can't get rc base address\n"); 397 - return ret; 356 + thunder_pem_legacy_fw(root, res_pem); 357 + /* Reserve PEM-specific resources and PCI configuration space */ 358 + thunder_pem_reserve_range(dev, root->segment, res_pem); 359 + thunder_pem_reserve_range(dev, root->segment, &cfg->res); 398 360 } 399 361 400 362 return thunder_pem_init(dev, cfg, res_pem);
+12 -12
drivers/pci/host/pcie-iproc-bcma.c
··· 44 44 { 45 45 struct device *dev = &bdev->dev; 46 46 struct iproc_pcie *pcie; 47 - LIST_HEAD(res); 48 - struct resource res_mem; 47 + LIST_HEAD(resources); 49 48 int ret; 50 49 51 50 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); ··· 62 63 63 64 pcie->base_addr = bdev->addr; 64 65 65 - res_mem.start = bdev->addr_s[0]; 66 - res_mem.end = bdev->addr_s[0] + SZ_128M - 1; 67 - res_mem.name = "PCIe MEM space"; 68 - res_mem.flags = IORESOURCE_MEM; 69 - pci_add_resource(&res, &res_mem); 66 + pcie->mem.start = bdev->addr_s[0]; 67 + pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1; 68 + pcie->mem.name = "PCIe MEM space"; 69 + pcie->mem.flags = IORESOURCE_MEM; 70 + pci_add_resource(&resources, &pcie->mem); 70 71 71 72 pcie->map_irq = iproc_pcie_bcma_map_irq; 72 73 73 - ret = iproc_pcie_setup(pcie, &res); 74 - if (ret) 74 + ret = iproc_pcie_setup(pcie, &resources); 75 + if (ret) { 75 76 dev_err(dev, "PCIe controller setup failed\n"); 76 - 77 - pci_free_resource_list(&res); 77 + pci_free_resource_list(&resources); 78 + return ret; 79 + } 78 80 79 81 bcma_set_drvdata(bdev, pcie); 80 - return ret; 82 + return 0; 81 83 } 82 84 83 85 static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
+10 -9
drivers/pci/host/pcie-iproc-platform.c
··· 51 51 struct device_node *np = dev->of_node; 52 52 struct resource reg; 53 53 resource_size_t iobase = 0; 54 - LIST_HEAD(res); 54 + LIST_HEAD(resources); 55 55 int ret; 56 56 57 57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); ··· 96 96 pcie->phy = NULL; 97 97 } 98 98 99 - ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase); 99 + ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources, 100 + &iobase); 100 101 if (ret) { 101 - dev_err(dev, 102 - "unable to get PCI host bridge resources\n"); 102 + dev_err(dev, "unable to get PCI host bridge resources\n"); 103 103 return ret; 104 104 } 105 105 ··· 112 112 pcie->map_irq = of_irq_parse_and_map_pci; 113 113 } 114 114 115 - ret = iproc_pcie_setup(pcie, &res); 116 - if (ret) 115 + ret = iproc_pcie_setup(pcie, &resources); 116 + if (ret) { 117 117 dev_err(dev, "PCIe controller setup failed\n"); 118 - 119 - pci_free_resource_list(&res); 118 + pci_free_resource_list(&resources); 119 + return ret; 120 + } 120 121 121 122 platform_set_drvdata(pdev, pcie); 122 - return ret; 123 + return 0; 123 124 } 124 125 125 126 static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
+1
drivers/pci/host/pcie-iproc.h
··· 90 90 #ifdef CONFIG_ARM 91 91 struct pci_sys_data sysdata; 92 92 #endif 93 + struct resource mem; 93 94 struct pci_bus *root_bus; 94 95 struct phy *phy; 95 96 int (*map_irq)(const struct pci_dev *, u8, u8);
+2 -2
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
··· 667 667 }; 668 668 669 669 static const char * const i2c_ao_groups[] = { 670 - "i2c_sdk_ao", "i2c_sda_ao", 670 + "i2c_sck_ao", "i2c_sda_ao", 671 671 }; 672 672 673 673 static const char * const i2c_slave_ao_groups[] = { 674 - "i2c_slave_sdk_ao", "i2c_slave_sda_ao", 674 + "i2c_slave_sck_ao", "i2c_slave_sda_ao", 675 675 }; 676 676 677 677 static const char * const remote_input_ao_groups[] = {
+24 -6
drivers/pinctrl/pinctrl-st.c
··· 1285 1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); 1286 1286 } 1287 1287 1288 + static int st_gpio_irq_request_resources(struct irq_data *d) 1289 + { 1290 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1291 + 1292 + st_gpio_direction_input(gc, d->hwirq); 1293 + 1294 + return gpiochip_lock_as_irq(gc, d->hwirq); 1295 + } 1296 + 1297 + static void st_gpio_irq_release_resources(struct irq_data *d) 1298 + { 1299 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1300 + 1301 + gpiochip_unlock_as_irq(gc, d->hwirq); 1302 + } 1303 + 1288 1304 static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) 1289 1305 { 1290 1306 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ··· 1454 1438 }; 1455 1439 1456 1440 static struct irq_chip st_gpio_irqchip = { 1457 - .name = "GPIO", 1458 - .irq_disable = st_gpio_irq_mask, 1459 - .irq_mask = st_gpio_irq_mask, 1460 - .irq_unmask = st_gpio_irq_unmask, 1461 - .irq_set_type = st_gpio_irq_set_type, 1462 - .flags = IRQCHIP_SKIP_SET_WAKE, 1441 + .name = "GPIO", 1442 + .irq_request_resources = st_gpio_irq_request_resources, 1443 + .irq_release_resources = st_gpio_irq_release_resources, 1444 + .irq_disable = st_gpio_irq_mask, 1445 + .irq_mask = st_gpio_irq_mask, 1446 + .irq_unmask = st_gpio_irq_unmask, 1447 + .irq_set_type = st_gpio_irq_set_type, 1448 + .flags = IRQCHIP_SKIP_SET_WAKE, 1463 1449 }; 1464 1450 1465 1451 static int st_gpiolib_register_bank(struct st_pinctrl *info,
+30
drivers/pinctrl/qcom/pinctrl-ipq4019.c
··· 405 405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 406 406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 407 407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 408 + PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 409 + PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 410 + PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 411 + PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 412 + PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 413 + PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 414 + PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 415 + PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 416 + PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 417 + PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 418 + PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 419 + PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 420 + PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 421 + PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 422 + PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 423 + PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 424 + PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 425 + PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 426 + PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 427 + PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 428 + PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 429 + PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 430 + PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 431 + 
PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 432 + PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 433 + PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 434 + PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 435 + PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 436 + PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 437 + PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 408 438 }; 409 439 410 440 static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
-4
drivers/pinctrl/qcom/pinctrl-msm.c
··· 609 609 610 610 raw_spin_lock_irqsave(&pctrl->lock, flags); 611 611 612 - val = readl(pctrl->regs + g->intr_status_reg); 613 - val &= ~BIT(g->intr_status_bit); 614 - writel(val, pctrl->regs + g->intr_status_reg); 615 - 616 612 val = readl(pctrl->regs + g->intr_cfg_reg); 617 613 val |= BIT(g->intr_enable_bit); 618 614 writel(val, pctrl->regs + g->intr_cfg_reg);
+10 -3
drivers/pinctrl/samsung/pinctrl-samsung.c
··· 988 988 989 989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { 990 990 res = platform_get_resource(pdev, IORESOURCE_MEM, i); 991 - virt_base[i] = devm_ioremap_resource(&pdev->dev, res); 992 - if (IS_ERR(virt_base[i])) 993 - return ERR_CAST(virt_base[i]); 991 + if (!res) { 992 + dev_err(&pdev->dev, "failed to get mem%d resource\n", i); 993 + return ERR_PTR(-EINVAL); 994 + } 995 + virt_base[i] = devm_ioremap(&pdev->dev, res->start, 996 + resource_size(res)); 997 + if (!virt_base[i]) { 998 + dev_err(&pdev->dev, "failed to ioremap %pR\n", res); 999 + return ERR_PTR(-EIO); 1000 + } 994 1001 } 995 1002 996 1003 bank = d->pin_banks;
+1 -1
drivers/pinctrl/ti/Kconfig
··· 1 1 config PINCTRL_TI_IODELAY 2 2 tristate "TI IODelay Module pinconf driver" 3 - depends on OF 3 + depends on OF && (SOC_DRA7XX || COMPILE_TEST) 4 4 select GENERIC_PINCTRL_GROUPS 5 5 select GENERIC_PINMUX_FUNCTIONS 6 6 select GENERIC_PINCONF
+1 -4
drivers/ptp/ptp_kvm.c
··· 193 193 194 194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); 195 195 196 - if (IS_ERR(kvm_ptp_clock.ptp_clock)) 197 - return PTR_ERR(kvm_ptp_clock.ptp_clock); 198 - 199 - return 0; 196 + return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock); 200 197 } 201 198 202 199 module_init(ptp_kvm_init);
+2 -2
drivers/rapidio/devices/tsi721.c
··· 37 37 #include "tsi721.h" 38 38 39 39 #ifdef DEBUG 40 - u32 dbg_level; 41 - module_param(dbg_level, uint, S_IWUSR | S_IRUGO); 40 + u32 tsi_dbg_level; 41 + module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO); 42 42 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); 43 43 #endif 44 44
+2 -2
drivers/rapidio/devices/tsi721.h
··· 40 40 }; 41 41 42 42 #ifdef DEBUG 43 - extern u32 dbg_level; 43 + extern u32 tsi_dbg_level; 44 44 45 45 #define tsi_debug(level, dev, fmt, arg...) \ 46 46 do { \ 47 - if (DBG_##level & dbg_level) \ 47 + if (DBG_##level & tsi_dbg_level) \ 48 48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \ 49 49 } while (0) 50 50 #else
+6 -8
drivers/scsi/aacraid/commsup.c
··· 2056 2056 { 2057 2057 struct hw_fib **hw_fib_p; 2058 2058 struct fib **fib_p; 2059 - int rcode = 1; 2060 2059 2061 2060 hw_fib_p = hw_fib_pool; 2062 2061 fib_p = fib_pool; ··· 2073 2074 } 2074 2075 } 2075 2076 2077 + /* 2078 + * Get the actual number of allocated fibs 2079 + */ 2076 2080 num = hw_fib_p - hw_fib_pool; 2077 - if (!num) 2078 - rcode = 0; 2079 - 2080 - return rcode; 2081 + return num; 2081 2082 } 2082 2083 2083 2084 static void wakeup_fibctx_threads(struct aac_dev *dev, ··· 2185 2186 struct fib *fib; 2186 2187 unsigned long flags; 2187 2188 spinlock_t *t_lock; 2188 - unsigned int rcode; 2189 2189 2190 2190 t_lock = dev->queues->queue[HostNormCmdQueue].lock; 2191 2191 spin_lock_irqsave(t_lock, flags); ··· 2267 2269 * Fill up fib pointer pools with actual fibs 2268 2270 * and hw_fibs 2269 2271 */ 2270 - rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num); 2271 - if (!rcode) 2272 + num = fillup_pools(dev, hw_fib_pool, fib_pool, num); 2273 + if (!num) 2272 2274 goto free_mem; 2273 2275 2274 2276 /*
+24 -14
drivers/scsi/device_handler/scsi_dh_alua.c
··· 113 113 #define ALUA_POLICY_SWITCH_ALL 1 114 114 115 115 static void alua_rtpg_work(struct work_struct *work); 116 - static void alua_rtpg_queue(struct alua_port_group *pg, 116 + static bool alua_rtpg_queue(struct alua_port_group *pg, 117 117 struct scsi_device *sdev, 118 118 struct alua_queue_data *qdata, bool force); 119 119 static void alua_check(struct scsi_device *sdev, bool force); ··· 862 862 kref_put(&pg->kref, release_port_group); 863 863 } 864 864 865 - static void alua_rtpg_queue(struct alua_port_group *pg, 865 + /** 866 + * alua_rtpg_queue() - cause RTPG to be submitted asynchronously 867 + * 868 + * Returns true if and only if alua_rtpg_work() will be called asynchronously. 869 + * That function is responsible for calling @qdata->fn(). 870 + */ 871 + static bool alua_rtpg_queue(struct alua_port_group *pg, 866 872 struct scsi_device *sdev, 867 873 struct alua_queue_data *qdata, bool force) 868 874 { ··· 876 870 unsigned long flags; 877 871 struct workqueue_struct *alua_wq = kaluad_wq; 878 872 879 - if (!pg) 880 - return; 873 + if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) 874 + return false; 881 875 882 876 spin_lock_irqsave(&pg->lock, flags); 883 877 if (qdata) { ··· 890 884 pg->flags |= ALUA_PG_RUN_RTPG; 891 885 kref_get(&pg->kref); 892 886 pg->rtpg_sdev = sdev; 893 - scsi_device_get(sdev); 894 887 start_queue = 1; 895 888 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { 896 889 pg->flags |= ALUA_PG_RUN_RTPG; 897 890 /* Do not queue if the worker is already running */ 898 891 if (!(pg->flags & ALUA_PG_RUNNING)) { 899 892 kref_get(&pg->kref); 900 - sdev = NULL; 901 893 start_queue = 1; 902 894 } 903 895 } ··· 904 900 alua_wq = kaluad_sync_wq; 905 901 spin_unlock_irqrestore(&pg->lock, flags); 906 902 907 - if (start_queue && 908 - !queue_delayed_work(alua_wq, &pg->rtpg_work, 909 - msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { 910 - if (sdev) 911 - scsi_device_put(sdev); 912 - kref_put(&pg->kref, release_port_group); 903 + if (start_queue) { 
904 + if (queue_delayed_work(alua_wq, &pg->rtpg_work, 905 + msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) 906 + sdev = NULL; 907 + else 908 + kref_put(&pg->kref, release_port_group); 913 909 } 910 + if (sdev) 911 + scsi_device_put(sdev); 912 + 913 + return true; 914 914 } 915 915 916 916 /* ··· 1015 1007 mutex_unlock(&h->init_mutex); 1016 1008 goto out; 1017 1009 } 1018 - fn = NULL; 1019 1010 rcu_read_unlock(); 1020 1011 mutex_unlock(&h->init_mutex); 1021 1012 1022 - alua_rtpg_queue(pg, sdev, qdata, true); 1013 + if (alua_rtpg_queue(pg, sdev, qdata, true)) 1014 + fn = NULL; 1015 + else 1016 + err = SCSI_DH_DEV_OFFLINED; 1023 1017 kref_put(&pg->kref, release_port_group); 1024 1018 out: 1025 1019 if (fn)
+1
drivers/scsi/hpsa.c
··· 3885 3885 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 3886 3886 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 3887 3887 volume_offline = hpsa_volume_offline(h, scsi3addr); 3888 + this_device->volume_offline = volume_offline; 3888 3889 if (volume_offline == HPSA_LV_FAILED) { 3889 3890 rc = HPSA_LV_FAILED; 3890 3891 dev_err(&h->pdev->dev,
+1 -1
drivers/scsi/libsas/sas_ata.c
··· 221 221 task->num_scatter = qc->n_elem; 222 222 } else { 223 223 for_each_sg(qc->sg, sg, qc->n_elem, si) 224 - xfer += sg->length; 224 + xfer += sg_dma_len(sg); 225 225 226 226 task->total_xfer_len = xfer; 227 227 task->num_scatter = si;
+14 -8
drivers/scsi/lpfc/lpfc_debugfs.h
··· 44 44 /* hbqinfo output buffer size */ 45 45 #define LPFC_HBQINFO_SIZE 8192 46 46 47 - enum { 48 - DUMP_FCP, 49 - DUMP_NVME, 50 - DUMP_MBX, 51 - DUMP_ELS, 52 - DUMP_NVMELS, 53 - }; 54 - 55 47 /* nvmestat output buffer size */ 56 48 #define LPFC_NVMESTAT_SIZE 8192 57 49 #define LPFC_NVMEKTIME_SIZE 8192 ··· 275 283 struct lpfc_idiag_offset offset; 276 284 void *ptr_private; 277 285 }; 286 + 287 + #else 288 + 289 + #define lpfc_nvmeio_data(phba, fmt, arg...) \ 290 + no_printk(fmt, ##arg) 291 + 278 292 #endif 293 + 294 + enum { 295 + DUMP_FCP, 296 + DUMP_NVME, 297 + DUMP_MBX, 298 + DUMP_ELS, 299 + DUMP_NVMELS, 300 + }; 279 301 280 302 /* Mask for discovery_trace */ 281 303 #define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
+2 -1
drivers/scsi/lpfc/lpfc_els.c
··· 7968 7968 did, vport->port_state, ndlp->nlp_flag); 7969 7969 7970 7970 phba->fc_stat.elsRcvPRLI++; 7971 - if (vport->port_state < LPFC_DISC_AUTH) { 7971 + if ((vport->port_state < LPFC_DISC_AUTH) && 7972 + (vport->fc_flag & FC_FABRIC)) { 7972 7973 rjt_err = LSRJT_UNABLE_TPC; 7973 7974 rjt_exp = LSEXP_NOTHING_MORE; 7974 7975 break;
+2 -2
drivers/scsi/lpfc/lpfc_nvmet.c
··· 520 520 struct lpfc_hba *phba = ctxp->phba; 521 521 struct lpfc_iocbq *nvmewqeq; 522 522 unsigned long iflags; 523 - int rc, id; 523 + int rc; 524 524 525 525 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 526 526 if (phba->ktime_on) { ··· 530 530 ctxp->ts_nvme_data = ktime_get_ns(); 531 531 } 532 532 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 533 - id = smp_processor_id(); 533 + int id = smp_processor_id(); 534 534 ctxp->cpu = id; 535 535 if (id < LPFC_CHECK_CPU_CNT) 536 536 phba->cpucheck_xmt_io[id]++;
+1
drivers/scsi/qedi/qedi_main.c
··· 2007 2007 2008 2008 static struct pci_device_id qedi_pci_tbl[] = { 2009 2009 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) }, 2010 + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) }, 2010 2011 { 0 }, 2011 2012 }; 2012 2013 MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
+2 -1
drivers/scsi/qla2xxx/qla_os.c
··· 1651 1651 /* Don't abort commands in adapter during EEH 1652 1652 * recovery as it's not accessible/responding. 1653 1653 */ 1654 - if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) { 1654 + if (GET_CMD_SP(sp) && !ha->flags.eeh_busy && 1655 + (sp->type == SRB_SCSI_CMD)) { 1655 1656 /* Get a reference to the sp and drop the lock. 1656 1657 * The reference ensures this sp->done() call 1657 1658 * - and not the call in qla2xxx_eh_abort() -
+2
drivers/scsi/sg.c
··· 996 996 result = get_user(val, ip); 997 997 if (result) 998 998 return result; 999 + if (val > SG_MAX_CDB_SIZE) 1000 + return -ENOMEM; 999 1001 sfp->next_cmd_len = (val > 0) ? val : 0; 1000 1002 return 0; 1001 1003 case SG_GET_VERSION_NUM:
+2 -2
drivers/scsi/ufs/ufshcd-pltfrm.c
··· 309 309 310 310 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 311 311 mmio_base = devm_ioremap_resource(dev, mem_res); 312 - if (IS_ERR(*(void **)&mmio_base)) { 313 - err = PTR_ERR(*(void **)&mmio_base); 312 + if (IS_ERR(mmio_base)) { 313 + err = PTR_ERR(mmio_base); 314 314 goto out; 315 315 } 316 316
-2
drivers/scsi/ufs/ufshcd.c
··· 4662 4662 } 4663 4663 if (ufshcd_is_clkscaling_supported(hba)) 4664 4664 hba->clk_scaling.active_reqs--; 4665 - if (ufshcd_is_clkscaling_supported(hba)) 4666 - hba->clk_scaling.active_reqs--; 4667 4665 } 4668 4666 4669 4667 /* clear corresponding bits of completed commands */
+25 -16
drivers/thermal/cpu_cooling.c
··· 107 107 }; 108 108 static DEFINE_IDA(cpufreq_ida); 109 109 110 - static unsigned int cpufreq_dev_count; 111 - 112 110 static DEFINE_MUTEX(cooling_list_lock); 113 111 static LIST_HEAD(cpufreq_dev_list); 114 112 ··· 393 395 394 396 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, 395 397 true); 398 + if (IS_ERR(opp)) { 399 + dev_warn_ratelimited(cpufreq_device->cpu_dev, 400 + "Failed to find OPP for frequency %lu: %ld\n", 401 + freq_hz, PTR_ERR(opp)); 402 + return -EINVAL; 403 + } 404 + 396 405 voltage = dev_pm_opp_get_voltage(opp); 397 406 dev_pm_opp_put(opp); 398 407 399 408 if (voltage == 0) { 400 - dev_warn_ratelimited(cpufreq_device->cpu_dev, 401 - "Failed to get voltage for frequency %lu: %ld\n", 402 - freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0); 409 + dev_err_ratelimited(cpufreq_device->cpu_dev, 410 + "Failed to get voltage for frequency %lu\n", 411 + freq_hz); 403 412 return -EINVAL; 404 413 } 405 414 ··· 698 693 699 694 *state = cpufreq_cooling_get_level(cpu, target_freq); 700 695 if (*state == THERMAL_CSTATE_INVALID) { 701 - dev_warn_ratelimited(&cdev->device, 702 - "Failed to convert %dKHz for cpu %d into a cdev state\n", 703 - target_freq, cpu); 696 + dev_err_ratelimited(&cdev->device, 697 + "Failed to convert %dKHz for cpu %d into a cdev state\n", 698 + target_freq, cpu); 704 699 return -EINVAL; 705 700 } 706 701 ··· 776 771 unsigned int freq, i, num_cpus; 777 772 int ret; 778 773 struct thermal_cooling_device_ops *cooling_ops; 774 + bool first; 779 775 780 776 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) 781 777 return ERR_PTR(-ENOMEM); ··· 880 874 cpufreq_dev->cool_dev = cool_dev; 881 875 882 876 mutex_lock(&cooling_list_lock); 883 - list_add(&cpufreq_dev->node, &cpufreq_dev_list); 884 - 885 877 /* Register the notifier for first cpufreq cooling device */ 886 - if (!cpufreq_dev_count++) 878 + first = list_empty(&cpufreq_dev_list); 879 + list_add(&cpufreq_dev->node, &cpufreq_dev_list); 880 + mutex_unlock(&cooling_list_lock); 881 + 
882 + if (first) 887 883 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 888 884 CPUFREQ_POLICY_NOTIFIER); 889 - mutex_unlock(&cooling_list_lock); 890 885 891 886 goto put_policy; 892 887 ··· 1028 1021 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 1029 1022 { 1030 1023 struct cpufreq_cooling_device *cpufreq_dev; 1024 + bool last; 1031 1025 1032 1026 if (!cdev) 1033 1027 return; ··· 1036 1028 cpufreq_dev = cdev->devdata; 1037 1029 1038 1030 mutex_lock(&cooling_list_lock); 1031 + list_del(&cpufreq_dev->node); 1039 1032 /* Unregister the notifier for the last cpufreq cooling device */ 1040 - if (!--cpufreq_dev_count) 1033 + last = list_empty(&cpufreq_dev_list); 1034 + mutex_unlock(&cooling_list_lock); 1035 + 1036 + if (last) 1041 1037 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 1042 1038 CPUFREQ_POLICY_NOTIFIER); 1043 - 1044 - list_del(&cpufreq_dev->node); 1045 - mutex_unlock(&cooling_list_lock); 1046 1039 1047 1040 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1048 1041 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
+10 -4
drivers/thermal/devfreq_cooling.c
··· 186 186 return 0; 187 187 188 188 opp = dev_pm_opp_find_freq_exact(dev, freq, true); 189 - if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE)) 189 + if (PTR_ERR(opp) == -ERANGE) 190 190 opp = dev_pm_opp_find_freq_exact(dev, freq, false); 191 + 192 + if (IS_ERR(opp)) { 193 + dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n", 194 + freq, PTR_ERR(opp)); 195 + return 0; 196 + } 191 197 192 198 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */ 193 199 dev_pm_opp_put(opp); 194 200 195 201 if (voltage == 0) { 196 - dev_warn_ratelimited(dev, 197 - "Failed to get voltage for frequency %lu: %ld\n", 198 - freq, IS_ERR(opp) ? PTR_ERR(opp) : 0); 202 + dev_err_ratelimited(dev, 203 + "Failed to get voltage for frequency %lu\n", 204 + freq); 199 205 return 0; 200 206 } 201 207
+6 -2
drivers/tty/serial/8250/Kconfig
··· 128 128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL. 129 129 130 130 config SERIAL_8250_EXAR 131 - tristate "8250/16550 PCI device support" 132 - depends on SERIAL_8250_PCI 131 + tristate "8250/16550 Exar/Commtech PCI/PCIe device support" 132 + depends on SERIAL_8250_PCI 133 133 default SERIAL_8250 134 + help 135 + This builds support for XR17C1xx, XR17V3xx and some Commtech 136 + 422x PCIe serial cards that are not covered by the more generic 137 + SERIAL_8250_PCI option. 134 138 135 139 config SERIAL_8250_HP300 136 140 tristate
+21 -2
drivers/tty/serial/amba-pl011.c
··· 2452 2452 uart_console_write(&dev->port, s, n, pl011_putc); 2453 2453 } 2454 2454 2455 + /* 2456 + * On non-ACPI systems, earlycon is enabled by specifying 2457 + * "earlycon=pl011,<address>" on the kernel command line. 2458 + * 2459 + * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table, 2460 + * by specifying only "earlycon" on the command line. Because it requires 2461 + * SPCR, the console starts after ACPI is parsed, which is later than a 2462 + * traditional early console. 2463 + * 2464 + * To get the traditional early console that starts before ACPI is parsed, 2465 + * specify the full "earlycon=pl011,<address>" option. 2466 + */ 2455 2467 static int __init pl011_early_console_setup(struct earlycon_device *device, 2456 2468 const char *opt) 2457 2469 { 2458 2470 if (!device->port.membase) 2459 2471 return -ENODEV; 2460 2472 2461 - device->con->write = qdf2400_e44_present ? 2462 - qdf2400_e44_early_write : pl011_early_write; 2473 + /* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" must 2474 + * also be specified, e.g. "earlycon=pl011,<address>,qdf2400_e44". 2475 + */ 2476 + if (!strcmp(device->options, "qdf2400_e44")) 2477 + device->con->write = qdf2400_e44_early_write; 2478 + else 2479 + device->con->write = pl011_early_write; 2480 + 2463 2481 return 0; 2464 2482 } 2465 2483 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); 2466 2484 OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); 2485 + EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup); 2467 2486 2468 2487 #else 2469 2488 #define AMBA_CONSOLE NULL
+8
drivers/tty/serial/atmel_serial.c
··· 1954 1954 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 1955 1955 atmel_port->pdc_tx.ofs = 0; 1956 1956 } 1957 + /* 1958 + * in uart_flush_buffer(), the xmit circular buffer has just 1959 + * been cleared, so we have to reset tx_len accordingly. 1960 + */ 1961 + atmel_port->tx_len = 0; 1957 1962 } 1958 1963 1959 1964 /* ··· 2490 2485 /* Store PDC transmit status and disable it */ 2491 2486 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2492 2487 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2488 + 2489 + /* Make sure that tx path is actually able to send characters */ 2490 + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 2493 2491 2494 2492 uart_console_write(port, s, count, atmel_console_putchar); 2495 2493
+1 -1
drivers/tty/serial/mxs-auart.c
··· 1088 1088 AUART_LINECTRL_BAUD_DIV_MAX); 1089 1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN; 1090 1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max); 1091 - div = u->uartclk * 32 / baud; 1091 + div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud); 1092 1092 } 1093 1093 1094 1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
-1
drivers/tty/vt/keyboard.c
··· 28 28 #include <linux/module.h> 29 29 #include <linux/sched/signal.h> 30 30 #include <linux/sched/debug.h> 31 - #include <linux/sched/debug.h> 32 31 #include <linux/tty.h> 33 32 #include <linux/tty_flip.h> 34 33 #include <linux/mm.h>
+5 -2
drivers/usb/core/hcd.c
··· 520 520 */ 521 521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength); 522 522 tbuf = kzalloc(tbuf_size, GFP_KERNEL); 523 - if (!tbuf) 524 - return -ENOMEM; 523 + if (!tbuf) { 524 + status = -ENOMEM; 525 + goto err_alloc; 526 + } 525 527 526 528 bufp = tbuf; 527 529 ··· 736 734 } 737 735 738 736 kfree(tbuf); 737 + err_alloc: 739 738 740 739 /* any errors get returned through the urb completion */ 741 740 spin_lock_irq(&hcd_root_hub_lock);
+1
drivers/usb/host/xhci-plat.c
··· 344 344 static struct platform_driver usb_xhci_driver = { 345 345 .probe = xhci_plat_probe, 346 346 .remove = xhci_plat_remove, 347 + .shutdown = usb_hcd_platform_shutdown, 347 348 .driver = { 348 349 .name = "xhci-hcd", 349 350 .pm = DEV_PM_OPS,
+3
drivers/usb/host/xhci-ring.c
··· 1989 1989 case TRB_NORMAL: 1990 1990 td->urb->actual_length = requested - remaining; 1991 1991 goto finish_td; 1992 + case TRB_STATUS: 1993 + td->urb->actual_length = requested; 1994 + goto finish_td; 1992 1995 default: 1993 1996 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", 1994 1997 trb_type);
+25 -18
drivers/usb/host/xhci.c
··· 1477 1477 struct xhci_ring *ep_ring; 1478 1478 struct xhci_virt_ep *ep; 1479 1479 struct xhci_command *command; 1480 + struct xhci_virt_device *vdev; 1480 1481 1481 1482 xhci = hcd_to_xhci(hcd); 1482 1483 spin_lock_irqsave(&xhci->lock, flags); ··· 1486 1485 1487 1486 /* Make sure the URB hasn't completed or been unlinked already */ 1488 1487 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 1489 - if (ret || !urb->hcpriv) 1488 + if (ret) 1490 1489 goto done; 1490 + 1491 + /* give back URB now if we can't queue it for cancel */ 1492 + vdev = xhci->devs[urb->dev->slot_id]; 1493 + urb_priv = urb->hcpriv; 1494 + if (!vdev || !urb_priv) 1495 + goto err_giveback; 1496 + 1497 + ep_index = xhci_get_endpoint_index(&urb->ep->desc); 1498 + ep = &vdev->eps[ep_index]; 1499 + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 1500 + if (!ep || !ep_ring) 1501 + goto err_giveback; 1502 + 1491 1503 temp = readl(&xhci->op_regs->status); 1492 1504 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1493 1505 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1494 1506 "HW died, freeing TD."); 1495 - urb_priv = urb->hcpriv; 1496 1507 for (i = urb_priv->num_tds_done; 1497 - i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id]; 1508 + i < urb_priv->num_tds; 1498 1509 i++) { 1499 1510 td = &urb_priv->td[i]; 1500 1511 if (!list_empty(&td->td_list)) ··· 1514 1501 if (!list_empty(&td->cancelled_td_list)) 1515 1502 list_del_init(&td->cancelled_td_list); 1516 1503 } 1517 - 1518 - usb_hcd_unlink_urb_from_ep(hcd, urb); 1519 - spin_unlock_irqrestore(&xhci->lock, flags); 1520 - usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); 1521 - xhci_urb_free_priv(urb_priv); 1522 - return ret; 1504 + goto err_giveback; 1523 1505 } 1524 1506 1525 - ep_index = xhci_get_endpoint_index(&urb->ep->desc); 1526 - ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; 1527 - ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 1528 - if (!ep_ring) { 1529 - ret = -EINVAL; 1530 - goto done; 1531 - } 1532 - 1533 - 
urb_priv = urb->hcpriv; 1534 1507 i = urb_priv->num_tds_done; 1535 1508 if (i < urb_priv->num_tds) 1536 1509 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, ··· 1552 1553 } 1553 1554 done: 1554 1555 spin_unlock_irqrestore(&xhci->lock, flags); 1556 + return ret; 1557 + 1558 + err_giveback: 1559 + if (urb_priv) 1560 + xhci_urb_free_priv(urb_priv); 1561 + usb_hcd_unlink_urb_from_ep(hcd, urb); 1562 + spin_unlock_irqrestore(&xhci->lock, flags); 1563 + usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); 1555 1564 return ret; 1556 1565 } 1557 1566
+1 -1
drivers/usb/phy/phy-isp1301.c
··· 136 136 static struct i2c_driver isp1301_driver = { 137 137 .driver = { 138 138 .name = DRV_NAME, 139 - .of_match_table = of_match_ptr(isp1301_of_match), 139 + .of_match_table = isp1301_of_match, 140 140 }, 141 141 .probe = isp1301_probe, 142 142 .remove = isp1301_remove,
+13 -6
drivers/virtio/virtio_balloon.c
··· 242 242 243 243 #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) 244 244 245 - static void update_balloon_stats(struct virtio_balloon *vb) 245 + static unsigned int update_balloon_stats(struct virtio_balloon *vb) 246 246 { 247 247 unsigned long events[NR_VM_EVENT_ITEMS]; 248 248 struct sysinfo i; 249 - int idx = 0; 249 + unsigned int idx = 0; 250 250 long available; 251 251 252 252 all_vm_events(events); ··· 254 254 255 255 available = si_mem_available(); 256 256 257 + #ifdef CONFIG_VM_EVENT_COUNTERS 257 258 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, 258 259 pages_to_bytes(events[PSWPIN])); 259 260 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, 260 261 pages_to_bytes(events[PSWPOUT])); 261 262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); 262 263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); 264 + #endif 263 265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, 264 266 pages_to_bytes(i.freeram)); 265 267 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, 266 268 pages_to_bytes(i.totalram)); 267 269 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, 268 270 pages_to_bytes(available)); 271 + 272 + return idx; 269 273 } 270 274 271 275 /* ··· 295 291 { 296 292 struct virtqueue *vq; 297 293 struct scatterlist sg; 298 - unsigned int len; 294 + unsigned int len, num_stats; 299 295 300 - update_balloon_stats(vb); 296 + num_stats = update_balloon_stats(vb); 301 297 302 298 vq = vb->stats_vq; 303 299 if (!virtqueue_get_buf(vq, &len)) 304 300 return; 305 - sg_init_one(&sg, vb->stats, sizeof(vb->stats)); 301 + sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); 306 302 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); 307 303 virtqueue_kick(vq); 308 304 } ··· 427 423 vb->deflate_vq = vqs[1]; 428 424 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { 429 425 struct scatterlist sg; 426 + unsigned int num_stats; 430 427 vb->stats_vq = vqs[2]; 431 428 432 429 /* 433 430 * Prime this virtqueue with one buffer 
so the hypervisor can 434 431 * use it to signal us later (it can't be broken yet!). 435 432 */ 436 - sg_init_one(&sg, vb->stats, sizeof vb->stats); 433 + num_stats = update_balloon_stats(vb); 434 + 435 + sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); 437 436 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) 438 437 < 0) 439 438 BUG();
+5 -4
drivers/virtio/virtio_pci_common.c
··· 147 147 { 148 148 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 149 149 const char *name = dev_name(&vp_dev->vdev.dev); 150 - int i, err = -ENOMEM, allocated_vectors, nvectors; 150 + int i, j, err = -ENOMEM, allocated_vectors, nvectors; 151 151 unsigned flags = PCI_IRQ_MSIX; 152 152 bool shared = false; 153 153 u16 msix_vec; ··· 212 212 if (!vp_dev->msix_vector_map) 213 213 goto out_disable_config_irq; 214 214 215 - allocated_vectors = 1; /* vector 0 is the config interrupt */ 215 + allocated_vectors = j = 1; /* vector 0 is the config interrupt */ 216 216 for (i = 0; i < nvqs; ++i) { 217 217 if (!names[i]) { 218 218 vqs[i] = NULL; ··· 236 236 continue; 237 237 } 238 238 239 - snprintf(vp_dev->msix_names[i + 1], 239 + snprintf(vp_dev->msix_names[j], 240 240 sizeof(*vp_dev->msix_names), "%s-%s", 241 241 dev_name(&vp_dev->vdev.dev), names[i]); 242 242 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), 243 243 vring_interrupt, IRQF_SHARED, 244 - vp_dev->msix_names[i + 1], vqs[i]); 244 + vp_dev->msix_names[j], vqs[i]); 245 245 if (err) { 246 246 /* don't free this irq on error */ 247 247 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; 248 248 goto out_remove_vqs; 249 249 } 250 250 vp_dev->msix_vector_map[i] = msix_vec; 251 + j++; 251 252 252 253 /* 253 254 * Use a different vector for each queue if they are available,
+1 -1
fs/btrfs/ctree.h
··· 1259 1259 atomic_t will_be_snapshoted; 1260 1260 1261 1261 /* For qgroup metadata space reserve */ 1262 - atomic_t qgroup_meta_rsv; 1262 + atomic64_t qgroup_meta_rsv; 1263 1263 }; 1264 1264 static inline u32 btrfs_inode_sectorsize(const struct inode *inode) 1265 1265 {
+1 -1
fs/btrfs/disk-io.c
··· 1342 1342 atomic_set(&root->orphan_inodes, 0); 1343 1343 atomic_set(&root->refs, 1); 1344 1344 atomic_set(&root->will_be_snapshoted, 0); 1345 - atomic_set(&root->qgroup_meta_rsv, 0); 1345 + atomic64_set(&root->qgroup_meta_rsv, 0); 1346 1346 root->log_transid = 0; 1347 1347 root->log_transid_committed = -1; 1348 1348 root->last_log_commit = 0;
+29 -19
fs/btrfs/extent_io.c
··· 2584 2584 2585 2585 if (tree->ops) { 2586 2586 ret = tree->ops->readpage_io_failed_hook(page, mirror); 2587 - if (!ret && !bio->bi_error) 2588 - uptodate = 1; 2589 - } else { 2590 - /* 2591 - * The generic bio_readpage_error handles errors the 2592 - * following way: If possible, new read requests are 2593 - * created and submitted and will end up in 2594 - * end_bio_extent_readpage as well (if we're lucky, not 2595 - * in the !uptodate case). In that case it returns 0 and 2596 - * we just go on with the next page in our bio. If it 2597 - * can't handle the error it will return -EIO and we 2598 - * remain responsible for that page. 2599 - */ 2600 - ret = bio_readpage_error(bio, offset, page, start, end, 2601 - mirror); 2602 - if (ret == 0) { 2603 - uptodate = !bio->bi_error; 2604 - offset += len; 2605 - continue; 2587 + if (ret == -EAGAIN) { 2588 + /* 2589 + * Data inode's readpage_io_failed_hook() always 2590 + * returns -EAGAIN. 2591 + * 2592 + * The generic bio_readpage_error handles errors 2593 + * the following way: If possible, new read 2594 + * requests are created and submitted and will 2595 + * end up in end_bio_extent_readpage as well (if 2596 + * we're lucky, not in the !uptodate case). In 2597 + * that case it returns 0 and we just go on with 2598 + * the next page in our bio. If it can't handle 2599 + * the error it will return -EIO and we remain 2600 + * responsible for that page. 2601 + */ 2602 + ret = bio_readpage_error(bio, offset, page, 2603 + start, end, mirror); 2604 + if (ret == 0) { 2605 + uptodate = !bio->bi_error; 2606 + offset += len; 2607 + continue; 2608 + } 2606 2609 } 2610 + 2611 + /* 2612 + * metadata's readpage_io_failed_hook() always returns 2613 + * -EIO and fixes nothing. -EIO is also returned if 2614 + * data inode error could not be fixed. 2615 + */ 2616 + ASSERT(ret == -EIO); 2607 2617 } 2608 2618 readpage_ok: 2609 2619 if (likely(uptodate)) {
+3 -3
fs/btrfs/inode.c
··· 10523 10523 } 10524 10524 10525 10525 __attribute__((const)) 10526 - static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror) 10526 + static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror) 10527 10527 { 10528 - return 0; 10528 + return -EAGAIN; 10529 10529 } 10530 10530 10531 10531 static const struct inode_operations btrfs_dir_inode_operations = { ··· 10570 10570 .submit_bio_hook = btrfs_submit_bio_hook, 10571 10571 .readpage_end_io_hook = btrfs_readpage_end_io_hook, 10572 10572 .merge_bio_hook = btrfs_merge_bio_hook, 10573 - .readpage_io_failed_hook = dummy_readpage_io_failed_hook, 10573 + .readpage_io_failed_hook = btrfs_readpage_io_failed_hook, 10574 10574 10575 10575 /* optional callbacks */ 10576 10576 .fill_delalloc = run_delalloc_range,
+5 -5
fs/btrfs/qgroup.c
··· 2948 2948 ret = qgroup_reserve(root, num_bytes, enforce); 2949 2949 if (ret < 0) 2950 2950 return ret; 2951 - atomic_add(num_bytes, &root->qgroup_meta_rsv); 2951 + atomic64_add(num_bytes, &root->qgroup_meta_rsv); 2952 2952 return ret; 2953 2953 } 2954 2954 2955 2955 void btrfs_qgroup_free_meta_all(struct btrfs_root *root) 2956 2956 { 2957 2957 struct btrfs_fs_info *fs_info = root->fs_info; 2958 - int reserved; 2958 + u64 reserved; 2959 2959 2960 2960 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 2961 2961 !is_fstree(root->objectid)) 2962 2962 return; 2963 2963 2964 - reserved = atomic_xchg(&root->qgroup_meta_rsv, 0); 2964 + reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0); 2965 2965 if (reserved == 0) 2966 2966 return; 2967 2967 btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved); ··· 2976 2976 return; 2977 2977 2978 2978 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 2979 - WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes); 2980 - atomic_sub(num_bytes, &root->qgroup_meta_rsv); 2979 + WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes); 2980 + atomic64_sub(num_bytes, &root->qgroup_meta_rsv); 2981 2981 btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes); 2982 2982 } 2983 2983
+6 -1
fs/btrfs/send.c
··· 6305 6305 goto out; 6306 6306 } 6307 6307 6308 + /* 6309 + * Check that we don't overflow at later allocations, we request 6310 + * clone_sources_count + 1 items, and compare to unsigned long inside 6311 + * access_ok. 6312 + */ 6308 6313 if (arg->clone_sources_count > 6309 - ULLONG_MAX / sizeof(*arg->clone_sources)) { 6314 + ULONG_MAX / sizeof(struct clone_root) - 1) { 6310 6315 ret = -EINVAL; 6311 6316 goto out; 6312 6317 }
+12 -13
fs/hugetlbfs/inode.c
··· 695 695 696 696 inode = new_inode(sb); 697 697 if (inode) { 698 - struct hugetlbfs_inode_info *info; 699 698 inode->i_ino = get_next_ino(); 700 699 inode->i_mode = S_IFDIR | config->mode; 701 700 inode->i_uid = config->uid; 702 701 inode->i_gid = config->gid; 703 702 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 704 - info = HUGETLBFS_I(inode); 705 - mpol_shared_policy_init(&info->policy, NULL); 706 703 inode->i_op = &hugetlbfs_dir_inode_operations; 707 704 inode->i_fop = &simple_dir_operations; 708 705 /* directory inodes start off with i_nlink == 2 (for "." entry) */ ··· 730 733 731 734 inode = new_inode(sb); 732 735 if (inode) { 733 - struct hugetlbfs_inode_info *info; 734 736 inode->i_ino = get_next_ino(); 735 737 inode_init_owner(inode, dir, mode); 736 738 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, ··· 737 741 inode->i_mapping->a_ops = &hugetlbfs_aops; 738 742 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 739 743 inode->i_mapping->private_data = resv_map; 740 - info = HUGETLBFS_I(inode); 741 - /* 742 - * The policy is initialized here even if we are creating a 743 - * private inode because initialization simply creates an 744 - * an empty rb tree and calls rwlock_init(), later when we 745 - * call mpol_free_shared_policy() it will just return because 746 - * the rb tree will still be empty. 747 - */ 748 - mpol_shared_policy_init(&info->policy, NULL); 749 744 switch (mode & S_IFMT) { 750 745 default: 751 746 init_special_inode(inode, mode, dev); ··· 924 937 hugetlbfs_inc_free_inodes(sbinfo); 925 938 return NULL; 926 939 } 940 + 941 + /* 942 + * Any time after allocation, hugetlbfs_destroy_inode can be called 943 + * for the inode. mpol_free_shared_policy is unconditionally called 944 + * as part of hugetlbfs_destroy_inode. So, initialize policy here 945 + * in case of a quick call to destroy. 946 + * 947 + * Note that the policy is initialized even if we are creating a 948 + * private inode. 
This simplifies hugetlbfs_destroy_inode. 949 + */ 950 + mpol_shared_policy_init(&p->policy, NULL); 951 + 927 952 return &p->vfs_inode; 928 953 } 929 954
+2 -7
fs/nfs/dir.c
··· 2055 2055 { 2056 2056 struct inode *old_inode = d_inode(old_dentry); 2057 2057 struct inode *new_inode = d_inode(new_dentry); 2058 - struct dentry *dentry = NULL, *rehash = NULL; 2058 + struct dentry *dentry = NULL; 2059 2059 struct rpc_task *task; 2060 2060 int error = -EBUSY; 2061 2061 ··· 2078 2078 * To prevent any new references to the target during the 2079 2079 * rename, we unhash the dentry in advance. 2080 2080 */ 2081 - if (!d_unhashed(new_dentry)) { 2081 + if (!d_unhashed(new_dentry)) 2082 2082 d_drop(new_dentry); 2083 - rehash = new_dentry; 2084 - } 2085 2083 2086 2084 if (d_count(new_dentry) > 2) { 2087 2085 int err; ··· 2096 2098 goto out; 2097 2099 2098 2100 new_dentry = dentry; 2099 - rehash = NULL; 2100 2101 new_inode = NULL; 2101 2102 } 2102 2103 } ··· 2116 2119 error = task->tk_status; 2117 2120 rpc_put_task(task); 2118 2121 out: 2119 - if (rehash) 2120 - d_rehash(rehash); 2121 2122 trace_nfs_rename_exit(old_dir, old_dentry, 2122 2123 new_dir, new_dentry, error); 2123 2124 /* new dentry created? */
+101 -60
fs/nfs/filelayout/filelayout.c
··· 202 202 task->tk_status); 203 203 nfs4_mark_deviceid_unavailable(devid); 204 204 pnfs_error_mark_layout_for_return(inode, lseg); 205 - pnfs_set_lo_fail(lseg); 206 205 rpc_wake_up(&tbl->slot_tbl_waitq); 207 206 /* fall through */ 208 207 default: 208 + pnfs_set_lo_fail(lseg); 209 209 reset: 210 210 dprintk("%s Retry through MDS. Error %d\n", __func__, 211 211 task->tk_status); ··· 560 560 return PNFS_ATTEMPTED; 561 561 } 562 562 563 - /* 564 - * filelayout_check_layout() 565 - * 566 - * Make sure layout segment parameters are sane WRT the device. 567 - * At this point no generic layer initialization of the lseg has occurred, 568 - * and nothing has been added to the layout_hdr cache. 569 - * 570 - */ 571 563 static int 572 - filelayout_check_layout(struct pnfs_layout_hdr *lo, 573 - struct nfs4_filelayout_segment *fl, 574 - struct nfs4_layoutget_res *lgr, 575 - struct nfs4_deviceid *id, 576 - gfp_t gfp_flags) 564 + filelayout_check_deviceid(struct pnfs_layout_hdr *lo, 565 + struct nfs4_filelayout_segment *fl, 566 + gfp_t gfp_flags) 577 567 { 578 568 struct nfs4_deviceid_node *d; 579 569 struct nfs4_file_layout_dsaddr *dsaddr; 580 570 int status = -EINVAL; 581 571 582 - dprintk("--> %s\n", __func__); 583 - 584 - /* FIXME: remove this check when layout segment support is added */ 585 - if (lgr->range.offset != 0 || 586 - lgr->range.length != NFS4_MAX_UINT64) { 587 - dprintk("%s Only whole file layouts supported. 
Use MDS i/o\n", 588 - __func__); 589 - goto out; 590 - } 591 - 592 - if (fl->pattern_offset > lgr->range.offset) { 593 - dprintk("%s pattern_offset %lld too large\n", 594 - __func__, fl->pattern_offset); 595 - goto out; 596 - } 597 - 598 - if (!fl->stripe_unit) { 599 - dprintk("%s Invalid stripe unit (%u)\n", 600 - __func__, fl->stripe_unit); 601 - goto out; 602 - } 603 - 604 572 /* find and reference the deviceid */ 605 - d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id, 573 + d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid, 606 574 lo->plh_lc_cred, gfp_flags); 607 575 if (d == NULL) 608 576 goto out; ··· 596 628 __func__, fl->num_fh); 597 629 goto out_put; 598 630 } 631 + status = 0; 632 + out: 633 + return status; 634 + out_put: 635 + nfs4_fl_put_deviceid(dsaddr); 636 + goto out; 637 + } 638 + 639 + /* 640 + * filelayout_check_layout() 641 + * 642 + * Make sure layout segment parameters are sane WRT the device. 643 + * At this point no generic layer initialization of the lseg has occurred, 644 + * and nothing has been added to the layout_hdr cache. 645 + * 646 + */ 647 + static int 648 + filelayout_check_layout(struct pnfs_layout_hdr *lo, 649 + struct nfs4_filelayout_segment *fl, 650 + struct nfs4_layoutget_res *lgr, 651 + gfp_t gfp_flags) 652 + { 653 + int status = -EINVAL; 654 + 655 + dprintk("--> %s\n", __func__); 656 + 657 + /* FIXME: remove this check when layout segment support is added */ 658 + if (lgr->range.offset != 0 || 659 + lgr->range.length != NFS4_MAX_UINT64) { 660 + dprintk("%s Only whole file layouts supported. 
Use MDS i/o\n", 661 + __func__); 662 + goto out; 663 + } 664 + 665 + if (fl->pattern_offset > lgr->range.offset) { 666 + dprintk("%s pattern_offset %lld too large\n", 667 + __func__, fl->pattern_offset); 668 + goto out; 669 + } 670 + 671 + if (!fl->stripe_unit) { 672 + dprintk("%s Invalid stripe unit (%u)\n", 673 + __func__, fl->stripe_unit); 674 + goto out; 675 + } 599 676 600 677 status = 0; 601 678 out: 602 679 dprintk("--> %s returns %d\n", __func__, status); 603 680 return status; 604 - out_put: 605 - nfs4_fl_put_deviceid(dsaddr); 606 - goto out; 607 681 } 608 682 609 683 static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) ··· 667 657 filelayout_decode_layout(struct pnfs_layout_hdr *flo, 668 658 struct nfs4_filelayout_segment *fl, 669 659 struct nfs4_layoutget_res *lgr, 670 - struct nfs4_deviceid *id, 671 660 gfp_t gfp_flags) 672 661 { 673 662 struct xdr_stream stream; ··· 691 682 if (unlikely(!p)) 692 683 goto out_err; 693 684 694 - memcpy(id, p, sizeof(*id)); 685 + memcpy(&fl->deviceid, p, sizeof(fl->deviceid)); 695 686 p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); 696 - nfs4_print_deviceid(id); 687 + nfs4_print_deviceid(&fl->deviceid); 697 688 698 689 nfl_util = be32_to_cpup(p++); 699 690 if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS) ··· 840 831 { 841 832 struct nfs4_filelayout_segment *fl; 842 833 int rc; 843 - struct nfs4_deviceid id; 844 834 845 835 dprintk("--> %s\n", __func__); 846 836 fl = kzalloc(sizeof(*fl), gfp_flags); 847 837 if (!fl) 848 838 return NULL; 849 839 850 - rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); 851 - if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { 840 + rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags); 841 + if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) { 852 842 _filelayout_free_lseg(fl); 853 843 return NULL; 854 844 } ··· 896 888 return min(stripe_unit - (unsigned int)stripe_offset, size); 897 889 } 898 890 891 + static struct 
pnfs_layout_segment * 892 + fl_pnfs_update_layout(struct inode *ino, 893 + struct nfs_open_context *ctx, 894 + loff_t pos, 895 + u64 count, 896 + enum pnfs_iomode iomode, 897 + bool strict_iomode, 898 + gfp_t gfp_flags) 899 + { 900 + struct pnfs_layout_segment *lseg = NULL; 901 + struct pnfs_layout_hdr *lo; 902 + struct nfs4_filelayout_segment *fl; 903 + int status; 904 + 905 + lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode, 906 + gfp_flags); 907 + if (!lseg) 908 + lseg = ERR_PTR(-ENOMEM); 909 + if (IS_ERR(lseg)) 910 + goto out; 911 + 912 + lo = NFS_I(ino)->layout; 913 + fl = FILELAYOUT_LSEG(lseg); 914 + 915 + status = filelayout_check_deviceid(lo, fl, gfp_flags); 916 + if (status) 917 + lseg = ERR_PTR(status); 918 + out: 919 + if (IS_ERR(lseg)) 920 + pnfs_put_lseg(lseg); 921 + return lseg; 922 + } 923 + 899 924 static void 900 925 filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio, 901 926 struct nfs_page *req) 902 927 { 903 928 if (!pgio->pg_lseg) { 904 - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 905 - req->wb_context, 906 - 0, 907 - NFS4_MAX_UINT64, 908 - IOMODE_READ, 909 - false, 910 - GFP_KERNEL); 929 + pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode, 930 + req->wb_context, 931 + 0, 932 + NFS4_MAX_UINT64, 933 + IOMODE_READ, 934 + false, 935 + GFP_KERNEL); 911 936 if (IS_ERR(pgio->pg_lseg)) { 912 937 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 913 938 pgio->pg_lseg = NULL; ··· 960 919 int status; 961 920 962 921 if (!pgio->pg_lseg) { 963 - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 964 - req->wb_context, 965 - 0, 966 - NFS4_MAX_UINT64, 967 - IOMODE_RW, 968 - false, 969 - GFP_NOFS); 922 + pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode, 923 + req->wb_context, 924 + 0, 925 + NFS4_MAX_UINT64, 926 + IOMODE_RW, 927 + false, 928 + GFP_NOFS); 970 929 if (IS_ERR(pgio->pg_lseg)) { 971 930 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 972 931 pgio->pg_lseg = NULL;
+10 -9
fs/nfs/filelayout/filelayout.h
··· 55 55 }; 56 56 57 57 struct nfs4_filelayout_segment { 58 - struct pnfs_layout_segment generic_hdr; 59 - u32 stripe_type; 60 - u32 commit_through_mds; 61 - u32 stripe_unit; 62 - u32 first_stripe_index; 63 - u64 pattern_offset; 64 - struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */ 65 - unsigned int num_fh; 66 - struct nfs_fh **fh_array; 58 + struct pnfs_layout_segment generic_hdr; 59 + u32 stripe_type; 60 + u32 commit_through_mds; 61 + u32 stripe_unit; 62 + u32 first_stripe_index; 63 + u64 pattern_offset; 64 + struct nfs4_deviceid deviceid; 65 + struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */ 66 + unsigned int num_fh; 67 + struct nfs_fh **fh_array; 67 68 }; 68 69 69 70 struct nfs4_filelayout {
+4
fs/nfs/flexfilelayout/flexfilelayoutdev.c
··· 208 208 } else 209 209 goto outerr; 210 210 } 211 + 212 + if (IS_ERR(mirror->mirror_ds)) 213 + goto outerr; 214 + 211 215 if (mirror->mirror_ds->ds == NULL) { 212 216 struct nfs4_deviceid_node *devid; 213 217 devid = &mirror->mirror_ds->id_node;
+3 -6
fs/nfs/nfs4proc.c
··· 2442 2442 } 2443 2443 2444 2444 nfs4_stateid_copy(&stateid, &delegation->stateid); 2445 - if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { 2445 + if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) || 2446 + !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2447 + &delegation->flags)) { 2446 2448 rcu_read_unlock(); 2447 2449 nfs_finish_clear_delegation_stateid(state, &stateid); 2448 - return; 2449 - } 2450 - 2451 - if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) { 2452 - rcu_read_unlock(); 2453 2450 return; 2454 2451 } 2455 2452
+33 -10
fs/nfsd/nfsctl.c
··· 538 538 539 539 static ssize_t 540 540 nfsd_print_version_support(char *buf, int remaining, const char *sep, 541 - unsigned vers, unsigned minor) 541 + unsigned vers, int minor) 542 542 { 543 - const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u"; 543 + const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u"; 544 544 bool supported = !!nfsd_vers(vers, NFSD_TEST); 545 545 546 - if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST)) 546 + if (vers == 4 && minor >= 0 && 547 + !nfsd_minorversion(minor, NFSD_TEST)) 547 548 supported = false; 549 + if (minor == 0 && supported) 550 + /* 551 + * special case for backward compatability. 552 + * +4.0 is never reported, it is implied by 553 + * +4, unless -4.0 is present. 554 + */ 555 + return 0; 548 556 return snprintf(buf, remaining, format, sep, 549 557 supported ? '+' : '-', vers, minor); 550 558 } ··· 562 554 char *mesg = buf; 563 555 char *vers, *minorp, sign; 564 556 int len, num, remaining; 565 - unsigned minor; 566 557 ssize_t tlen = 0; 567 558 char *sep; 568 559 struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); ··· 582 575 if (len <= 0) return -EINVAL; 583 576 do { 584 577 enum vers_op cmd; 578 + unsigned minor; 585 579 sign = *vers; 586 580 if (sign == '+' || sign == '-') 587 581 num = simple_strtol((vers+1), &minorp, 0); ··· 593 585 return -EINVAL; 594 586 if (kstrtouint(minorp+1, 0, &minor) < 0) 595 587 return -EINVAL; 596 - } else 597 - minor = 0; 588 + } 589 + 598 590 cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET; 599 591 switch(num) { 600 592 case 2: ··· 602 594 nfsd_vers(num, cmd); 603 595 break; 604 596 case 4: 605 - if (nfsd_minorversion(minor, cmd) >= 0) 606 - break; 597 + if (*minorp == '.') { 598 + if (nfsd_minorversion(minor, cmd) < 0) 599 + return -EINVAL; 600 + } else if ((cmd == NFSD_SET) != nfsd_vers(num, NFSD_TEST)) { 601 + /* 602 + * Either we have +4 and no minors are enabled, 603 + * or we have -4 and at least one minor is enabled. 
604 + * In either case, propagate 'cmd' to all minors. 605 + */ 606 + minor = 0; 607 + while (nfsd_minorversion(minor, cmd) >= 0) 608 + minor++; 609 + } 610 + break; 607 611 default: 608 612 return -EINVAL; 609 613 } ··· 632 612 sep = ""; 633 613 remaining = SIMPLE_TRANSACTION_LIMIT; 634 614 for (num=2 ; num <= 4 ; num++) { 615 + int minor; 635 616 if (!nfsd_vers(num, NFSD_AVAIL)) 636 617 continue; 637 - minor = 0; 618 + 619 + minor = -1; 638 620 do { 639 621 len = nfsd_print_version_support(buf, remaining, 640 622 sep, num, minor); ··· 646 624 buf += len; 647 625 tlen += len; 648 626 minor++; 649 - sep = " "; 627 + if (len) 628 + sep = " "; 650 629 } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION); 651 630 } 652 631 out:
+1
fs/nfsd/nfsproc.c
··· 786 786 { nfserr_serverfault, -ESERVERFAULT }, 787 787 { nfserr_serverfault, -ENFILE }, 788 788 { nfserr_io, -EUCLEAN }, 789 + { nfserr_perm, -ENOKEY }, 789 790 }; 790 791 int i; 791 792
+13 -15
fs/nfsd/nfssvc.c
··· 167 167 168 168 int nfsd_minorversion(u32 minorversion, enum vers_op change) 169 169 { 170 - if (minorversion > NFSD_SUPPORTED_MINOR_VERSION) 170 + if (minorversion > NFSD_SUPPORTED_MINOR_VERSION && 171 + change != NFSD_AVAIL) 171 172 return -1; 172 173 switch(change) { 173 174 case NFSD_SET: ··· 416 415 417 416 void nfsd_reset_versions(void) 418 417 { 419 - int found_one = 0; 420 418 int i; 421 419 422 - for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) { 423 - if (nfsd_program.pg_vers[i]) 424 - found_one = 1; 425 - } 420 + for (i = 0; i < NFSD_NRVERS; i++) 421 + if (nfsd_vers(i, NFSD_TEST)) 422 + return; 426 423 427 - if (!found_one) { 428 - for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) 429 - nfsd_program.pg_vers[i] = nfsd_version[i]; 430 - #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 431 - for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) 432 - nfsd_acl_program.pg_vers[i] = 433 - nfsd_acl_version[i]; 434 - #endif 435 - } 424 + for (i = 0; i < NFSD_NRVERS; i++) 425 + if (i != 4) 426 + nfsd_vers(i, NFSD_SET); 427 + else { 428 + int minor = 0; 429 + while (nfsd_minorversion(minor, NFSD_SET) >= 0) 430 + minor++; 431 + } 436 432 } 437 433 438 434 /*
+3 -3
include/asm-generic/sections.h
··· 14 14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* 15 15 * and/or .init.* sections. 16 16 * [__start_rodata, __end_rodata]: contains .rodata.* sections 17 - * [__start_data_ro_after_init, __end_data_ro_after_init]: 18 - * contains data.ro_after_init section 17 + * [__start_ro_after_init, __end_ro_after_init]: 18 + * contains .data..ro_after_init section 19 19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* 20 20 * may be out of this range on some architectures. 21 21 * [_sinittext, _einittext]: contains .init.text.* sections ··· 33 33 extern char __bss_start[], __bss_stop[]; 34 34 extern char __init_begin[], __init_end[]; 35 35 extern char _sinittext[], _einittext[]; 36 - extern char __start_data_ro_after_init[], __end_data_ro_after_init[]; 36 + extern char __start_ro_after_init[], __end_ro_after_init[]; 37 37 extern char _end[]; 38 38 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; 39 39 extern char __kprobes_text_start[], __kprobes_text_end[];
+4 -2
include/asm-generic/vmlinux.lds.h
··· 173 173 KEEP(*(__##name##_of_table_end)) 174 174 175 175 #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) 176 + #define CLKEVT_OF_TABLES() OF_TABLE(CONFIG_CLKEVT_OF, clkevt) 176 177 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) 177 178 #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) 178 179 #define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) ··· 261 260 */ 262 261 #ifndef RO_AFTER_INIT_DATA 263 262 #define RO_AFTER_INIT_DATA \ 264 - __start_data_ro_after_init = .; \ 263 + __start_ro_after_init = .; \ 265 264 *(.data..ro_after_init) \ 266 - __end_data_ro_after_init = .; 265 + __end_ro_after_init = .; 267 266 #endif 268 267 269 268 /* ··· 560 559 CLK_OF_TABLES() \ 561 560 RESERVEDMEM_OF_TABLES() \ 562 561 CLKSRC_OF_TABLES() \ 562 + CLKEVT_OF_TABLES() \ 563 563 IOMMU_OF_TABLES() \ 564 564 CPU_METHOD_OF_TABLES() \ 565 565 CPUIDLE_METHOD_OF_TABLES() \
+1 -1
include/linux/clockchips.h
··· 229 229 230 230 #ifdef CONFIG_CLKEVT_PROBE 231 231 extern int clockevent_probe(void); 232 - #els 232 + #else 233 233 static inline int clockevent_probe(void) { return 0; } 234 234 #endif 235 235
+3
include/linux/kasan.h
··· 76 76 static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } 77 77 size_t kasan_metadata_size(struct kmem_cache *cache); 78 78 79 + bool kasan_save_enable_multi_shot(void); 80 + void kasan_restore_multi_shot(bool enabled); 81 + 79 82 #else /* CONFIG_KASAN */ 80 83 81 84 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+2 -2
include/linux/kvm_host.h
··· 162 162 int len, void *val); 163 163 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 164 164 int len, struct kvm_io_device *dev); 165 - int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 166 - struct kvm_io_device *dev); 165 + void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 166 + struct kvm_io_device *dev); 167 167 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 168 168 gpa_t addr); 169 169
+6
include/linux/memcontrol.h
··· 740 740 return false; 741 741 } 742 742 743 + static inline void mem_cgroup_update_page_stat(struct page *page, 744 + enum mem_cgroup_stat_index idx, 745 + int nr) 746 + { 747 + } 748 + 743 749 static inline void mem_cgroup_inc_page_stat(struct page *page, 744 750 enum mem_cgroup_stat_index idx) 745 751 {
+2
include/linux/mm.h
··· 32 32 struct writeback_control; 33 33 struct bdi_writeback; 34 34 35 + void init_mm_internals(void); 36 + 35 37 #ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ 36 38 extern unsigned long max_mapnr; 37 39
+7 -6
include/linux/sched/clock.h
··· 54 54 } 55 55 #else 56 56 extern void sched_clock_init_late(void); 57 - /* 58 - * Architectures can set this to 1 if they have specified 59 - * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, 60 - * but then during bootup it turns out that sched_clock() 61 - * is reliable after all: 62 - */ 63 57 extern int sched_clock_stable(void); 64 58 extern void clear_sched_clock_stable(void); 59 + 60 + /* 61 + * When sched_clock_stable(), __sched_clock_offset provides the offset 62 + * between local_clock() and sched_clock(). 63 + */ 64 + extern u64 __sched_clock_offset; 65 + 65 66 66 67 extern void sched_clock_tick(void); 67 68 extern void sched_clock_idle_sleep_event(void);
+2
init/main.c
··· 1022 1022 1023 1023 workqueue_init(); 1024 1024 1025 + init_mm_internals(); 1026 + 1025 1027 do_pre_smp_initcalls(); 1026 1028 lockup_detector_init(); 1027 1029
+3 -2
kernel/padata.c
··· 186 186 187 187 reorder = &next_queue->reorder; 188 188 189 + spin_lock(&reorder->lock); 189 190 if (!list_empty(&reorder->list)) { 190 191 padata = list_entry(reorder->list.next, 191 192 struct padata_priv, list); 192 193 193 - spin_lock(&reorder->lock); 194 194 list_del_init(&padata->list); 195 195 atomic_dec(&pd->reorder_objects); 196 - spin_unlock(&reorder->lock); 197 196 198 197 pd->processed++; 199 198 199 + spin_unlock(&reorder->lock); 200 200 goto out; 201 201 } 202 + spin_unlock(&reorder->lock); 202 203 203 204 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { 204 205 padata = ERR_PTR(-ENODATA);
+27 -19
kernel/sched/clock.c
··· 96 96 static int __sched_clock_stable_early = 1; 97 97 98 98 /* 99 - * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset 99 + * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset 100 100 */ 101 - static __read_mostly u64 raw_offset; 102 - static __read_mostly u64 gtod_offset; 101 + __read_mostly u64 __sched_clock_offset; 102 + static __read_mostly u64 __gtod_offset; 103 103 104 104 struct sched_clock_data { 105 105 u64 tick_raw; ··· 131 131 /* 132 132 * Attempt to make the (initial) unstable->stable transition continuous. 133 133 */ 134 - raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw); 134 + __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw); 135 135 136 136 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", 137 - scd->tick_gtod, gtod_offset, 138 - scd->tick_raw, raw_offset); 137 + scd->tick_gtod, __gtod_offset, 138 + scd->tick_raw, __sched_clock_offset); 139 139 140 140 static_branch_enable(&__sched_clock_stable); 141 141 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); 142 142 } 143 143 144 - static void __clear_sched_clock_stable(struct work_struct *work) 144 + static void __sched_clock_work(struct work_struct *work) 145 + { 146 + static_branch_disable(&__sched_clock_stable); 147 + } 148 + 149 + static DECLARE_WORK(sched_clock_work, __sched_clock_work); 150 + 151 + static void __clear_sched_clock_stable(void) 145 152 { 146 153 struct sched_clock_data *scd = this_scd(); 147 154 ··· 161 154 * 162 155 * Still do what we can. 
163 156 */ 164 - gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod); 157 + __gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod); 165 158 166 159 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", 167 - scd->tick_gtod, gtod_offset, 168 - scd->tick_raw, raw_offset); 160 + scd->tick_gtod, __gtod_offset, 161 + scd->tick_raw, __sched_clock_offset); 169 162 170 - static_branch_disable(&__sched_clock_stable); 171 163 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE); 172 - } 173 164 174 - static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable); 165 + if (sched_clock_stable()) 166 + schedule_work(&sched_clock_work); 167 + } 175 168 176 169 void clear_sched_clock_stable(void) 177 170 { ··· 180 173 smp_mb(); /* matches sched_clock_init_late() */ 181 174 182 175 if (sched_clock_running == 2) 183 - schedule_work(&sched_clock_work); 176 + __clear_sched_clock_stable(); 184 177 } 185 178 186 179 void sched_clock_init_late(void) ··· 221 214 */ 222 215 static u64 sched_clock_local(struct sched_clock_data *scd) 223 216 { 224 - u64 now, clock, old_clock, min_clock, max_clock; 217 + u64 now, clock, old_clock, min_clock, max_clock, gtod; 225 218 s64 delta; 226 219 227 220 again: ··· 238 231 * scd->tick_gtod + TICK_NSEC); 239 232 */ 240 233 241 - clock = scd->tick_gtod + gtod_offset + delta; 242 - min_clock = wrap_max(scd->tick_gtod, old_clock); 243 - max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC); 234 + gtod = scd->tick_gtod + __gtod_offset; 235 + clock = gtod + delta; 236 + min_clock = wrap_max(gtod, old_clock); 237 + max_clock = wrap_max(old_clock, gtod + TICK_NSEC); 244 238 245 239 clock = wrap_max(clock, min_clock); 246 240 clock = wrap_min(clock, max_clock); ··· 325 317 u64 clock; 326 318 327 319 if (sched_clock_stable()) 328 - return sched_clock() + raw_offset; 320 + return sched_clock() + __sched_clock_offset; 329 321 330 322 if (unlikely(!sched_clock_running)) 331 323 return 0ull;
+1
lib/syscall.c
··· 12 12 13 13 if (!try_get_task_stack(target)) { 14 14 /* Task has no stack, so the task isn't in a syscall. */ 15 + *sp = *pc = 0; 15 16 *callno = -1; 16 17 return 0; 17 18 }
+10
lib/test_kasan.c
··· 20 20 #include <linux/string.h> 21 21 #include <linux/uaccess.h> 22 22 #include <linux/module.h> 23 + #include <linux/kasan.h> 23 24 24 25 /* 25 26 * Note: test functions are marked noinline so that their names appear in ··· 475 474 476 475 static int __init kmalloc_tests_init(void) 477 476 { 477 + /* 478 + * Temporarily enable multi-shot mode. Otherwise, we'd only get a 479 + * report for the first case. 480 + */ 481 + bool multishot = kasan_save_enable_multi_shot(); 482 + 478 483 kmalloc_oob_right(); 479 484 kmalloc_oob_left(); 480 485 kmalloc_node_oob_right(); ··· 506 499 ksize_unpoisons_memory(); 507 500 copy_user_test(); 508 501 use_after_scope_test(); 502 + 503 + kasan_restore_multi_shot(multishot); 504 + 509 505 return -EAGAIN; 510 506 } 511 507
+7 -3
mm/hugetlb.c
··· 4403 4403 return 0; 4404 4404 out_err: 4405 4405 if (!vma || vma->vm_flags & VM_MAYSHARE) 4406 - region_abort(resv_map, from, to); 4406 + /* Don't call region_abort if region_chg failed */ 4407 + if (chg >= 0) 4408 + region_abort(resv_map, from, to); 4407 4409 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4408 4410 kref_put(&resv_map->refs, resv_map_release); 4409 4411 return ret; ··· 4653 4651 { 4654 4652 struct page *page = NULL; 4655 4653 spinlock_t *ptl; 4654 + pte_t pte; 4656 4655 retry: 4657 4656 ptl = pmd_lockptr(mm, pmd); 4658 4657 spin_lock(ptl); ··· 4663 4660 */ 4664 4661 if (!pmd_huge(*pmd)) 4665 4662 goto out; 4666 - if (pmd_present(*pmd)) { 4663 + pte = huge_ptep_get((pte_t *)pmd); 4664 + if (pte_present(pte)) { 4667 4665 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 4668 4666 if (flags & FOLL_GET) 4669 4667 get_page(page); 4670 4668 } else { 4671 - if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) { 4669 + if (is_hugetlb_entry_migration(pte)) { 4672 4670 spin_unlock(ptl); 4673 4671 __migration_entry_wait(mm, (pte_t *)pmd, ptl); 4674 4672 goto retry;
-5
mm/kasan/kasan.h
··· 96 96 << KASAN_SHADOW_SCALE_SHIFT); 97 97 } 98 98 99 - static inline bool kasan_report_enabled(void) 100 - { 101 - return !current->kasan_depth; 102 - } 103 - 104 99 void kasan_report(unsigned long addr, size_t size, 105 100 bool is_write, unsigned long ip); 106 101 void kasan_report_double_free(struct kmem_cache *cache, void *object,
+36
mm/kasan/report.c
··· 13 13 * 14 14 */ 15 15 16 + #include <linux/bitops.h> 16 17 #include <linux/ftrace.h> 18 + #include <linux/init.h> 17 19 #include <linux/kernel.h> 18 20 #include <linux/mm.h> 19 21 #include <linux/printk.h> ··· 293 291 } 294 292 295 293 kasan_end_report(&flags); 294 + } 295 + 296 + static unsigned long kasan_flags; 297 + 298 + #define KASAN_BIT_REPORTED 0 299 + #define KASAN_BIT_MULTI_SHOT 1 300 + 301 + bool kasan_save_enable_multi_shot(void) 302 + { 303 + return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); 304 + } 305 + EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot); 306 + 307 + void kasan_restore_multi_shot(bool enabled) 308 + { 309 + if (!enabled) 310 + clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); 311 + } 312 + EXPORT_SYMBOL_GPL(kasan_restore_multi_shot); 313 + 314 + static int __init kasan_set_multi_shot(char *str) 315 + { 316 + set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); 317 + return 1; 318 + } 319 + __setup("kasan_multi_shot", kasan_set_multi_shot); 320 + 321 + static inline bool kasan_report_enabled(void) 322 + { 323 + if (current->kasan_depth) 324 + return false; 325 + if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) 326 + return true; 327 + return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags); 296 328 } 297 329 298 330 void kasan_report(unsigned long addr, size_t size,
+1 -1
mm/kmemleak.c
··· 1416 1416 /* data/bss scanning */ 1417 1417 scan_large_block(_sdata, _edata); 1418 1418 scan_large_block(__bss_start, __bss_stop); 1419 - scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init); 1419 + scan_large_block(__start_ro_after_init, __end_ro_after_init); 1420 1420 1421 1421 #ifdef CONFIG_SMP 1422 1422 /* per-cpu sections scanning */
+5 -2
mm/migrate.c
··· 209 209 210 210 VM_BUG_ON_PAGE(PageTail(page), page); 211 211 while (page_vma_mapped_walk(&pvmw)) { 212 - new = page - pvmw.page->index + 213 - linear_page_index(vma, pvmw.address); 212 + if (PageKsm(page)) 213 + new = page; 214 + else 215 + new = page - pvmw.page->index + 216 + linear_page_index(vma, pvmw.address); 214 217 215 218 get_page(new); 216 219 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
+2 -2
mm/rmap.c
··· 1159 1159 goto out; 1160 1160 } 1161 1161 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr); 1162 - mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1162 + mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr); 1163 1163 out: 1164 1164 unlock_page_memcg(page); 1165 1165 } ··· 1199 1199 * pte lock(a spinlock) is held, which implies preemption disabled. 1200 1200 */ 1201 1201 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr); 1202 - mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1202 + mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr); 1203 1203 1204 1204 if (unlikely(PageMlocked(page))) 1205 1205 clear_page_mlock(page);
+1 -3
mm/vmstat.c
··· 1764 1764 1765 1765 #endif 1766 1766 1767 - static int __init setup_vmstat(void) 1767 + void __init init_mm_internals(void) 1768 1768 { 1769 1769 #ifdef CONFIG_SMP 1770 1770 int ret; ··· 1792 1792 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); 1793 1793 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); 1794 1794 #endif 1795 - return 0; 1796 1795 } 1797 - module_init(setup_vmstat) 1798 1796 1799 1797 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 1800 1798
+1 -1
mm/workingset.c
··· 532 532 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", 533 533 timestamp_bits, max_order, bucket_order); 534 534 535 - ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key); 535 + ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key); 536 536 if (ret) 537 537 goto err; 538 538 ret = register_shrinker(&workingset_shadow_shrinker);
+1
net/sunrpc/svcsock.c
··· 1635 1635 1636 1636 xprt = &svsk->sk_xprt; 1637 1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv); 1638 + set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags); 1638 1639 1639 1640 serv->sv_bc_xprt = xprt; 1640 1641
+1
net/sunrpc/xprtrdma/svc_rdma_transport.c
··· 127 127 xprt = &cma_xprt->sc_xprt; 128 128 129 129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv); 130 + set_bit(XPT_CONG_CTRL, &xprt->xpt_flags); 130 131 serv->sv_bc_xprt = xprt; 131 132 132 133 dprintk("svcrdma: %s(%p)\n", __func__, xprt);
+8 -1
net/xfrm/xfrm_user.c
··· 412 412 up = nla_data(rp); 413 413 ulen = xfrm_replay_state_esn_len(up); 414 414 415 - if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) 415 + /* Check the overall length and the internal bitmap length to avoid 416 + * potential overflow. */ 417 + if (nla_len(rp) < ulen || 418 + xfrm_replay_state_esn_len(replay_esn) != ulen || 419 + replay_esn->bmp_len != up->bmp_len) 420 + return -EINVAL; 421 + 422 + if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) 416 423 return -EINVAL; 417 424 418 425 return 0;
+4
scripts/Kbuild.include
··· 148 148 # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 149 149 cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) 150 150 151 + # cc-if-fullversion 152 + # Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1) 153 + cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4)) 154 + 151 155 # cc-ldoption 152 156 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) 153 157 cc-ldoption = $(call try-run,\
+4
sound/core/seq/seq_fifo.c
··· 267 267 /* NOTE: overflow flag is not cleared */ 268 268 spin_unlock_irqrestore(&f->lock, flags); 269 269 270 + /* close the old pool and wait until all users are gone */ 271 + snd_seq_pool_mark_closing(oldpool); 272 + snd_use_lock_sync(&f->use_lock); 273 + 270 274 /* release cells in old pool */ 271 275 for (cell = oldhead; cell; cell = next) { 272 276 next = cell->next;
+11 -1
sound/pci/hda/patch_realtek.c
··· 4858 4858 ALC292_FIXUP_DISABLE_AAMIX, 4859 4859 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, 4860 4860 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 4861 + ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, 4861 4862 ALC275_FIXUP_DELL_XPS, 4862 4863 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, 4863 4864 ALC293_FIXUP_LENOVO_SPK_NOISE, ··· 5471 5470 .chained = true, 5472 5471 .chain_id = ALC269_FIXUP_HEADSET_MODE 5473 5472 }, 5473 + [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = { 5474 + .type = HDA_FIXUP_PINS, 5475 + .v.pins = (const struct hda_pintbl[]) { 5476 + { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ 5477 + { } 5478 + }, 5479 + .chained = true, 5480 + .chain_id = ALC269_FIXUP_HEADSET_MODE 5481 + }, 5474 5482 [ALC275_FIXUP_DELL_XPS] = { 5475 5483 .type = HDA_FIXUP_VERBS, 5476 5484 .v.verbs = (const struct hda_verb[]) { ··· 5552 5542 .type = HDA_FIXUP_FUNC, 5553 5543 .v.func = alc298_fixup_speaker_volume, 5554 5544 .chained = true, 5555 - .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5545 + .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, 5556 5546 }, 5557 5547 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { 5558 5548 .type = HDA_FIXUP_PINS,
+1 -1
sound/soc/atmel/atmel-classd.c
··· 349 349 } 350 350 351 351 #define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8) 352 - #define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8) 352 + #define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8) 353 353 354 354 static struct { 355 355 int rate;
+8 -8
sound/soc/codecs/hdac_hdmi.c
··· 1534 1534 pin->mst_capable = false; 1535 1535 /* if not MST, default is port[0] */ 1536 1536 hport = &pin->ports[0]; 1537 - goto out; 1538 1537 } else { 1539 1538 for (i = 0; i < pin->num_ports; i++) { 1540 1539 pin->mst_capable = true; 1541 1540 if (pin->ports[i].id == pipe) { 1542 1541 hport = &pin->ports[i]; 1543 - goto out; 1542 + break; 1544 1543 } 1545 1544 } 1546 1545 } 1546 + 1547 + if (hport) 1548 + hdac_hdmi_present_sense(pin, hport); 1547 1549 } 1548 1550 1549 - out: 1550 - if (pin && hport) 1551 - hdac_hdmi_present_sense(pin, hport); 1552 1551 } 1553 1552 1554 1553 static struct i915_audio_component_audio_ops aops = { ··· 1997 1998 struct hdac_hdmi_pin *pin, *pin_next; 1998 1999 struct hdac_hdmi_cvt *cvt, *cvt_next; 1999 2000 struct hdac_hdmi_pcm *pcm, *pcm_next; 2000 - struct hdac_hdmi_port *port; 2001 + struct hdac_hdmi_port *port, *port_next; 2001 2002 int i; 2002 2003 2003 2004 snd_soc_unregister_codec(&edev->hdac.dev); ··· 2007 2008 if (list_empty(&pcm->port_list)) 2008 2009 continue; 2009 2010 2010 - list_for_each_entry(port, &pcm->port_list, head) 2011 - port = NULL; 2011 + list_for_each_entry_safe(port, port_next, 2012 + &pcm->port_list, head) 2013 + list_del(&port->head); 2012 2014 2013 2015 list_del(&pcm->head); 2014 2016 kfree(pcm);
+7 -3
sound/soc/codecs/rt5665.c
··· 1241 1241 static void rt5665_jd_check_handler(struct work_struct *work) 1242 1242 { 1243 1243 struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv, 1244 - calibrate_work.work); 1244 + jd_check_work.work); 1245 1245 1246 1246 if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) { 1247 1247 /* jack out */ ··· 2252 2252 2253 2253 static const SOC_ENUM_SINGLE_DECL( 2254 2254 rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA, 2255 - RT5665_IF3_ADC_IN_SFT, rt5665_if2_1_adc_in_src); 2255 + RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src); 2256 2256 2257 2257 static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux = 2258 2258 SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum); ··· 3178 3178 {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc}, 3179 3179 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, 3180 3180 {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc}, 3181 + {"I2S1 ASRC", NULL, "CLKDET"}, 3182 + {"I2S2 ASRC", NULL, "CLKDET"}, 3183 + {"I2S3 ASRC", NULL, "CLKDET"}, 3181 3184 3182 3185 /*Vref*/ 3183 3186 {"Mic Det Power", NULL, "Vref2"}, ··· 3915 3912 {"Mono MIX", "MONOVOL Switch", "MONOVOL"}, 3916 3913 {"Mono Amp", NULL, "Mono MIX"}, 3917 3914 {"Mono Amp", NULL, "Vref2"}, 3915 + {"Mono Amp", NULL, "Vref3"}, 3918 3916 {"Mono Amp", NULL, "CLKDET SYS"}, 3919 3917 {"Mono Amp", NULL, "CLKDET MONO"}, 3920 3918 {"Mono Playback", "Switch", "Mono Amp"}, ··· 4802 4798 /* Enhance performance*/ 4803 4799 regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1, 4804 4800 RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK, 4805 - RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09); 4801 + RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_12); 4806 4802 4807 4803 INIT_DELAYED_WORK(&rt5665->jack_detect_work, 4808 4804 rt5665_jack_detect_handler);
+1 -1
sound/soc/codecs/rt5665.h
··· 1106 1106 #define RT5665_HP_DRIVER_MASK (0x3 << 2) 1107 1107 #define RT5665_HP_DRIVER_1X (0x0 << 2) 1108 1108 #define RT5665_HP_DRIVER_3X (0x1 << 2) 1109 - #define RT5665_HP_DRIVER_5X (0x2 << 2) 1109 + #define RT5665_HP_DRIVER_5X (0x3 << 2) 1110 1110 #define RT5665_LDO1_DVO_MASK (0x3) 1111 1111 #define RT5665_LDO1_DVO_09 (0x0) 1112 1112 #define RT5665_LDO1_DVO_10 (0x1)
+7 -2
sound/soc/codecs/wm_adsp.c
··· 899 899 900 900 mutex_lock(&ctl->dsp->pwr_lock); 901 901 902 - memcpy(ctl->cache, p, ctl->len); 902 + if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) 903 + ret = -EPERM; 904 + else 905 + memcpy(ctl->cache, p, ctl->len); 903 906 904 907 ctl->set = 1; 905 908 if (ctl->enabled && ctl->dsp->running) ··· 929 926 ctl->set = 1; 930 927 if (ctl->enabled && ctl->dsp->running) 931 928 ret = wm_coeff_write_control(ctl, ctl->cache, size); 929 + else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) 930 + ret = -EPERM; 932 931 } 933 932 934 933 mutex_unlock(&ctl->dsp->pwr_lock); ··· 952 947 953 948 mutex_lock(&ctl->dsp->pwr_lock); 954 949 955 - if (ctl->enabled) 950 + if (ctl->enabled && ctl->dsp->running) 956 951 ret = wm_coeff_write_acked_control(ctl, val); 957 952 else 958 953 ret = -EPERM;
+1
sound/soc/generic/simple-card-utils.c
··· 115 115 clk = devm_get_clk_from_child(dev, node, NULL); 116 116 if (!IS_ERR(clk)) { 117 117 simple_dai->sysclk = clk_get_rate(clk); 118 + simple_dai->clk = clk; 118 119 } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) { 119 120 simple_dai->sysclk = val; 120 121 } else {
+1 -1
sound/soc/intel/skylake/skl-topology.c
··· 512 512 if (bc->set_params != SKL_PARAM_INIT) 513 513 continue; 514 514 515 - mconfig->formats_config.caps = (u32 *)&bc->params; 515 + mconfig->formats_config.caps = (u32 *)bc->params; 516 516 mconfig->formats_config.caps_size = bc->size; 517 517 518 518 break;
+1 -1
sound/soc/mediatek/Kconfig
··· 13 13 14 14 config SND_SOC_MT2701_CS42448 15 15 tristate "ASoc Audio driver for MT2701 with CS42448 codec" 16 - depends on SND_SOC_MT2701 16 + depends on SND_SOC_MT2701 && I2C 17 17 select SND_SOC_CS42XX8_I2C 18 18 select SND_SOC_BT_SCO 19 19 help
+20 -16
sound/soc/sh/rcar/cmd.c
··· 31 31 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io); 32 32 struct device *dev = rsnd_priv_to_dev(priv); 33 33 u32 data; 34 + u32 path[] = { 35 + [1] = 1 << 0, 36 + [5] = 1 << 8, 37 + [6] = 1 << 12, 38 + [9] = 1 << 15, 39 + }; 34 40 35 41 if (!mix && !dvc) 36 42 return 0; 43 + 44 + if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1) 45 + return -ENXIO; 37 46 38 47 if (mix) { 39 48 struct rsnd_dai *rdai; 40 49 struct rsnd_mod *src; 41 50 struct rsnd_dai_stream *tio; 42 51 int i; 43 - u32 path[] = { 44 - [0] = 0, 45 - [1] = 1 << 0, 46 - [2] = 0, 47 - [3] = 0, 48 - [4] = 0, 49 - [5] = 1 << 8 50 - }; 51 52 52 53 /* 53 54 * it is assuming that integrater is well understanding about ··· 71 70 } else { 72 71 struct rsnd_mod *src = rsnd_io_to_mod_src(io); 73 72 74 - u32 path[] = { 75 - [0] = 0x30000, 76 - [1] = 0x30001, 77 - [2] = 0x40000, 78 - [3] = 0x10000, 79 - [4] = 0x20000, 80 - [5] = 0x40100 73 + u8 cmd_case[] = { 74 + [0] = 0x3, 75 + [1] = 0x3, 76 + [2] = 0x4, 77 + [3] = 0x1, 78 + [4] = 0x2, 79 + [5] = 0x4, 80 + [6] = 0x1, 81 + [9] = 0x2, 81 82 }; 82 83 83 - data = path[rsnd_mod_id(src)]; 84 + data = path[rsnd_mod_id(src)] | 85 + cmd_case[rsnd_mod_id(src)] << 16; 84 86 } 85 87 86 88 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
+16 -2
sound/soc/sh/rcar/dma.c
··· 454 454 return ioread32(rsnd_dmapp_addr(dmac, dma, reg)); 455 455 } 456 456 457 + static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg) 458 + { 459 + struct rsnd_mod *mod = rsnd_mod_get(dma); 460 + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 461 + struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv); 462 + void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg); 463 + u32 val = ioread32(addr); 464 + 465 + val &= ~mask; 466 + val |= (data & mask); 467 + 468 + iowrite32(val, addr); 469 + } 470 + 457 471 static int rsnd_dmapp_stop(struct rsnd_mod *mod, 458 472 struct rsnd_dai_stream *io, 459 473 struct rsnd_priv *priv) ··· 475 461 struct rsnd_dma *dma = rsnd_mod_to_dma(mod); 476 462 int i; 477 463 478 - rsnd_dmapp_write(dma, 0, PDMACHCR); 464 + rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR); 479 465 480 466 for (i = 0; i < 1024; i++) { 481 - if (0 == rsnd_dmapp_read(dma, PDMACHCR)) 467 + if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE)) 482 468 return 0; 483 469 udelay(1); 484 470 }
+5 -1
sound/soc/sh/rcar/ssiu.c
··· 64 64 mask1 = (1 << 4) | (1 << 20); /* mask sync bit */ 65 65 mask2 = (1 << 4); /* mask sync bit */ 66 66 val1 = val2 = 0; 67 - if (rsnd_ssi_is_pin_sharing(io)) { 67 + if (id == 8) { 68 + /* 69 + * SSI8 pin is sharing with SSI7, nothing to do. 70 + */ 71 + } else if (rsnd_ssi_is_pin_sharing(io)) { 68 72 int shift = -1; 69 73 70 74 switch (id) {
+6 -2
sound/soc/soc-core.c
··· 3326 3326 { 3327 3327 struct snd_soc_platform *platform = rtd->platform; 3328 3328 3329 - return platform->driver->pcm_new(rtd); 3329 + if (platform->driver->pcm_new) 3330 + return platform->driver->pcm_new(rtd); 3331 + else 3332 + return 0; 3330 3333 } 3331 3334 3332 3335 static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm) ··· 3337 3334 struct snd_soc_pcm_runtime *rtd = pcm->private_data; 3338 3335 struct snd_soc_platform *platform = rtd->platform; 3339 3336 3340 - platform->driver->pcm_free(pcm); 3337 + if (platform->driver->pcm_free) 3338 + platform->driver->pcm_free(pcm); 3341 3339 } 3342 3340 3343 3341 /**
+3
sound/soc/sti/uniperif_reader.c
··· 349 349 struct uniperif *reader = priv->dai_data.uni; 350 350 int ret; 351 351 352 + reader->substream = substream; 353 + 352 354 if (!UNIPERIF_TYPE_IS_TDM(reader)) 353 355 return 0; 354 356 ··· 380 378 /* Stop the reader */ 381 379 uni_reader_stop(reader); 382 380 } 381 + reader->substream = NULL; 383 382 } 384 383 385 384 static const struct snd_soc_dai_ops uni_reader_dai_ops = {
+30 -37
sound/soc/sunxi/sun8i-codec.c
··· 259 259 return 0; 260 260 } 261 261 262 - static const struct snd_kcontrol_new sun8i_output_left_mixer_controls[] = { 263 - SOC_DAPM_SINGLE("LSlot 0", SUN8I_DAC_MXR_SRC, 264 - SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, 1, 0), 265 - SOC_DAPM_SINGLE("LSlot 1", SUN8I_DAC_MXR_SRC, 266 - SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, 1, 0), 267 - SOC_DAPM_SINGLE("DACL", SUN8I_DAC_MXR_SRC, 268 - SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, 1, 0), 269 - SOC_DAPM_SINGLE("ADCL", SUN8I_DAC_MXR_SRC, 270 - SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, 1, 0), 271 - }; 272 - 273 - static const struct snd_kcontrol_new sun8i_output_right_mixer_controls[] = { 274 - SOC_DAPM_SINGLE("RSlot 0", SUN8I_DAC_MXR_SRC, 262 + static const struct snd_kcontrol_new sun8i_dac_mixer_controls[] = { 263 + SOC_DAPM_DOUBLE("AIF1 Slot 0 Digital DAC Playback Switch", 264 + SUN8I_DAC_MXR_SRC, 265 + SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, 275 266 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0), 276 - SOC_DAPM_SINGLE("RSlot 1", SUN8I_DAC_MXR_SRC, 267 + SOC_DAPM_DOUBLE("AIF1 Slot 1 Digital DAC Playback Switch", 268 + SUN8I_DAC_MXR_SRC, 269 + SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, 277 270 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0), 278 - SOC_DAPM_SINGLE("DACR", SUN8I_DAC_MXR_SRC, 271 + SOC_DAPM_DOUBLE("AIF2 Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC, 272 + SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, 279 273 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0), 280 - SOC_DAPM_SINGLE("ADCR", SUN8I_DAC_MXR_SRC, 274 + SOC_DAPM_DOUBLE("ADC Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC, 275 + SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, 281 276 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0), 282 277 }; 283 278 ··· 281 286 SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA, 282 287 0, NULL, 0), 283 288 284 - /* Analog DAC */ 285 - SND_SOC_DAPM_DAC("Digital Left DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, 286 - SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0), 287 - SND_SOC_DAPM_DAC("Digital Right DAC", "Playback", 
SUN8I_AIF1_DACDAT_CTRL, 288 - SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0), 289 + /* Analog DAC AIF */ 290 + SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Left", "Playback", 0, 291 + SUN8I_AIF1_DACDAT_CTRL, 292 + SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0), 293 + SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Right", "Playback", 0, 294 + SUN8I_AIF1_DACDAT_CTRL, 295 + SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0), 289 296 290 297 /* DAC Mixers */ 291 - SND_SOC_DAPM_MIXER("Left DAC Mixer", SND_SOC_NOPM, 0, 0, 292 - sun8i_output_left_mixer_controls, 293 - ARRAY_SIZE(sun8i_output_left_mixer_controls)), 294 - SND_SOC_DAPM_MIXER("Right DAC Mixer", SND_SOC_NOPM, 0, 0, 295 - sun8i_output_right_mixer_controls, 296 - ARRAY_SIZE(sun8i_output_right_mixer_controls)), 298 + SND_SOC_DAPM_MIXER("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0, 299 + sun8i_dac_mixer_controls, 300 + ARRAY_SIZE(sun8i_dac_mixer_controls)), 301 + SND_SOC_DAPM_MIXER("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0, 302 + sun8i_dac_mixer_controls, 303 + ARRAY_SIZE(sun8i_dac_mixer_controls)), 297 304 298 305 /* Clocks */ 299 306 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA, ··· 318 321 SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0), 319 322 SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL, 320 323 SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0), 321 - 322 - SND_SOC_DAPM_OUTPUT("HP"), 323 324 }; 324 325 325 326 static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = { ··· 333 338 { "DAC", NULL, "MODCLK DAC" }, 334 339 335 340 /* DAC Routes */ 336 - { "Digital Left DAC", NULL, "DAC" }, 337 - { "Digital Right DAC", NULL, "DAC" }, 341 + { "AIF1 Slot 0 Right", NULL, "DAC" }, 342 + { "AIF1 Slot 0 Left", NULL, "DAC" }, 338 343 339 344 /* DAC Mixer Routes */ 340 - { "Left DAC Mixer", "LSlot 0", "Digital Left DAC"}, 341 - { "Right DAC Mixer", "RSlot 0", "Digital Right DAC"}, 342 - 343 - /* End of route : HP out */ 344 - { "HP", NULL, "Left DAC Mixer" }, 345 - { "HP", NULL, "Right DAC Mixer" }, 345 + { "Left Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback 
Switch", 346 + "AIF1 Slot 0 Left"}, 347 + { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch", 348 + "AIF1 Slot 0 Right"}, 346 349 }; 347 350 348 351 static struct snd_soc_dai_ops sun8i_codec_dai_ops = {
+2 -1
virt/kvm/eventfd.c
··· 870 870 continue; 871 871 872 872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); 873 - kvm->buses[bus_idx]->ioeventfd_count--; 873 + if (kvm->buses[bus_idx]) 874 + kvm->buses[bus_idx]->ioeventfd_count--; 874 875 ioeventfd_release(p); 875 876 ret = 0; 876 877 break;
+31 -13
virt/kvm/kvm_main.c
··· 727 727 list_del(&kvm->vm_list); 728 728 spin_unlock(&kvm_lock); 729 729 kvm_free_irq_routing(kvm); 730 - for (i = 0; i < KVM_NR_BUSES; i++) 731 - kvm_io_bus_destroy(kvm->buses[i]); 730 + for (i = 0; i < KVM_NR_BUSES; i++) { 731 + if (kvm->buses[i]) 732 + kvm_io_bus_destroy(kvm->buses[i]); 733 + kvm->buses[i] = NULL; 734 + } 732 735 kvm_coalesced_mmio_free(kvm); 733 736 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 734 737 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); ··· 1065 1062 * changes) is disallowed above, so any other attribute changes getting 1066 1063 * here can be skipped. 1067 1064 */ 1068 - if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1065 + if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) { 1069 1066 r = kvm_iommu_map_pages(kvm, &new); 1070 1067 return r; 1071 1068 } ··· 3477 3474 }; 3478 3475 3479 3476 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3477 + if (!bus) 3478 + return -ENOMEM; 3480 3479 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3481 3480 return r < 0 ? r : 0; 3482 3481 } ··· 3496 3491 }; 3497 3492 3498 3493 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3494 + if (!bus) 3495 + return -ENOMEM; 3499 3496 3500 3497 /* First try the device referenced by cookie. */ 3501 3498 if ((cookie >= 0) && (cookie < bus->dev_count) && ··· 3548 3541 }; 3549 3542 3550 3543 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3544 + if (!bus) 3545 + return -ENOMEM; 3551 3546 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3552 3547 return r < 0 ? 
r : 0; 3553 3548 } ··· 3562 3553 struct kvm_io_bus *new_bus, *bus; 3563 3554 3564 3555 bus = kvm->buses[bus_idx]; 3556 + if (!bus) 3557 + return -ENOMEM; 3558 + 3565 3559 /* exclude ioeventfd which is limited by maximum fd */ 3566 3560 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3567 3561 return -ENOSPC; ··· 3584 3572 } 3585 3573 3586 3574 /* Caller must hold slots_lock. */ 3587 - int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3588 - struct kvm_io_device *dev) 3575 + void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3576 + struct kvm_io_device *dev) 3589 3577 { 3590 - int i, r; 3578 + int i; 3591 3579 struct kvm_io_bus *new_bus, *bus; 3592 3580 3593 3581 bus = kvm->buses[bus_idx]; 3594 - r = -ENOENT; 3582 + if (!bus) 3583 + return; 3584 + 3595 3585 for (i = 0; i < bus->dev_count; i++) 3596 3586 if (bus->range[i].dev == dev) { 3597 - r = 0; 3598 3587 break; 3599 3588 } 3600 3589 3601 - if (r) 3602 - return r; 3590 + if (i == bus->dev_count) 3591 + return; 3603 3592 3604 3593 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3605 3594 sizeof(struct kvm_io_range)), GFP_KERNEL); 3606 - if (!new_bus) 3607 - return -ENOMEM; 3595 + if (!new_bus) { 3596 + pr_err("kvm: failed to shrink bus, removing it completely\n"); 3597 + goto broken; 3598 + } 3608 3599 3609 3600 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3610 3601 new_bus->dev_count--; 3611 3602 memcpy(new_bus->range + i, bus->range + i + 1, 3612 3603 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3613 3604 3605 + broken: 3614 3606 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3615 3607 synchronize_srcu_expedited(&kvm->srcu); 3616 3608 kfree(bus); 3617 - return r; 3609 + return; 3618 3610 } 3619 3611 3620 3612 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, ··· 3631 3615 srcu_idx = srcu_read_lock(&kvm->srcu); 3632 3616 3633 3617 bus = srcu_dereference(kvm->buses[bus_idx], 
&kvm->srcu); 3618 + if (!bus) 3619 + goto out_unlock; 3634 3620 3635 3621 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 3636 3622 if (dev_idx < 0)