Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge 4.12-rc4 into tty-next

We want the tty locking fix in here, so that maybe we can finally get it
fixed for real...

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+2052 -1200
-2
Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
···
 bias-pull-up           - pull up the pin
 bias-pull-down         - pull down the pin
 bias-pull-pin-default  - use pin-default pull state
-bi-directional         - pin supports simultaneous input/output operations
 drive-push-pull        - drive actively high and low
 drive-open-drain       - drive with open drain
 drive-open-source      - drive with open source
···
 power-source           - select between different power supplies
 low-power-enable       - enable low power mode
 low-power-disable      - disable low power mode
-output-enable          - enable output on pin regardless of output value
 output-low             - set the pin to output mode with low level
 output-high            - set the pin to output mode with high level
 slew-rate              - set the slew rate
+2 -2
MAINTAINERS
···

 PXA RTC DRIVER
 M:  Robert Jarzmik <robert.jarzmik@free.fr>
-L:  rtc-linux@googlegroups.com
+L:  linux-rtc@vger.kernel.org
 S:  Maintained

 QAT DRIVER
···
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:  Alessandro Zummo <a.zummo@towertech.it>
 M:  Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L:  rtc-linux@googlegroups.com
+L:  linux-rtc@vger.kernel.org
 Q:  http://patchwork.ozlabs.org/project/rtc-linux/list/
 T:  git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
 S:  Maintained
+1 -1
Makefile
···
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote

 # *DOCUMENTATION*
+3 -3
arch/arm64/include/asm/acpi.h
···
 #define ACPI_MADT_GICC_LENGTH \
     (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)

-#define BAD_MADT_GICC_ENTRY(entry, end) \
-    (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
-     (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end) \
+    (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+     (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))

 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
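Note on the BAD_MADT_GICC_ENTRY hunk above: the reordered macro validates the firmware-supplied header length against the expected constant first, and then uses that trusted constant (rather than a size derived from the entry itself) for the end-of-table bounds check. A minimal userspace C sketch of that validation-order idea (illustrative only; every name below is invented, none of it is kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical record header, loosely modelled on an ACPI subtable. */
    struct rec_header {
        uint8_t type;
        uint8_t length;
    };

    #define REC_EXPECTED_LENGTH 8

    /*
     * Check the untrusted length field first; only then use the trusted
     * expected length for the end-of-buffer bounds check.
     */
    #define BAD_REC(rec, end) \
        (!(rec) || (rec)->length != REC_EXPECTED_LENGTH || \
         (uintptr_t)(rec) + REC_EXPECTED_LENGTH > (uintptr_t)(end))

    int main(void)
    {
        uint8_t buf[16] = { 0 };
        struct rec_header *rec = (struct rec_header *)buf;
        void *end = buf + sizeof(buf);

        rec->length = REC_EXPECTED_LENGTH;
        printf("valid record: %s\n", BAD_REC(rec, end) ? "bad" : "ok");

        rec->length = 200;  /* corrupt length from "firmware" */
        printf("corrupt record: %s\n", BAD_REC(rec, end) ? "bad" : "ok");
        return 0;
    }

The short-circuit order means a corrupt length can never feed the bounds arithmetic.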
+3 -1
arch/arm64/kernel/pci.c
···
         return NULL;

     root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-    if (!root_ops)
+    if (!root_ops) {
+        kfree(ri);
         return NULL;
+    }

     ri->cfg = pci_acpi_setup_ecam_mapping(root);
     if (!ri->cfg) {
+6
arch/frv/include/asm/timex.h
···
 #define vxtime_lock()    do {} while (0)
 #define vxtime_unlock()  do {} while (0)

+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data __attribute__((__section__(".data")))
+
 #endif

-1
arch/mips/kernel/process.c
···
     struct thread_info *ti = task_thread_info(p);
     struct pt_regs *childregs, *regs = current_pt_regs();
     unsigned long childksp;
-    p->set_child_tid = p->clear_child_tid = NULL;

     childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

-2
arch/openrisc/kernel/process.c
···

     top_of_kernel_stack = sp;

-    p->set_child_tid = p->clear_child_tid = NULL;
-
     /* Locate userspace context on stack... */
     sp -= STACK_FRAME_OVERHEAD;    /* redzone */
     sp -= sizeof(struct pt_regs);
+8 -8
arch/x86/kernel/cpu/microcode/amd.c
···
 }

 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);

 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
···
     if (!desc.mc)
         return -EINVAL;

-    ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
-                 desc.data, desc.size);
+    ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
     if (ret != UCODE_OK)
         return -EINVAL;

···
 }

 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
     enum ucode_state ret;

···

 #ifdef CONFIG_X86_32
     /* save BSP's matching patch for early load */
-    if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-        struct ucode_patch *p = find_patch(cpu);
+    if (save) {
+        struct ucode_patch *p = find_patch(0);
         if (p) {
             memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
             memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
···
 {
     char fw_name[36] = "amd-ucode/microcode_amd.bin";
     struct cpuinfo_x86 *c = &cpu_data(cpu);
+    bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
     enum ucode_state ret = UCODE_NFOUND;
     const struct firmware *fw;

     /* reload ucode container only on the boot cpu */
-    if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+    if (!refresh_fw || !bsp)
         return UCODE_OK;

     if (c->x86 >= 0x15)
···
         goto fw_release;
     }

-    ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+    ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);

 fw_release:
     release_firmware(fw);
+1 -1
arch/x86/kernel/process_32.c
···

     printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
     printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
-        smp_processor_id());
+        raw_smp_processor_id());

     printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
         regs->ax, regs->bx, regs->cx, regs->dx);
+4 -1
arch/x86/kvm/lapic.c
···

 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+    preempt_disable();
     kvm_x86_ops->cancel_hv_timer(apic->vcpu);
     apic->lapic_timer.hv_timer_in_use = false;
+    preempt_enable();
 }

 static bool start_hv_timer(struct kvm_lapic *apic)
···
     for (i = 0; i < KVM_APIC_LVT_NUM; i++)
         kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
     apic_update_lvtt(apic);
-    if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+    if (kvm_vcpu_is_reset_bsp(vcpu) &&
+        kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
         kvm_lapic_set_reg(apic, APIC_LVT0,
                  SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
     apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
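Note on the cancel_hv_timer() hunk above: disabling preemption makes the two steps (cancelling the hardware timer, then clearing hv_timer_in_use) behave as one atomic action with respect to anything that could run in between. A rough userspace analogue of the same shape, with a mutex standing in for preempt_disable()/preempt_enable() (illustrative only; all names invented):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* A timer whose "stop the hardware" step and "mark not in use" step
     * must never be observed half-done, so both run in one critical
     * section. */
    struct hv_timer_state {
        pthread_mutex_t lock;
        bool hw_armed;
        bool in_use;
    };

    static void cancel_timer(struct hv_timer_state *t)
    {
        pthread_mutex_lock(&t->lock);   /* analogue of preempt_disable() */
        t->hw_armed = false;            /* step 1: stop the "hardware"   */
        t->in_use = false;              /* step 2: update bookkeeping    */
        pthread_mutex_unlock(&t->lock); /* analogue of preempt_enable()  */
    }

    int main(void)
    {
        struct hv_timer_state t = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .hw_armed = true,
            .in_use = true,
        };
        cancel_timer(&t);
        printf("armed=%d in_use=%d\n", t.hw_armed, t.in_use);
        return 0;
    }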
+12 -14
arch/x86/kvm/svm.c
···
      * AMD's VMCB does not have an explicit unusable field, so emulate it
      * for cross vendor migration purposes by "not present"
      */
-    var->unusable = !var->present || (var->type == 0);
+    var->unusable = !var->present;

     switch (seg) {
     case VCPU_SREG_TR:
···
          */
         if (var->unusable)
             var->db = 0;
+        /* This is symmetric with svm_set_segment() */
         var->dpl = to_svm(vcpu)->vmcb->save.cpl;
         break;
     }
···
     s->base = var->base;
     s->limit = var->limit;
     s->selector = var->selector;
-    if (var->unusable)
-        s->attrib = 0;
-    else {
-        s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-        s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-        s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-        s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-        s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-        s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-        s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-        s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-    }
+    s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+    s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+    s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+    s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+    s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+    s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+    s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+    s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

     /*
      * This is always accurate, except if SYSRET returned to a segment
···
      * would entail passing the CPL to userspace and back.
      */
     if (seg == VCPU_SREG_SS)
-        svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+        /* This is symmetric with svm_get_segment() */
+        svm->vmcb->save.cpl = (var->dpl & 3);

     mark_dirty(svm->vmcb, VMCB_SEG);
 }
+62 -85
arch/x86/kvm/vmx.c
···
     return 0;
 }

-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-                  gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
     gva_t gva;
-    gpa_t vmptr;
     struct x86_exception e;
-    struct page *page;
-    struct vcpu_vmx *vmx = to_vmx(vcpu);
-    int maxphyaddr = cpuid_maxphyaddr(vcpu);

     if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
             vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
         return 1;

-    if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-                sizeof(vmptr), &e)) {
+    if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+                sizeof(*vmpointer), &e)) {
         kvm_inject_page_fault(vcpu, &e);
         return 1;
     }

-    switch (exit_reason) {
-    case EXIT_REASON_VMON:
-        /*
-         * SDM 3: 24.11.5
-         * The first 4 bytes of VMXON region contain the supported
-         * VMCS revision identifier
-         *
-         * Note - IA32_VMX_BASIC[48] will never be 1
-         * for the nested case;
-         * which replaces physical address width with 32
-         *
-         */
-        if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-            nested_vmx_failInvalid(vcpu);
-            return kvm_skip_emulated_instruction(vcpu);
-        }
-
-        page = nested_get_page(vcpu, vmptr);
-        if (page == NULL) {
-            nested_vmx_failInvalid(vcpu);
-            return kvm_skip_emulated_instruction(vcpu);
-        }
-        if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-            kunmap(page);
-            nested_release_page_clean(page);
-            nested_vmx_failInvalid(vcpu);
-            return kvm_skip_emulated_instruction(vcpu);
-        }
-        kunmap(page);
-        nested_release_page_clean(page);
-        vmx->nested.vmxon_ptr = vmptr;
-        break;
-    case EXIT_REASON_VMCLEAR:
-        if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-            nested_vmx_failValid(vcpu,
-                         VMXERR_VMCLEAR_INVALID_ADDRESS);
-            return kvm_skip_emulated_instruction(vcpu);
-        }
-
-        if (vmptr == vmx->nested.vmxon_ptr) {
-            nested_vmx_failValid(vcpu,
-                         VMXERR_VMCLEAR_VMXON_POINTER);
-            return kvm_skip_emulated_instruction(vcpu);
-        }
-        break;
-    case EXIT_REASON_VMPTRLD:
-        if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-            nested_vmx_failValid(vcpu,
-                         VMXERR_VMPTRLD_INVALID_ADDRESS);
-            return kvm_skip_emulated_instruction(vcpu);
-        }
-
-        if (vmptr == vmx->nested.vmxon_ptr) {
-            nested_vmx_failValid(vcpu,
-                         VMXERR_VMPTRLD_VMXON_POINTER);
-            return kvm_skip_emulated_instruction(vcpu);
-        }
-        break;
-    default:
-        return 1; /* shouldn't happen */
-    }
-
-    if (vmpointer)
-        *vmpointer = vmptr;
     return 0;
 }
···
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
     int ret;
+    gpa_t vmptr;
+    struct page *page;
     struct vcpu_vmx *vmx = to_vmx(vcpu);
     const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
         | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
···
         return 1;
     }

-    if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+    if (nested_vmx_get_vmptr(vcpu, &vmptr))
         return 1;
-
+
+    /*
+     * SDM 3: 24.11.5
+     * The first 4 bytes of VMXON region contain the supported
+     * VMCS revision identifier
+     *
+     * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+     * which replaces physical address width with 32
+     */
+    if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+        nested_vmx_failInvalid(vcpu);
+        return kvm_skip_emulated_instruction(vcpu);
+    }
+
+    page = nested_get_page(vcpu, vmptr);
+    if (page == NULL) {
+        nested_vmx_failInvalid(vcpu);
+        return kvm_skip_emulated_instruction(vcpu);
+    }
+    if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+        kunmap(page);
+        nested_release_page_clean(page);
+        nested_vmx_failInvalid(vcpu);
+        return kvm_skip_emulated_instruction(vcpu);
+    }
+    kunmap(page);
+    nested_release_page_clean(page);
+
+    vmx->nested.vmxon_ptr = vmptr;
     ret = enter_vmx_operation(vcpu);
     if (ret)
         return ret;
···
     if (!nested_vmx_check_permission(vcpu))
         return 1;

-    if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+    if (nested_vmx_get_vmptr(vcpu, &vmptr))
         return 1;
+
+    if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+        nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+        return kvm_skip_emulated_instruction(vcpu);
+    }
+
+    if (vmptr == vmx->nested.vmxon_ptr) {
+        nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+        return kvm_skip_emulated_instruction(vcpu);
+    }

     if (vmptr == vmx->nested.current_vmptr)
         nested_release_vmcs12(vmx);
···
     if (!nested_vmx_check_permission(vcpu))
         return 1;

-    if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+    if (nested_vmx_get_vmptr(vcpu, &vmptr))
         return 1;
+
+    if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+        nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+        return kvm_skip_emulated_instruction(vcpu);
+    }
+
+    if (vmptr == vmx->nested.vmxon_ptr) {
+        nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+        return kvm_skip_emulated_instruction(vcpu);
+    }

     if (vmx->nested.current_vmptr != vmptr) {
         struct vmcs12 *new_vmcs12;
···
 {
     unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
     int cr = exit_qualification & 15;
-    int reg = (exit_qualification >> 8) & 15;
-    unsigned long val = kvm_register_readl(vcpu, reg);
+    int reg;
+    unsigned long val;

     switch ((exit_qualification >> 4) & 3) {
     case 0: /* mov to cr */
+        reg = (exit_qualification >> 8) & 15;
+        val = kvm_register_readl(vcpu, reg);
         switch (cr) {
         case 0:
             if (vmcs12->cr0_guest_host_mask &
···
          * lmsw can change bits 1..3 of cr0, and only set bit 0 of
          * cr0. Other attempted changes are ignored, with no exit.
          */
+        val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
         if (vmcs12->cr0_guest_host_mask & 0xe &
                 (val ^ vmcs12->cr0_read_shadow))
             return true;
+5 -2
arch/x86/kvm/x86.c
···
     if (vcpu->arch.pv.pv_unhalted)
         return true;

-    if (atomic_read(&vcpu->arch.nmi_queued))
+    if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+        (vcpu->arch.nmi_pending &&
+         kvm_x86_ops->nmi_allowed(vcpu)))
         return true;

-    if (kvm_test_request(KVM_REQ_SMI, vcpu))
+    if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+        (vcpu->arch.smi_pending && !is_smm(vcpu)))
         return true;

     if (kvm_arch_interrupt_allowed(vcpu) &&
+3 -6
arch/x86/mm/pat.c
···
 }
 early_param("nopat", nopat);

-static bool __read_mostly __pat_initialized = false;
-
 bool pat_enabled(void)
 {
-    return __pat_initialized;
+    return !!__pat_enabled;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
···
     }

     wrmsrl(MSR_IA32_CR_PAT, pat);
-    __pat_initialized = true;

     __init_cache_modes(pat);
 }

 static void pat_ap_init(u64 pat)
 {
-    if (!this_cpu_has(X86_FEATURE_PAT)) {
+    if (!boot_cpu_has(X86_FEATURE_PAT)) {
         /*
          * If this happens we are on a secondary CPU, but switched to
          * PAT on the boot CPU. We have no way to undo PAT.
···
     u64 pat;
     struct cpuinfo_x86 *c = &boot_cpu_data;

-    if (!__pat_enabled) {
+    if (!pat_enabled()) {
         init_cache_modes();
         return;
     }
+4 -2
arch/x86/platform/efi/efi.c
···

     /*
      * We don't do virtual mode, since we don't do runtime services, on
-     * non-native EFI
+     * non-native EFI. With efi=old_map, we don't do runtime services in
+     * kexec kernel because in the initial boot something else might
+     * have been mapped at these virtual addresses.
      */
-    if (!efi_is_native()) {
+    if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
         efi_memmap_unmap();
         clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
         return;
+71 -8
arch/x86/platform/efi/efi_64.c
···

 pgd_t * __init efi_call_phys_prolog(void)
 {
-    unsigned long vaddress;
-    pgd_t *save_pgd;
+    unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+    pgd_t *save_pgd, *pgd_k, *pgd_efi;
+    p4d_t *p4d, *p4d_k, *p4d_efi;
+    pud_t *pud;

     int pgd;
-    int n_pgds;
+    int n_pgds, i, j;

     if (!efi_enabled(EFI_OLD_MEMMAP)) {
         save_pgd = (pgd_t *)read_cr3();
···
     n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
     save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);

+    /*
+     * Build 1:1 identity mapping for efi=old_map usage. Note that
+     * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+     * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
+     * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
+     * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
+     * This means here we can only reuse the PMD tables of the direct mapping.
+     */
     for (pgd = 0; pgd < n_pgds; pgd++) {
-        save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
-        vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
-        set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+        addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+        vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+        pgd_efi = pgd_offset_k(addr_pgd);
+        save_pgd[pgd] = *pgd_efi;
+
+        p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+        if (!p4d) {
+            pr_err("Failed to allocate p4d table!\n");
+            goto out;
+        }
+
+        for (i = 0; i < PTRS_PER_P4D; i++) {
+            addr_p4d = addr_pgd + i * P4D_SIZE;
+            p4d_efi = p4d + p4d_index(addr_p4d);
+
+            pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+            if (!pud) {
+                pr_err("Failed to allocate pud table!\n");
+                goto out;
+            }
+
+            for (j = 0; j < PTRS_PER_PUD; j++) {
+                addr_pud = addr_p4d + j * PUD_SIZE;
+
+                if (addr_pud > (max_pfn << PAGE_SHIFT))
+                    break;
+
+                vaddr = (unsigned long)__va(addr_pud);
+
+                pgd_k = pgd_offset_k(vaddr);
+                p4d_k = p4d_offset(pgd_k, vaddr);
+                pud[j] = *pud_offset(p4d_k, vaddr);
+            }
+        }
     }
 out:
     __flush_tlb_all();
···
     /*
      * After the lock is released, the original page table is restored.
      */
-    int pgd_idx;
+    int pgd_idx, i;
     int nr_pgds;
+    pgd_t *pgd;
+    p4d_t *p4d;
+    pud_t *pud;

     if (!efi_enabled(EFI_OLD_MEMMAP)) {
         write_cr3((unsigned long)save_pgd);
···

     nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);

-    for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+    for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+        pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
         set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
+
+        if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+            continue;
+
+        for (i = 0; i < PTRS_PER_P4D; i++) {
+            p4d = p4d_offset(pgd,
+                     pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+            if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                continue;
+
+            pud = (pud_t *)p4d_page_vaddr(*p4d);
+            pud_free(&init_mm, pud);
+        }
+
+        p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+        p4d_free(&init_mm, p4d);
+    }

     kfree(save_pgd);

+3
arch/x86/platform/efi/quirks.c
···
         free_bootmem_late(start, size);
     }

+    if (!num_entries)
+        return;
+
     new_size = efi.memmap.desc_size * num_entries;
     new_phys = efi_memmap_alloc(num_entries);
     if (!new_phys) {
+1 -1
block/blk-cgroup.c
···
         blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

     if (blkg->blkcg != &blkcg_root)
-        blk_exit_rl(&blkg->rl);
+        blk_exit_rl(blkg->q, &blkg->rl);

     blkg_rwstat_exit(&blkg->stat_ios);
     blkg_rwstat_exit(&blkg->stat_bytes);
+8 -2
block/blk-core.c
···
     if (!rl->rq_pool)
         return -ENOMEM;

+    if (rl != &q->root_rl)
+        WARN_ON_ONCE(!blk_get_queue(q));
+
     return 0;
 }

-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-    if (rl->rq_pool)
+    if (rl->rq_pool) {
         mempool_destroy(rl->rq_pool);
+        if (rl != &q->root_rl)
+            blk_put_queue(q);
+    }
 }

 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
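Note on the blk_init_rl()/blk_exit_rl() pairing above: each non-root request list now takes a reference on its queue when it is set up and drops it when it is torn down, so the queue cannot be freed while a request list still points at it. A minimal userspace sketch of that lifetime-pinning pattern (invented names, not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    struct queue {
        int refcount;
    };

    struct request_list {
        struct queue *q;
    };

    static void queue_get(struct queue *q) { q->refcount++; }

    static void queue_put(struct queue *q)
    {
        if (--q->refcount == 0)
            free(q);
    }

    static void rl_init(struct request_list *rl, struct queue *q)
    {
        rl->q = q;
        queue_get(q);   /* pin the queue for the list's lifetime */
    }

    static void rl_exit(struct request_list *rl)
    {
        queue_put(rl->q);   /* release the pin taken in rl_init() */
    }

    int main(void)
    {
        struct queue *q = calloc(1, sizeof(*q));
        struct request_list rl;

        q->refcount = 1;    /* owner's reference */
        rl_init(&rl, q);
        rl_exit(&rl);
        queue_put(q);       /* owner drops the last reference */
        return 0;
    }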
+9 -1
block/blk-mq.c
···
     return ret;
 }

-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+                     int nr_hw_queues)
 {
     struct request_queue *q;

···

     list_for_each_entry(q, &set->tag_list, tag_set_list)
         blk_mq_unfreeze_queue(q);
+}
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+    mutex_lock(&set->tag_list_lock);
+    __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+    mutex_unlock(&set->tag_list_lock);
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);

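Note on the blk_mq_update_nr_hw_queues() split above: this is the common locked-wrapper idiom, where the double-underscore helper assumes the caller holds the lock and the exported function takes the lock itself. A tiny userspace sketch of the idiom (illustrative only; all names invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t set_lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_hw_queues;

    static void __update_nr_hw_queues(int nr)
    {
        /* caller must hold set_lock */
        nr_hw_queues = nr;
    }

    static void update_nr_hw_queues(int nr)
    {
        pthread_mutex_lock(&set_lock);
        __update_nr_hw_queues(nr);
        pthread_mutex_unlock(&set_lock);
    }

    int main(void)
    {
        update_nr_hw_queues(4);
        printf("nr_hw_queues = %d\n", nr_hw_queues);
        return 0;
    }

Internal callers that already hold the lock can call the __ variant directly without deadlocking.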
+1 -1
block/blk-sysfs.c
···

     blk_free_queue_stats(q->stats);

-    blk_exit_rl(&q->root_rl);
+    blk_exit_rl(q, &q->root_rl);

     if (q->queue_tags)
         __blk_queue_free_tags(q);
+1 -1
block/blk.h
···

 int blk_init_rl(struct request_list *rl, struct request_queue *q,
         gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
         struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);
+15 -2
block/cfq-iosched.c
···
 static const int cfq_hist_divisor = 4;

 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY        (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)

 /*
  * below this threshold, we consider thinktime immediate
···
     cfqg->vfraction = max_t(unsigned, vfr, 1);
 }

+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+    if (!iops_mode(cfqd))
+        return CFQ_SLICE_MODE_GROUP_DELAY;
+    else
+        return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
···
     n = rb_last(&st->rb);
     if (n) {
         __cfqg = rb_entry_cfqg(n);
-        cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+        cfqg->vdisktime = __cfqg->vdisktime +
+            cfq_get_cfqg_vdisktime_delay(cfqd);
     } else
         cfqg->vdisktime = st->min_vdisktime;
     cfq_group_service_tree_add(st, cfqg);
-4
drivers/acpi/acpica/tbutils.c
···

     table_desc->validation_count++;
     if (table_desc->validation_count == 0) {
-        ACPI_ERROR((AE_INFO,
-                "Table %p, Validation count is zero after increment\n",
-                table_desc));
         table_desc->validation_count--;
-        return_ACPI_STATUS(AE_LIMIT);
     }

     *out_table = table_desc->pointer;
+1 -1
drivers/acpi/button.c
···

 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
 static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;

 static unsigned long lid_report_interval __read_mostly = 500;
 module_param(lid_report_interval, ulong, 0644);
+5 -2
drivers/acpi/sysfs.c
···
         container_of(bin_attr, struct acpi_table_attr, attr);
     struct acpi_table_header *table_header = NULL;
     acpi_status status;
+    ssize_t rc;

     status = acpi_get_table(table_attr->name, table_attr->instance,
                 &table_header);
     if (ACPI_FAILURE(status))
         return -ENODEV;

-    return memory_read_from_buffer(buf, count, &offset,
-                       table_header, table_header->length);
+    rc = memory_read_from_buffer(buf, count, &offset, table_header,
+                     table_header->length);
+    acpi_put_table(table_header);
+    return rc;
 }

 static int acpi_table_attr_init(struct kobject *tables_obj,
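Note on the acpi_put_table() fix above: the read result is stashed in a local so the reference taken by acpi_get_table() can be dropped before returning. A minimal userspace sketch of that get/use/put shape (illustrative only; all names invented):

    #include <stdio.h>
    #include <string.h>

    static char table[16] = "FACP....";
    static int refcount;

    static const char *get_table(void) { refcount++; return table; }
    static void put_table(void)        { refcount--; }

    static long table_read(char *buf, size_t count)
    {
        const char *t = get_table();
        long rc;

        rc = (long)(count < sizeof(table) ? count : sizeof(table));
        memcpy(buf, t, (size_t)rc);
        put_table();    /* balance get_table() before returning */
        return rc;
    }

    int main(void)
    {
        char buf[16];
        long n = table_read(buf, sizeof(buf));

        printf("read %ld bytes, refcount now %d\n", n, refcount);
        return 0;
    }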
+5 -10
drivers/block/nbd.c
···
         return -ENOSPC;
 }

-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-    nbd->config = NULL;
-    nbd->tag_set.timeout = 0;
-    queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
     if (bdev->bd_openers > 1)
···
         }
         kfree(config->socks);
     }
-    nbd_reset(nbd);
+    kfree(nbd->config);
+    nbd->config = NULL;
+
+    nbd->tag_set.timeout = 0;
+    queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);

     mutex_unlock(&nbd->config_lock);
     nbd_put(nbd);
···
     disk->fops = &nbd_fops;
     disk->private_data = nbd;
     sprintf(disk->disk_name, "nbd%d", index);
-    nbd_reset(nbd);
     add_disk(disk);
     nbd_total_devices++;
     return index;
+2
drivers/block/rbd.c
···

     switch (req_op(rq)) {
     case REQ_OP_DISCARD:
+    case REQ_OP_WRITE_ZEROES:
         op_type = OBJ_OP_DISCARD;
         break;
     case REQ_OP_WRITE:
···
     q->limits.discard_granularity = segment_size;
     q->limits.discard_alignment = segment_size;
     blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
+    blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);

     if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
         q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+3 -3
drivers/char/pcmcia/cm4040_cs.c
···

     rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
     if (rc <= 0) {
-        DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+        DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
         DEBUGP(2, dev, "<- cm4040_write (failed)\n");
         if (rc == -ERESTARTSYS)
             return rc;
···
     for (i = 0; i < bytes_to_write; i++) {
         rc = wait_for_bulk_out_ready(dev);
         if (rc <= 0) {
-            DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+            DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
                    rc);
             DEBUGP(2, dev, "<- cm4040_write (failed)\n");
             if (rc == -ERESTARTSYS)
···
     rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);

     if (rc <= 0) {
-        DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+        DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
         DEBUGP(2, dev, "<- cm4040_write (failed)\n");
         if (rc == -ERESTARTSYS)
             return rc;
+5 -1
drivers/char/random.c
···
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
     __u32 *ptr = (__u32 *) regs;
+    unsigned long flags;

     if (regs == NULL)
         return 0;
+    local_irq_save(flags);
     if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
         f->reg_idx = 0;
-    return *(ptr + f->reg_idx++);
+    ptr += f->reg_idx++;
+    local_irq_restore(flags);
+    return *ptr;
 }

 void add_interrupt_randomness(int irq, int irq_flags)
+1
drivers/cpufreq/cpufreq.c
···
     if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
         list_empty(&cpufreq_policy_list)) {
         /* if all ->init() calls failed, unregister */
+        ret = -ENODEV;
         pr_debug("%s: No CPU initialized for driver %s\n", __func__,
              driver_data->name);
         goto err_if_unreg;
+16 -3
drivers/cpufreq/kirkwood-cpufreq.c
···
         return PTR_ERR(priv.cpu_clk);
     }

-    clk_prepare_enable(priv.cpu_clk);
+    err = clk_prepare_enable(priv.cpu_clk);
+    if (err) {
+        dev_err(priv.dev, "Unable to prepare cpuclk\n");
+        return err;
+    }
+
     kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;

     priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
···
         goto out_cpu;
     }

-    clk_prepare_enable(priv.ddr_clk);
+    err = clk_prepare_enable(priv.ddr_clk);
+    if (err) {
+        dev_err(priv.dev, "Unable to prepare ddrclk\n");
+        goto out_cpu;
+    }
     kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;

     priv.powersave_clk = of_clk_get_by_name(np, "powersave");
···
         err = PTR_ERR(priv.powersave_clk);
         goto out_ddr;
     }
-    clk_prepare_enable(priv.powersave_clk);
+    err = clk_prepare_enable(priv.powersave_clk);
+    if (err) {
+        dev_err(priv.dev, "Unable to prepare powersave clk\n");
+        goto out_ddr;
+    }

     of_node_put(np);
     np = NULL;
+35 -4
drivers/dma/ep93xx_dma.c
···
     struct dma_device dma_dev;
     bool m2m;
     int (*hw_setup)(struct ep93xx_dma_chan *);
+    void (*hw_synchronize)(struct ep93xx_dma_chan *);
     void (*hw_shutdown)(struct ep93xx_dma_chan *);
     void (*hw_submit)(struct ep93xx_dma_chan *);
     int (*hw_interrupt)(struct ep93xx_dma_chan *);
···
            | M2P_CONTROL_ENABLE;
     m2p_set_control(edmac, control);

+    edmac->buffer = 0;
+
     return 0;
 }

···
     return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }

-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+    unsigned long flags;
     u32 control;

+    spin_lock_irqsave(&edmac->lock, flags);
     control = readl(edmac->regs + M2P_CONTROL);
     control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
     m2p_set_control(edmac, control);
+    spin_unlock_irqrestore(&edmac->lock, flags);

     while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-        cpu_relax();
+        schedule();
+}

+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
     m2p_set_control(edmac, 0);

-    while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-        cpu_relax();
+    while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+        dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }

 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
···
 }

 /**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+    if (edmac->edma->hw_synchronize)
+        edmac->edma->hw_synchronize(edmac);
+}
+
+/**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
  *
···
     dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
     dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
     dma_dev->device_config = ep93xx_dma_slave_config;
+    dma_dev->device_synchronize = ep93xx_dma_synchronize;
     dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
     dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
     dma_dev->device_tx_status = ep93xx_dma_tx_status;
···
     } else {
         dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

+        edma->hw_synchronize = m2p_hw_synchronize;
         edma->hw_setup = m2p_hw_setup;
         edma->hw_shutdown = m2p_hw_shutdown;
         edma->hw_submit = m2p_hw_submit;
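Note on the new hw_synchronize hook above: termination is split into a non-blocking request and a separate blocking synchronize step, and only after synchronizing is it safe to free memory the channel may still touch. A rough userspace analogue using a worker thread, where the join plays the role of the synchronize step (illustrative only; all names invented):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    static atomic_bool stop_requested;

    static void *worker(void *arg)
    {
        int *buffer = arg;

        while (!atomic_load(&stop_requested))
            (*buffer)++;    /* touches memory the owner wants to free */
        return NULL;
    }

    int main(void)
    {
        pthread_t th;
        int *buffer = calloc(1, sizeof(*buffer));

        pthread_create(&th, NULL, worker, buffer);

        atomic_store(&stop_requested, true);    /* "terminate": request only    */
        pthread_join(th, NULL);                 /* "synchronize": wait for stop */

        free(buffer);   /* now safe: nothing is using it anymore */
        return 0;
    }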
+44 -65
drivers/dma/mv_xor_v2.c
···
     struct mv_xor_v2_sw_desc *sw_desq;
     int desc_size;
     unsigned int npendings;
+    unsigned int hw_queue_idx;
 };

 /**
···
 }

 /*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-    /* read the index for the next available descriptor in the DESQ */
-    u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-    return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-        & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
-/*
  * notify the engine of new descriptors, and update the available index.
  */
 static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
···
     return MV_XOR_V2_EXT_DESC_SIZE;
 }

-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-    u32 reg;
-
-    reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
-    reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-    reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
-    writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
-
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 {
     struct mv_xor_v2_device *xor_dev = data;
···
     if (!ndescs)
         return IRQ_NONE;

-    /*
-     * Update IMSG threshold, to disable new IMSG interrupts until
-     * end of the tasklet
-     */
-    mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
     /* schedule a tasklet to handle descriptors callbacks */
     tasklet_schedule(&xor_dev->irq_tasklet);

···
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-    int desq_ptr;
     void *dest_hw_desc;
     dma_cookie_t cookie;
     struct mv_xor_v2_sw_desc *sw_desc =
···
     spin_lock_bh(&xor_dev->lock);
     cookie = dma_cookie_assign(tx);

-    /* get the next available slot in the DESQ */
-    desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
     /* copy the HW descriptor from the SW descriptor to the DESQ */
-    dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+    dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

     memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

     xor_dev->npendings++;
+    xor_dev->hw_queue_idx++;
+    if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+        xor_dev->hw_queue_idx = 0;

     spin_unlock_bh(&xor_dev->lock);
···
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
     struct mv_xor_v2_sw_desc *sw_desc;
+    bool found = false;

     /* Lock the channel */
     spin_lock_bh(&xor_dev->lock);
···
         return NULL;
     }

-    /* get a free SW descriptor from the SW DESQ */
-    sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-                   struct mv_xor_v2_sw_desc, free_list);
+    list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+        if (async_tx_test_ack(&sw_desc->async_tx)) {
+            found = true;
+            break;
+        }
+    }
+
+    if (!found) {
+        spin_unlock_bh(&xor_dev->lock);
+        return NULL;
+    }
+
     list_del(&sw_desc->free_list);

     /* Release the channel */
     spin_unlock_bh(&xor_dev->lock);
-
-    /* set the async tx descriptor */
-    dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-    sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-    async_tx_ack(&sw_desc->async_tx);

     return sw_desc;
 }
···
         __func__, len, &src, &dest, flags);

     sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+    if (!sw_desc)
+        return NULL;

     sw_desc->async_tx.flags = flags;

···
         __func__, src_cnt, len, &dest, flags);

     sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+    if (!sw_desc)
+        return NULL;

     sw_desc->async_tx.flags = flags;

···
         container_of(chan, struct mv_xor_v2_device, dmachan);

     sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+    if (!sw_desc)
+        return NULL;

     /* set the HW descriptor */
     hw_descriptor = &sw_desc->hw_desc;
···
 {
     struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
     int pending_ptr, num_of_pending, i;
-    struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
     struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

     dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
···
     /* get the pending descriptors parameters */
     num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

-    /* next HW descriptor */
-    next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
     /* loop over free descriptors */
     for (i = 0; i < num_of_pending; i++) {
-
-        if (pending_ptr > MV_XOR_V2_DESC_NUM)
-            pending_ptr = 0;
-
-        if (next_pending_sw_desc != NULL)
-            next_pending_hw_desc++;
+        struct mv_xor_v2_descriptor *next_pending_hw_desc =
+            xor_dev->hw_desq_virt + pending_ptr;

         /* get the SW descriptor related to the HW descriptor */
         next_pending_sw_desc =
···

         /* increment the next descriptor */
         pending_ptr++;
+        if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+            pending_ptr = 0;
     }

     if (num_of_pending != 0) {
         /* free the descriptores */
         mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
     }
-
-    /* Update IMSG threshold, to enable new IMSG interrupts */
-    mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }

 /*
···
            xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
     writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
            xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
-
-    /* enable the DMA engine */
-    writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

     /*
      * This is a temporary solution, until we activate the
···
     reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
     writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

+    /* enable the DMA engine */
+    writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
     return 0;
 }

···
         return PTR_ERR(xor_dev->glob_base);

     platform_set_drvdata(pdev, xor_dev);
+
+    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+    if (ret)
+        return ret;

     xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
     if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
···

     /* add all SW descriptors to the free list */
     for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-        xor_dev->sw_desq[i].idx = i;
-        list_add(&xor_dev->sw_desq[i].free_list,
+        struct mv_xor_v2_sw_desc *sw_desc =
+            xor_dev->sw_desq + i;
+        sw_desc->idx = i;
+        dma_async_tx_descriptor_init(&sw_desc->async_tx,
+                         &xor_dev->dmachan);
+        sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+        async_tx_ack(&sw_desc->async_tx);
+
+        list_add(&sw_desc->free_list,
              &xor_dev->free_sw_desc);
     }

+2 -1
drivers/dma/pl330.c
··· 3008 3008 3009 3009 for (i = 0; i < AMBA_NR_IRQS; i++) { 3010 3010 irq = adev->irq[i]; 3011 - devm_free_irq(&adev->dev, irq, pl330); 3011 + if (irq) 3012 + devm_free_irq(&adev->dev, irq, pl330); 3012 3013 } 3013 3014 3014 3015 dma_async_device_unregister(&pl330->ddma);
+3
drivers/dma/sh/rcar-dmac.c
··· 1287 1287 if (desc->hwdescs.use) { 1288 1288 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & 1289 1289 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; 1290 + if (dptr == 0) 1291 + dptr = desc->nchunks; 1292 + dptr--; 1290 1293 WARN_ON(dptr >= desc->nchunks); 1291 1294 } else { 1292 1295 running = desc->running;
+1 -1
drivers/dma/sh/usb-dmac.c
··· 117 117 #define USB_DMASWR 0x0008 118 118 #define USB_DMASWR_SWR (1 << 0) 119 119 #define USB_DMAOR 0x0060 120 - #define USB_DMAOR_AE (1 << 2) 120 + #define USB_DMAOR_AE (1 << 1) 121 121 #define USB_DMAOR_DME (1 << 0) 122 122 123 123 #define USB_DMASAR 0x0000
+2
drivers/firmware/dmi-id.c
··· 47 47 DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); 48 48 DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); 49 49 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); 50 + DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY); 50 51 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); 51 52 DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); 52 53 DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); ··· 192 191 ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); 193 192 ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); 194 193 ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); 194 + ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); 195 195 ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); 196 196 ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); 197 197 ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
+1
drivers/firmware/dmi_scan.c
··· 430 430 dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); 431 431 dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); 432 432 dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); 433 + dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26); 433 434 break; 434 435 case 2: /* Base Board Information */ 435 436 dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
+3
drivers/firmware/efi/efi-bgrt.c
··· 36 36 if (acpi_disabled) 37 37 return; 38 38 39 + if (!efi_enabled(EFI_BOOT)) 40 + return; 41 + 39 42 if (table->length < sizeof(bgrt_tab)) { 40 43 pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", 41 44 table->length, sizeof(bgrt_tab));
+2 -2
drivers/firmware/efi/libstub/secureboot.c
··· 16 16 17 17 /* BIOS variables */ 18 18 static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; 19 - static const efi_char16_t const efi_SecureBoot_name[] = { 19 + static const efi_char16_t efi_SecureBoot_name[] = { 20 20 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 21 21 }; 22 - static const efi_char16_t const efi_SetupMode_name[] = { 22 + static const efi_char16_t efi_SetupMode_name[] = { 23 23 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 24 24 }; 25 25
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 220 220 } 221 221 222 222 const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { 223 - amdgpu_vram_mgr_init, 224 - amdgpu_vram_mgr_fini, 225 - amdgpu_vram_mgr_new, 226 - amdgpu_vram_mgr_del, 227 - amdgpu_vram_mgr_debug 223 + .init = amdgpu_vram_mgr_init, 224 + .takedown = amdgpu_vram_mgr_fini, 225 + .get_node = amdgpu_vram_mgr_new, 226 + .put_node = amdgpu_vram_mgr_del, 227 + .debug = amdgpu_vram_mgr_debug 228 228 };
+68 -27
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
···
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
     struct amdgpu_device *adev = ring->adev;
+    u32 v;
+
+    mutex_lock(&adev->grbm_idx_mutex);
+    if (adev->vce.harvest_config == 0 ||
+        adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+        WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+    else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+        WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

     if (ring == &adev->vce.ring[0])
-        return RREG32(mmVCE_RB_RPTR);
+        v = RREG32(mmVCE_RB_RPTR);
     else if (ring == &adev->vce.ring[1])
-        return RREG32(mmVCE_RB_RPTR2);
+        v = RREG32(mmVCE_RB_RPTR2);
     else
-        return RREG32(mmVCE_RB_RPTR3);
+        v = RREG32(mmVCE_RB_RPTR3);
+
+    WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+    mutex_unlock(&adev->grbm_idx_mutex);
+
+    return v;
 }

 /**
···
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
     struct amdgpu_device *adev = ring->adev;
+    u32 v;
+
+    mutex_lock(&adev->grbm_idx_mutex);
+    if (adev->vce.harvest_config == 0 ||
+        adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+        WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+    else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+        WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

     if (ring == &adev->vce.ring[0])
-        return RREG32(mmVCE_RB_WPTR);
+        v = RREG32(mmVCE_RB_WPTR);
     else if (ring == &adev->vce.ring[1])
-        return RREG32(mmVCE_RB_WPTR2);
+        v = RREG32(mmVCE_RB_WPTR2);
     else
-        return RREG32(mmVCE_RB_WPTR3);
+        v = RREG32(mmVCE_RB_WPTR3);
+
+    WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+    mutex_unlock(&adev->grbm_idx_mutex);
+
+    return v;
 }

 /**
···
 {
     struct amdgpu_device *adev = ring->adev;

+    mutex_lock(&adev->grbm_idx_mutex);
+    if (adev->vce.harvest_config == 0 ||
+        adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+        WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+    else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+        WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
     if (ring == &adev->vce.ring[0])
         WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
     else if (ring == &adev->vce.ring[1])
         WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
     else
         WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+    WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+    mutex_unlock(&adev->grbm_idx_mutex);
 }

 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
···
     struct amdgpu_ring *ring;
     int idx, r;

-    ring = &adev->vce.ring[0];
-    WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
-    WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-    WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-    WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-    WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-    ring = &adev->vce.ring[1];
-    WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
-    WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
-    WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-    WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-    WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
-    ring = &adev->vce.ring[2];
-    WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
-    WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
-    WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
-    WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
-    WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
     mutex_lock(&adev->grbm_idx_mutex);
     for (idx = 0; idx < 2; ++idx) {
         if (adev->vce.harvest_config & (1 << idx))
             continue;

         WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+        /* Program instance 0 reg space for two instances or instance 0 case
+        program instance 1 reg space for only instance 1 available case */
+        if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+            ring = &adev->vce.ring[0];
+            WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+            WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+            WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+            WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+            WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+            ring = &adev->vce.ring[1];
+            WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+            WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+            WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+            WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+            WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+            ring = &adev->vce.ring[2];
+            WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+            WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+            WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+            WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+            WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+        }
+
         vce_v3_0_mc_resume(adev, idx);
         WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

+10 -10
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
···

 static struct phm_master_table_item
 vega10_thermal_start_thermal_controller_master_list[] = {
-    {NULL, tf_vega10_thermal_initialize},
-    {NULL, tf_vega10_thermal_set_temperature_range},
-    {NULL, tf_vega10_thermal_enable_alert},
+    { .tableFunction = tf_vega10_thermal_initialize },
+    { .tableFunction = tf_vega10_thermal_set_temperature_range },
+    { .tableFunction = tf_vega10_thermal_enable_alert },
 /* We should restrict performance levels to low before we halt the SMC.
  * On the other hand we are still in boot state when we do this
  * so it would be pointless.
  * If this assumption changes we have to revisit this table.
  */
-    {NULL, tf_vega10_thermal_setup_fan_table},
-    {NULL, tf_vega10_thermal_start_smc_fan_control},
-    {NULL, NULL}
+    { .tableFunction = tf_vega10_thermal_setup_fan_table },
+    { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
+    { }
 };

 static struct phm_master_table_header
···

 static struct phm_master_table_item
 vega10_thermal_set_temperature_range_master_list[] = {
-    {NULL, tf_vega10_thermal_disable_alert},
-    {NULL, tf_vega10_thermal_set_temperature_range},
-    {NULL, tf_vega10_thermal_enable_alert},
-    {NULL, NULL}
+    { .tableFunction = tf_vega10_thermal_disable_alert },
+    { .tableFunction = tf_vega10_thermal_set_temperature_range },
+    { .tableFunction = tf_vega10_thermal_enable_alert },
+    { }
 };

 struct phm_master_table_header
+83
drivers/gpu/drm/drm_dp_helper.c
···
     return 0;
 }
 EXPORT_SYMBOL(drm_dp_stop_crc);
+
+struct dpcd_quirk {
+    u8 oui[3];
+    bool is_branch;
+    u32 quirks;
+};
+
+#define OUI(first, second, third) { (first), (second), (third) }
+
+static const struct dpcd_quirk dpcd_quirk_list[] = {
+    /* Analogix 7737 needs reduced M and N at HBR2 link rates */
+    { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
+};
+
+#undef OUI
+
+/*
+ * Get a bit mask of DPCD quirks for the sink/branch device identified by
+ * ident. The quirk data is shared but it's up to the drivers to act on the
+ * data.
+ *
+ * For now, only the OUI (first three bytes) is used, but this may be extended
+ * to device identification string and hardware/firmware revisions later.
+ */
+static u32
+drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
+{
+    const struct dpcd_quirk *quirk;
+    u32 quirks = 0;
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
+        quirk = &dpcd_quirk_list[i];
+
+        if (quirk->is_branch != is_branch)
+            continue;
+
+        if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
+            continue;
+
+        quirks |= quirk->quirks;
+    }
+
+    return quirks;
+}
+
+/**
+ * drm_dp_read_desc - read sink/branch descriptor from DPCD
+ * @aux: DisplayPort AUX channel
+ * @desc: Device decriptor to fill from DPCD
+ * @is_branch: true for branch devices, false for sink devices
+ *
+ * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
+ * identification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+             bool is_branch)
+{
+    struct drm_dp_dpcd_ident *ident = &desc->ident;
+    unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
+    int ret, dev_id_len;
+
+    ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+    if (ret < 0)
+        return ret;
+
+    desc->quirks = drm_dp_get_quirks(ident, is_branch);
+
+    dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
+
+    DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+              is_branch ? "branch" : "sink",
+              (int)sizeof(ident->oui), ident->oui,
+              dev_id_len, ident->device_id,
+              ident->hw_rev >> 4, ident->hw_rev & 0xf,
+              ident->sw_major_rev, ident->sw_minor_rev,
+              desc->quirks);
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_dp_read_desc);
+1 -7
drivers/gpu/drm/exynos/exynos_drm_drv.c
···
     return ret;
 }

-static void exynos_drm_preclose(struct drm_device *dev,
-                struct drm_file *file)
-{
-    exynos_drm_subdrv_close(dev, file);
-}
-
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
+    exynos_drm_subdrv_close(dev, file);
     kfree(file->driver_priv);
     file->driver_priv = NULL;
 }
···
     .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
                | DRIVER_ATOMIC | DRIVER_RENDER,
     .open = exynos_drm_open,
-    .preclose = exynos_drm_preclose,
     .lastclose = exynos_drm_lastclose,
     .postclose = exynos_drm_postclose,
     .gem_free_object_unlocked = exynos_drm_gem_free_object,
+1 -4
drivers/gpu/drm/exynos/exynos_drm_drv.h
···
  * drm framework doesn't support multiple irq yet.
  * we can refer to the crtc to current hardware interrupt occurred through
  * this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
  */
 struct exynos_drm_crtc {
     struct drm_crtc base;
+9 -17
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 1633 1633 { 1634 1634 struct device *dev = dsi->dev; 1635 1635 struct device_node *node = dev->of_node; 1636 - struct device_node *ep; 1637 1636 int ret; 1638 1637 1639 1638 ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", ··· 1640 1641 if (ret < 0) 1641 1642 return ret; 1642 1643 1643 - ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0); 1644 - if (!ep) { 1645 - dev_err(dev, "no output port with endpoint specified\n"); 1646 - return -EINVAL; 1647 - } 1648 - 1649 - ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency", 1644 + ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency", 1650 1645 &dsi->burst_clk_rate); 1651 1646 if (ret < 0) 1652 - goto end; 1647 + return ret; 1653 1648 1654 - ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency", 1649 + ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency", 1655 1650 &dsi->esc_clk_rate); 1656 1651 if (ret < 0) 1657 - goto end; 1658 - 1659 - of_node_put(ep); 1652 + return ret; 1660 1653 1661 1654 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0); 1662 1655 if (!dsi->bridge_node) 1663 1656 return -EINVAL; 1664 1657 1665 - end: 1666 - of_node_put(ep); 1667 - 1668 - return ret; 1658 + return 0; 1669 1659 } 1670 1660 1671 1661 static int exynos_dsi_bind(struct device *dev, struct device *master, ··· 1805 1817 1806 1818 static int exynos_dsi_remove(struct platform_device *pdev) 1807 1819 { 1820 + struct exynos_dsi *dsi = platform_get_drvdata(pdev); 1821 + 1822 + of_node_put(dsi->bridge_node); 1823 + 1808 1824 pm_runtime_disable(&pdev->dev); 1809 1825 1810 1826 component_del(&pdev->dev, &exynos_dsi_component_ops);
+20 -10
drivers/gpu/drm/i915/gvt/execlist.c
··· 779 779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 780 780 } 781 781 782 + static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) 783 + { 784 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 785 + struct intel_engine_cs *engine; 786 + struct intel_vgpu_workload *pos, *n; 787 + unsigned int tmp; 788 + 789 + /* free the unsubmitted workloads in the queues. */ 790 + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 791 + list_for_each_entry_safe(pos, n, 792 + &vgpu->workload_q_head[engine->id], list) { 793 + list_del_init(&pos->list); 794 + free_workload(pos); 795 + } 796 + } 797 + } 798 + 782 799 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) 783 800 { 801 + clean_workloads(vgpu, ALL_ENGINES); 784 802 kmem_cache_destroy(vgpu->workloads); 785 803 } 786 804 ··· 829 811 { 830 812 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 831 813 struct intel_engine_cs *engine; 832 - struct intel_vgpu_workload *pos, *n; 833 814 unsigned int tmp; 834 815 835 - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 836 - /* free the unsubmited workload in the queue */ 837 - list_for_each_entry_safe(pos, n, 838 - &vgpu->workload_q_head[engine->id], list) { 839 - list_del_init(&pos->list); 840 - free_workload(pos); 841 - } 842 - 816 + clean_workloads(vgpu, engine_mask); 817 + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) 843 818 init_vgpu_execlist(vgpu, engine->id); 844 - } 845 819 }
+21 -9
drivers/gpu/drm/i915/gvt/handlers.c
··· 1366 1366 void *p_data, unsigned int bytes) 1367 1367 { 1368 1368 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1369 - i915_reg_t reg = {.reg = offset}; 1369 + u32 v = *(u32 *)p_data; 1370 + 1371 + if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) 1372 + return intel_vgpu_default_mmio_write(vgpu, 1373 + offset, p_data, bytes); 1370 1374 1371 1375 switch (offset) { 1372 1376 case 0x4ddc: 1373 - vgpu_vreg(vgpu, offset) = 0x8000003c; 1374 - /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */ 1375 - I915_WRITE(reg, vgpu_vreg(vgpu, offset)); 1377 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1378 + vgpu_vreg(vgpu, offset) = v & ~(1 << 31); 1376 1379 break; 1377 1380 case 0x42080: 1378 - vgpu_vreg(vgpu, offset) = 0x8000; 1379 - /* WaCompressedResourceDisplayNewHashMode:skl */ 1380 - I915_WRITE(reg, vgpu_vreg(vgpu, offset)); 1381 + /* bypass WaCompressedResourceDisplayNewHashMode */ 1382 + vgpu_vreg(vgpu, offset) = v & ~(1 << 15); 1383 + break; 1384 + case 0xe194: 1385 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1386 + vgpu_vreg(vgpu, offset) = v & ~(1 << 8); 1387 + break; 1388 + case 0x7014: 1389 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1390 + vgpu_vreg(vgpu, offset) = v & ~(1 << 13); 1381 1391 break; 1382 1392 default: 1383 1393 return -EINVAL; ··· 1644 1634 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); 1645 1635 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1646 1636 NULL, NULL); 1647 - MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1637 + MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, 1638 + skl_misc_ctl_write); 1648 1639 MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); 1649 1640 MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); 1650 1641 MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); ··· 2579 2568 MMIO_D(0x6e570, D_BDW_PLUS); 2580 2569 MMIO_D(0x65f10, D_BDW_PLUS); 2581 2570 2582 - MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2571 + MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, 2572 + skl_misc_ctl_write); 2583 2573 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2584 2574 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2585 2575 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
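The skl_misc_ctl_write() changes above stop forwarding the guest's workaround writes to real hardware; instead one bit per register is masked out before the value is cached in vreg state. The offsets and bit positions below mirror the hunk; everything else is a standalone illustration, not the GVT-g API:

#include <stdint.h>
#include <stdio.h>

static uint32_t filter_misc_ctl(uint32_t offset, uint32_t v)
{
        switch (offset) {
        case 0x4ddc:    return v & ~(1u << 31);
        case 0x42080:   return v & ~(1u << 15);
        case 0xe194:    return v & ~(1u << 8);
        case 0x7014:    return v & ~(1u << 13);
        default:        return v;       /* unknown offset: pass through */
        }
}

int main(void)
{
        /* prints 0x7fffffff: bit 31 stripped */
        printf("0x%08x\n", (unsigned)filter_misc_ctl(0x4ddc, 0xffffffffu));
        return 0;
}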
-4
drivers/gpu/drm/i915/i915_drv.c
··· 1272 1272 1273 1273 dev_priv->ipc_enabled = false; 1274 1274 1275 - /* Everything is in place, we can now relax! */ 1276 - DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", 1277 - driver.name, driver.major, driver.minor, driver.patchlevel, 1278 - driver.date, pci_name(pdev), dev_priv->drm.primary->index); 1279 1275 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) 1280 1276 DRM_INFO("DRM_I915_DEBUG enabled\n"); 1281 1277 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+2 -1
drivers/gpu/drm/i915/i915_drv.h
··· 562 562 563 563 void intel_link_compute_m_n(int bpp, int nlanes, 564 564 int pixel_clock, int link_clock, 565 - struct intel_link_m_n *m_n); 565 + struct intel_link_m_n *m_n, 566 + bool reduce_m_n); 566 567 567 568 /* Interface history: 568 569 *
+1 -1
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 2313 2313 appgtt->base.allocate_va_range) { 2314 2314 ret = appgtt->base.allocate_va_range(&appgtt->base, 2315 2315 vma->node.start, 2316 - vma->node.size); 2316 + vma->size); 2317 2317 if (ret) 2318 2318 goto err_pages; 2319 2319 }
-5
drivers/gpu/drm/i915/i915_gem_shrinker.c
··· 59 59 return; 60 60 61 61 mutex_unlock(&dev->struct_mutex); 62 - 63 - /* expedite the RCU grace period to free some request slabs */ 64 - synchronize_rcu_expedited(); 65 62 } 66 63 67 64 static bool any_vma_pinned(struct drm_i915_gem_object *obj) ··· 270 273 I915_SHRINK_UNBOUND | 271 274 I915_SHRINK_ACTIVE); 272 275 intel_runtime_pm_put(dev_priv); 273 - 274 - synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */ 275 276 276 277 return freed; 277 278 }
+6 -9
drivers/gpu/drm/i915/i915_irq.c
··· 2953 2953 u32 pipestat_mask; 2954 2954 u32 enable_mask; 2955 2955 enum pipe pipe; 2956 - u32 val; 2957 2956 2958 2957 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2959 2958 PIPE_CRC_DONE_INTERRUPT_STATUS; ··· 2963 2964 2964 2965 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2965 2966 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2966 - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2967 + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2968 + I915_LPE_PIPE_A_INTERRUPT | 2969 + I915_LPE_PIPE_B_INTERRUPT; 2970 + 2967 2971 if (IS_CHERRYVIEW(dev_priv)) 2968 - enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2972 + enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2973 + I915_LPE_PIPE_C_INTERRUPT; 2969 2974 2970 2975 WARN_ON(dev_priv->irq_mask != ~0); 2971 - 2972 - val = (I915_LPE_PIPE_A_INTERRUPT | 2973 - I915_LPE_PIPE_B_INTERRUPT | 2974 - I915_LPE_PIPE_C_INTERRUPT); 2975 - 2976 - enable_mask |= val; 2977 2976 2978 2977 dev_priv->irq_mask = ~enable_mask; 2979 2978
+1 -1
drivers/gpu/drm/i915/i915_reg.h
··· 8280 8280 8281 8281 /* MIPI DSI registers */ 8282 8282 8283 - #define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */ 8283 + #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ 8284 8284 #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) 8285 8285 8286 8286 #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
+14 -8
drivers/gpu/drm/i915/intel_display.c
··· 6101 6101 pipe_config->fdi_lanes = lane; 6102 6102 6103 6103 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 6104 - link_bw, &pipe_config->fdi_m_n); 6104 + link_bw, &pipe_config->fdi_m_n, false); 6105 6105 6106 6106 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 6107 6107 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { ··· 6277 6277 } 6278 6278 6279 6279 static void compute_m_n(unsigned int m, unsigned int n, 6280 - uint32_t *ret_m, uint32_t *ret_n) 6280 + uint32_t *ret_m, uint32_t *ret_n, 6281 + bool reduce_m_n) 6281 6282 { 6282 6283 /* 6283 6284 * Reduce M/N as much as possible without loss in precision. Several DP ··· 6286 6285 * values. The passed in values are more likely to have the least 6287 6286 * significant bits zero than M after rounding below, so do this first. 6288 6287 */ 6289 - while ((m & 1) == 0 && (n & 1) == 0) { 6290 - m >>= 1; 6291 - n >>= 1; 6288 + if (reduce_m_n) { 6289 + while ((m & 1) == 0 && (n & 1) == 0) { 6290 + m >>= 1; 6291 + n >>= 1; 6292 + } 6292 6293 } 6293 6294 6294 6295 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); ··· 6301 6298 void 6302 6299 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 6303 6300 int pixel_clock, int link_clock, 6304 - struct intel_link_m_n *m_n) 6301 + struct intel_link_m_n *m_n, 6302 + bool reduce_m_n) 6305 6303 { 6306 6304 m_n->tu = 64; 6307 6305 6308 6306 compute_m_n(bits_per_pixel * pixel_clock, 6309 6307 link_clock * nlanes * 8, 6310 - &m_n->gmch_m, &m_n->gmch_n); 6308 + &m_n->gmch_m, &m_n->gmch_n, 6309 + reduce_m_n); 6311 6310 6312 6311 compute_m_n(pixel_clock, link_clock, 6313 - &m_n->link_m, &m_n->link_n); 6312 + &m_n->link_m, &m_n->link_n, 6313 + reduce_m_n); 6314 6314 } 6315 6315 6316 6316 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
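For a concrete feel of what the new reduce_m_n flag gates: the loop strips common factors of two from M and N, which keeps the ratio exact while producing smaller register values, and per the quirk table earlier only sinks flagged DP_DPCD_QUIRK_LIMITED_M_N (the Analogix entry) request it. A standalone sketch of just the loop shown in the hunk, with a worked example:

#include <stdint.h>
#include <stdio.h>

static void reduce(uint32_t *m, uint32_t *n, int reduce_m_n)
{
        if (reduce_m_n) {
                while ((*m & 1) == 0 && (*n & 1) == 0) {
                        *m >>= 1;       /* both even: halve both,     */
                        *n >>= 1;       /* the M/N ratio is unchanged */
                }
        }
}

int main(void)
{
        uint32_t m = 0x3000, n = 0x8000;

        reduce(&m, &n, 1);
        printf("m=0x%x n=0x%x\n", (unsigned)m, (unsigned)n); /* m=0x3 n=0x8 */
        return 0;
}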
+10 -35
drivers/gpu/drm/i915/intel_dp.c
··· 1507 1507 DRM_DEBUG_KMS("common rates: %s\n", str); 1508 1508 } 1509 1509 1510 - bool 1511 - __intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc) 1512 - { 1513 - u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI : 1514 - DP_SINK_OUI; 1515 - 1516 - return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) == 1517 - sizeof(*desc); 1518 - } 1519 - 1520 - bool intel_dp_read_desc(struct intel_dp *intel_dp) 1521 - { 1522 - struct intel_dp_desc *desc = &intel_dp->desc; 1523 - bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & 1524 - DP_OUI_SUPPORT; 1525 - int dev_id_len; 1526 - 1527 - if (!__intel_dp_read_desc(intel_dp, desc)) 1528 - return false; 1529 - 1530 - dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id)); 1531 - DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n", 1532 - drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink", 1533 - (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)", 1534 - dev_id_len, desc->device_id, 1535 - desc->hw_rev >> 4, desc->hw_rev & 0xf, 1536 - desc->sw_major_rev, desc->sw_minor_rev); 1537 - 1538 - return true; 1539 - } 1540 - 1541 1510 static int rate_to_index(int find, const int *rates) 1542 1511 { 1543 1512 int i = 0; ··· 1593 1624 int common_rates[DP_MAX_SUPPORTED_RATES] = {}; 1594 1625 int common_len; 1595 1626 uint8_t link_bw, rate_select; 1627 + bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, 1628 + DP_DPCD_QUIRK_LIMITED_M_N); 1596 1629 1597 1630 common_len = intel_dp_common_rates(intel_dp, common_rates); 1598 1631 ··· 1724 1753 intel_link_compute_m_n(bpp, lane_count, 1725 1754 adjusted_mode->crtc_clock, 1726 1755 pipe_config->port_clock, 1727 - &pipe_config->dp_m_n); 1756 + &pipe_config->dp_m_n, 1757 + reduce_m_n); 1728 1758 1729 1759 if (intel_connector->panel.downclock_mode != NULL && 1730 1760 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { ··· 1733 1761 intel_link_compute_m_n(bpp, lane_count, 1734 1762 intel_connector->panel.downclock_mode->clock, 1735 1763 pipe_config->port_clock, 1736 - &pipe_config->dp_m2_n2); 1764 + &pipe_config->dp_m2_n2, 1765 + reduce_m_n); 1737 1766 } 1738 1767 1739 1768 /* ··· 3595 3622 if (!intel_dp_read_dpcd(intel_dp)) 3596 3623 return false; 3597 3624 3598 - intel_dp_read_desc(intel_dp); 3625 + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 3626 + drm_dp_is_branch(intel_dp->dpcd)); 3599 3627 3600 3628 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3601 3629 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & ··· 4598 4624 4599 4625 intel_dp_print_rates(intel_dp); 4600 4626 4601 - intel_dp_read_desc(intel_dp); 4627 + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4628 + drm_dp_is_branch(intel_dp->dpcd)); 4602 4629 4603 4630 intel_dp_configure_mst(intel_dp); 4604 4631
+4 -1
drivers/gpu/drm/i915/intel_dp_mst.c
··· 44 44 int lane_count, slots; 45 45 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 46 46 int mst_pbn; 47 + bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, 48 + DP_DPCD_QUIRK_LIMITED_M_N); 47 49 48 50 pipe_config->has_pch_encoder = false; 49 51 bpp = 24; ··· 77 75 intel_link_compute_m_n(bpp, lane_count, 78 76 adjusted_mode->crtc_clock, 79 77 pipe_config->port_clock, 80 - &pipe_config->dp_m_n); 78 + &pipe_config->dp_m_n, 79 + reduce_m_n); 81 80 82 81 pipe_config->dp_m_n.tu = slots; 83 82
+1 -12
drivers/gpu/drm/i915/intel_drv.h
··· 906 906 M2_N2 907 907 }; 908 908 909 - struct intel_dp_desc { 910 - u8 oui[3]; 911 - u8 device_id[6]; 912 - u8 hw_rev; 913 - u8 sw_major_rev; 914 - u8 sw_minor_rev; 915 - } __packed; 916 - 917 909 struct intel_dp_compliance_data { 918 910 unsigned long edid; 919 911 uint8_t video_pattern; ··· 949 957 /* Max link BW for the sink as per DPCD registers */ 950 958 int max_sink_link_bw; 951 959 /* sink or branch descriptor */ 952 - struct intel_dp_desc desc; 960 + struct drm_dp_desc desc; 953 961 struct drm_dp_aux aux; 954 962 enum intel_display_power_domain aux_power_domain; 955 963 uint8_t train_set[4]; ··· 1524 1532 } 1525 1533 1526 1534 bool intel_dp_read_dpcd(struct intel_dp *intel_dp); 1527 - bool __intel_dp_read_desc(struct intel_dp *intel_dp, 1528 - struct intel_dp_desc *desc); 1529 - bool intel_dp_read_desc(struct intel_dp *intel_dp); 1530 1535 int intel_dp_link_required(int pixel_clock, int bpp); 1531 1536 int intel_dp_max_data_rate(int max_link_clock, int max_lanes); 1532 1537 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-36
drivers/gpu/drm/i915/intel_lpe_audio.c
··· 149 149 150 150 static void lpe_audio_irq_unmask(struct irq_data *d) 151 151 { 152 - struct drm_i915_private *dev_priv = d->chip_data; 153 - unsigned long irqflags; 154 - u32 val = (I915_LPE_PIPE_A_INTERRUPT | 155 - I915_LPE_PIPE_B_INTERRUPT); 156 - 157 - if (IS_CHERRYVIEW(dev_priv)) 158 - val |= I915_LPE_PIPE_C_INTERRUPT; 159 - 160 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 161 - 162 - dev_priv->irq_mask &= ~val; 163 - I915_WRITE(VLV_IIR, val); 164 - I915_WRITE(VLV_IIR, val); 165 - I915_WRITE(VLV_IMR, dev_priv->irq_mask); 166 - POSTING_READ(VLV_IMR); 167 - 168 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 169 152 } 170 153 171 154 static void lpe_audio_irq_mask(struct irq_data *d) 172 155 { 173 - struct drm_i915_private *dev_priv = d->chip_data; 174 - unsigned long irqflags; 175 - u32 val = (I915_LPE_PIPE_A_INTERRUPT | 176 - I915_LPE_PIPE_B_INTERRUPT); 177 - 178 - if (IS_CHERRYVIEW(dev_priv)) 179 - val |= I915_LPE_PIPE_C_INTERRUPT; 180 - 181 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 182 - 183 - dev_priv->irq_mask |= val; 184 - I915_WRITE(VLV_IMR, dev_priv->irq_mask); 185 - I915_WRITE(VLV_IIR, val); 186 - I915_WRITE(VLV_IIR, val); 187 - POSTING_READ(VLV_IIR); 188 - 189 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 190 156 } 191 157 192 158 static struct irq_chip lpe_audio_irqchip = { ··· 295 329 return; 296 330 297 331 desc = irq_to_desc(dev_priv->lpe_audio.irq); 298 - 299 - lpe_audio_irq_mask(&desc->irq_data); 300 332 301 333 lpe_audio_platdev_destroy(dev_priv); 302 334
+1 -1
drivers/gpu/drm/i915/intel_lrc.c
··· 1989 1989 1990 1990 ce->ring = ring; 1991 1991 ce->state = vma; 1992 - ce->initialised = engine->init_context == NULL; 1992 + ce->initialised |= engine->init_context == NULL; 1993 1993 1994 1994 return 0; 1995 1995
+1 -1
drivers/gpu/drm/i915/intel_lspcon.c
··· 240 240 return false; 241 241 } 242 242 243 - intel_dp_read_desc(dp); 243 + drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd)); 244 244 245 245 DRM_DEBUG_KMS("Success: LSPCON init\n"); 246 246 return true;
+5 -3
drivers/gpu/drm/i915/selftests/i915_gem_context.c
··· 320 320 static int igt_ctx_exec(void *arg) 321 321 { 322 322 struct drm_i915_private *i915 = arg; 323 - struct drm_i915_gem_object *obj; 323 + struct drm_i915_gem_object *obj = NULL; 324 324 struct drm_file *file; 325 325 IGT_TIMEOUT(end_time); 326 326 LIST_HEAD(objects); ··· 359 359 } 360 360 361 361 for_each_engine(engine, i915, id) { 362 - if (dw == 0) { 362 + if (!obj) { 363 363 obj = create_test_object(ctx, file, &objects); 364 364 if (IS_ERR(obj)) { 365 365 err = PTR_ERR(obj); ··· 376 376 goto out_unlock; 377 377 } 378 378 379 - if (++dw == max_dwords(obj)) 379 + if (++dw == max_dwords(obj)) { 380 + obj = NULL; 380 381 dw = 0; 382 + } 381 383 ndwords++; 382 384 } 383 385 ncontexts++;
+1
drivers/gpu/drm/msm/Kconfig
··· 13 13 select QCOM_SCM 14 14 select SND_SOC_HDMI_CODEC if SND_SOC 15 15 select SYNC_FILE 16 + select PM_OPP 16 17 default y 17 18 help 18 19 DRM/KMS driver for MSM/snapdragon.
+1 -1
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
··· 116 116 return 0; 117 117 } 118 118 119 - static struct irq_domain_ops mdss_hw_irqdomain_ops = { 119 + static const struct irq_domain_ops mdss_hw_irqdomain_ops = { 120 120 .map = mdss_hw_irqdomain_map, 121 121 .xlate = irq_domain_xlate_onecell, 122 122 };
+7 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 225 225 226 226 mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), 227 227 sizeof(*mdp5_state), GFP_KERNEL); 228 + if (!mdp5_state) 229 + return NULL; 228 230 229 - if (mdp5_state && mdp5_state->base.fb) 230 - drm_framebuffer_reference(mdp5_state->base.fb); 231 + __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base); 231 232 232 233 return &mdp5_state->base; 233 234 } ··· 445 444 mdp5_pipe_release(state->state, old_hwpipe); 446 445 mdp5_pipe_release(state->state, old_right_hwpipe); 447 446 } 447 + } else { 448 + mdp5_pipe_release(state->state, mdp5_state->hwpipe); 449 + mdp5_pipe_release(state->state, mdp5_state->r_hwpipe); 450 + mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL; 448 451 } 449 452 450 453 return 0;
+1
drivers/gpu/drm/msm/msm_drv.c
··· 830 830 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 831 831 .gem_prime_export = drm_gem_prime_export, 832 832 .gem_prime_import = drm_gem_prime_import, 833 + .gem_prime_res_obj = msm_gem_prime_res_obj, 833 834 .gem_prime_pin = msm_gem_prime_pin, 834 835 .gem_prime_unpin = msm_gem_prime_unpin, 835 836 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+1
drivers/gpu/drm/msm/msm_drv.h
··· 224 224 void *msm_gem_prime_vmap(struct drm_gem_object *obj); 225 225 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 226 226 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 227 + struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); 227 228 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 228 229 struct dma_buf_attachment *attach, struct sg_table *sg); 229 230 int msm_gem_prime_pin(struct drm_gem_object *obj);
+2 -8
drivers/gpu/drm/msm/msm_fence.c
··· 99 99 } 100 100 101 101 struct msm_fence { 102 - struct msm_fence_context *fctx; 103 102 struct dma_fence base; 103 + struct msm_fence_context *fctx; 104 104 }; 105 105 106 106 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) ··· 130 130 return fence_completed(f->fctx, f->base.seqno); 131 131 } 132 132 133 - static void msm_fence_release(struct dma_fence *fence) 134 - { 135 - struct msm_fence *f = to_msm_fence(fence); 136 - kfree_rcu(f, base.rcu); 137 - } 138 - 139 133 static const struct dma_fence_ops msm_fence_ops = { 140 134 .get_driver_name = msm_fence_get_driver_name, 141 135 .get_timeline_name = msm_fence_get_timeline_name, 142 136 .enable_signaling = msm_fence_enable_signaling, 143 137 .signaled = msm_fence_signaled, 144 138 .wait = dma_fence_default_wait, 145 - .release = msm_fence_release, 139 + .release = dma_fence_free, 146 140 }; 147 141 148 142 struct dma_fence *
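The member swap in struct msm_fence above appears to be what makes the switch to the generic dma_fence_free() safe: that helper frees the memory at the dma_fence pointer it is handed, so the embedded base must sit at offset 0 for the whole msm_fence allocation to be reclaimed. A standalone sketch of the layout concern, with toy types rather than the kernel API:

#include <stddef.h>
#include <stdio.h>

struct base { int refcount; };

struct ok  { struct base base; void *ctx; };    /* base first, offset 0   */
struct bad { void *ctx; struct base base; };    /* base at nonzero offset */

int main(void)
{
        /* free(&obj->base) only equals free(obj) when the offset is 0 */
        printf("ok:  %zu\n", offsetof(struct ok, base));
        printf("bad: %zu\n", offsetof(struct bad, base));
        return 0;
}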
+6
drivers/gpu/drm/msm/msm_gem.c
··· 758 758 struct msm_gem_object *msm_obj; 759 759 bool use_vram = false; 760 760 761 + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 762 + 761 763 switch (flags & MSM_BO_CACHE_MASK) { 762 764 case MSM_BO_UNCACHED: 763 765 case MSM_BO_CACHED: ··· 855 853 856 854 size = PAGE_ALIGN(dmabuf->size); 857 855 856 + /* Take mutex so we can modify the inactive list in msm_gem_new_impl */ 857 + mutex_lock(&dev->struct_mutex); 858 858 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); 859 + mutex_unlock(&dev->struct_mutex); 860 + 859 861 if (ret) 860 862 goto fail; 861 863
+7
drivers/gpu/drm/msm/msm_gem_prime.c
··· 70 70 if (!obj->import_attach) 71 71 msm_gem_put_pages(obj); 72 72 } 73 + 74 + struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) 75 + { 76 + struct msm_gem_object *msm_obj = to_msm_bo(obj); 77 + 78 + return msm_obj->resv; 79 + }
+7 -7
drivers/gpu/drm/msm/msm_gem_submit.c
··· 410 410 if (!in_fence) 411 411 return -EINVAL; 412 412 413 - /* TODO if we get an array-fence due to userspace merging multiple 414 - * fences, we need a way to determine if all the backing fences 415 - * are from our own context.. 413 + /* 414 + * Wait if the fence is from a foreign context, or if the fence 415 + * array contains any fence from a foreign context. 416 416 */ 417 - 418 - if (in_fence->context != gpu->fctx->context) { 417 + if (!dma_fence_match_context(in_fence, gpu->fctx->context)) { 419 418 ret = dma_fence_wait(in_fence, true); 420 419 if (ret) 421 420 return ret; ··· 495 496 goto out; 496 497 } 497 498 498 - if ((submit_cmd.size + submit_cmd.submit_offset) >= 499 - msm_obj->base.size) { 499 + if (!submit_cmd.size || 500 + ((submit_cmd.size + submit_cmd.submit_offset) > 501 + msm_obj->base.size)) { 500 502 DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); 501 503 ret = -EINVAL; 502 504 goto out;
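dma_fence_match_context() resolves the TODO in the removed comment: it returns true only when the fence, or every backing fence inside a merged fence array, comes from the given context. A toy standalone sketch of that all-of check, with plain arrays standing in for fence arrays:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool match_context(const uint64_t *ctx, size_t n, uint64_t own)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (ctx[i] != own)
                        return false;   /* foreign fence: caller must wait */
        return true;
}

int main(void)
{
        uint64_t merged[] = { 3, 3, 7 };        /* one foreign context */

        printf("%s\n", match_context(merged, 3, 3) ? "skip wait" : "wait");
        return 0;
}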
+2 -2
drivers/gpu/drm/msm/msm_gpu.c
··· 549 549 gpu->grp_clks[i] = get_clock(dev, name); 550 550 551 551 /* Remember the key clocks that we need to control later */ 552 - if (!strcmp(name, "core")) 552 + if (!strcmp(name, "core") || !strcmp(name, "core_clk")) 553 553 gpu->core_clk = gpu->grp_clks[i]; 554 - else if (!strcmp(name, "rbbmtimer")) 554 + else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk")) 555 555 gpu->rbbmtimer_clk = gpu->grp_clks[i]; 556 556 557 557 ++i;
+4 -2
drivers/hid/Kconfig
··· 275 275 - Trio Linker Plus II 276 276 277 277 config HID_ELECOM 278 - tristate "ELECOM BM084 bluetooth mouse" 278 + tristate "ELECOM HID devices" 279 279 depends on HID 280 280 ---help--- 281 - Support for the ELECOM BM084 (bluetooth mouse). 281 + Support for ELECOM devices: 282 + - BM084 Bluetooth Mouse 283 + - DEFT Trackball (Wired and wireless) 282 284 283 285 config HID_ELO 284 286 tristate "ELO USB 4000/4500 touchscreen"
+12
drivers/hid/hid-asus.c
··· 69 69 #define QUIRK_IS_MULTITOUCH BIT(3) 70 70 #define QUIRK_NO_CONSUMER_USAGES BIT(4) 71 71 #define QUIRK_USE_KBD_BACKLIGHT BIT(5) 72 + #define QUIRK_T100_KEYBOARD BIT(6) 72 73 73 74 #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ 74 75 QUIRK_NO_INIT_REPORTS | \ ··· 537 536 drvdata->kbd_backlight->removed = true; 538 537 cancel_work_sync(&drvdata->kbd_backlight->work); 539 538 } 539 + 540 + hid_hw_stop(hdev); 540 541 } 541 542 542 543 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, ··· 551 548 hid_info(hdev, "Fixing up Asus notebook report descriptor\n"); 552 549 rdesc[55] = 0xdd; 553 550 } 551 + if (drvdata->quirks & QUIRK_T100_KEYBOARD && 552 + *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) { 553 + hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n"); 554 + rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT; 555 + } 556 + 554 557 return rdesc; 555 558 } 556 559 ··· 569 560 USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, 570 561 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, 571 562 USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT }, 563 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, 564 + USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD), 565 + QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, 572 566 { } 573 567 }; 574 568 MODULE_DEVICE_TABLE(hid, asus_devices);
+3
drivers/hid/hid-core.c
··· 1855 1855 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, 1856 1856 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, 1857 1857 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, 1858 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, 1858 1859 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, 1859 1860 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1860 1861 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, ··· 1892 1891 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, 1893 1892 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, 1894 1893 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 1894 + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 1895 + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 1895 1896 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, 1896 1897 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, 1897 1898 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+53 -9
drivers/hid/hid-elecom.c
··· 1 1 /* 2 - * HID driver for Elecom BM084 (bluetooth mouse). 3 - * Removes a non-existing horizontal wheel from 4 - * the HID descriptor. 5 - * (This module is based on "hid-ortek".) 6 - * 2 + * HID driver for ELECOM devices. 7 3 * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> 4 + * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> 5 + * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> 8 6 */ 9 7 10 8 /* ··· 21 23 static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, 22 24 unsigned int *rsize) 23 25 { 24 - if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { 25 - hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); 26 - rdesc[47] = 0x00; 26 + switch (hdev->product) { 27 + case USB_DEVICE_ID_ELECOM_BM084: 28 + /* The BM084 Bluetooth mouse includes a non-existing horizontal 29 + * wheel in the HID descriptor. */ 30 + if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { 31 + hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); 32 + rdesc[47] = 0x00; 33 + } 34 + break; 35 + case USB_DEVICE_ID_ELECOM_DEFT_WIRED: 36 + case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: 37 + /* The DEFT trackball has eight buttons, but its descriptor only 38 + * reports five, disabling the three Fn buttons on the top of 39 + * the mouse. 40 + * 41 + * Apply the following diff to the descriptor: 42 + * 43 + * Collection (Physical), Collection (Physical), 44 + * Report ID (1), Report ID (1), 45 + * Report Count (5), -> Report Count (8), 46 + * Report Size (1), Report Size (1), 47 + * Usage Page (Button), Usage Page (Button), 48 + * Usage Minimum (01h), Usage Minimum (01h), 49 + * Usage Maximum (05h), -> Usage Maximum (08h), 50 + * Logical Minimum (0), Logical Minimum (0), 51 + * Logical Maximum (1), Logical Maximum (1), 52 + * Input (Variable), Input (Variable), 53 + * Report Count (1), -> Report Count (0), 54 + * Report Size (3), Report Size (3), 55 + * Input (Constant), Input (Constant), 56 + * Report Size (16), Report Size (16), 57 + * Report Count (2), Report Count (2), 58 + * Usage Page (Desktop), Usage Page (Desktop), 59 + * Usage (X), Usage (X), 60 + * Usage (Y), Usage (Y), 61 + * Logical Minimum (-32768), Logical Minimum (-32768), 62 + * Logical Maximum (32767), Logical Maximum (32767), 63 + * Input (Variable, Relative), Input (Variable, Relative), 64 + * End Collection, End Collection, 65 + */ 66 + if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { 67 + hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); 68 + rdesc[13] = 8; /* Button/Variable Report Count */ 69 + rdesc[21] = 8; /* Button/Variable Usage Maximum */ 70 + rdesc[29] = 0; /* Button/Constant Report Count */ 71 + } 72 + break; 27 73 } 28 74 return rdesc; 29 75 } 30 76 31 77 static const struct hid_device_id elecom_devices[] = { 32 - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)}, 78 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 79 + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 80 + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 33 81 { } 34 82 }; 35 83 MODULE_DEVICE_TABLE(hid, elecom_devices);
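The DEFT fixup above is the usual guard-then-patch idiom for report descriptors: verify the blob length and the exact original byte values before rewriting them, so a firmware revision with a different descriptor is left untouched. A standalone sketch using the same sizes and offsets as the hunk (the descriptor contents here are fake):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void fixup_deft(uint8_t *rdesc, size_t rsize)
{
        if (rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
                rdesc[13] = 8;  /* Button/Variable Report Count  */
                rdesc[21] = 8;  /* Button/Variable Usage Maximum */
                rdesc[29] = 0;  /* Button/Constant Report Count  */
        }
}

int main(void)
{
        uint8_t rdesc[213];

        memset(rdesc, 0, sizeof(rdesc));
        rdesc[13] = rdesc[21] = 5;
        fixup_deft(rdesc, sizeof(rdesc));
        printf("count=%u max=%u pad=%u\n", rdesc[13], rdesc[21], rdesc[29]);
        return 0;
}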
+3
drivers/hid/hid-ids.h
··· 173 173 #define USB_VENDOR_ID_ASUSTEK 0x0b05 174 174 #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 175 175 #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b 176 + #define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0 176 177 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585 177 178 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101 178 179 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854 ··· 359 358 360 359 #define USB_VENDOR_ID_ELECOM 0x056e 361 360 #define USB_DEVICE_ID_ELECOM_BM084 0x0061 361 + #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe 362 + #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff 362 363 363 364 #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 364 365 #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
+8 -7
drivers/hid/hid-magicmouse.c
··· 349 349 350 350 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { 351 351 magicmouse_emit_buttons(msc, clicks & 3); 352 + input_mt_report_pointer_emulation(input, true); 352 353 input_report_rel(input, REL_X, x); 353 354 input_report_rel(input, REL_Y, y); 354 355 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ ··· 389 388 __clear_bit(BTN_RIGHT, input->keybit); 390 389 __clear_bit(BTN_MIDDLE, input->keybit); 391 390 __set_bit(BTN_MOUSE, input->keybit); 392 - __set_bit(BTN_TOOL_FINGER, input->keybit); 393 - __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); 394 - __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); 395 - __set_bit(BTN_TOOL_QUADTAP, input->keybit); 396 - __set_bit(BTN_TOOL_QUINTTAP, input->keybit); 397 - __set_bit(BTN_TOUCH, input->keybit); 398 - __set_bit(INPUT_PROP_POINTER, input->propbit); 399 391 __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); 400 392 } 401 393 394 + __set_bit(BTN_TOOL_FINGER, input->keybit); 395 + __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); 396 + __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); 397 + __set_bit(BTN_TOOL_QUADTAP, input->keybit); 398 + __set_bit(BTN_TOOL_QUINTTAP, input->keybit); 399 + __set_bit(BTN_TOUCH, input->keybit); 400 + __set_bit(INPUT_PROP_POINTER, input->propbit); 402 401 403 402 __set_bit(EV_ABS, input->evbit); 404 403
+13
drivers/hid/i2c-hid/i2c-hid.c
··· 897 897 return 0; 898 898 } 899 899 900 + static void i2c_hid_acpi_fix_up_power(struct device *dev) 901 + { 902 + acpi_handle handle = ACPI_HANDLE(dev); 903 + struct acpi_device *adev; 904 + 905 + if (handle && acpi_bus_get_device(handle, &adev) == 0) 906 + acpi_device_fix_up_power(adev); 907 + } 908 + 900 909 static const struct acpi_device_id i2c_hid_acpi_match[] = { 901 910 {"ACPI0C50", 0 }, 902 911 {"PNP0C50", 0 }, ··· 918 909 { 919 910 return -ENODEV; 920 911 } 912 + 913 + static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {} 921 914 #endif 922 915 923 916 #ifdef CONFIG_OF ··· 1040 1029 ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE); 1041 1030 if (ret < 0) 1042 1031 goto err_regulator; 1032 + 1033 + i2c_hid_acpi_fix_up_power(&client->dev); 1043 1034 1044 1035 pm_runtime_get_noresume(&client->dev); 1045 1036 pm_runtime_set_active(&client->dev);
+24 -23
drivers/hid/wacom_wac.c
··· 1571 1571 { 1572 1572 unsigned char *data = wacom->data; 1573 1573 1574 - if (wacom->pen_input) 1574 + if (wacom->pen_input) { 1575 1575 dev_dbg(wacom->pen_input->dev.parent, 1576 1576 "%s: received report #%d\n", __func__, data[0]); 1577 - else if (wacom->touch_input) 1577 + 1578 + if (len == WACOM_PKGLEN_PENABLED || 1579 + data[0] == WACOM_REPORT_PENABLED) 1580 + return wacom_tpc_pen(wacom); 1581 + } 1582 + else if (wacom->touch_input) { 1578 1583 dev_dbg(wacom->touch_input->dev.parent, 1579 1584 "%s: received report #%d\n", __func__, data[0]); 1580 1585 1581 - switch (len) { 1582 - case WACOM_PKGLEN_TPC1FG: 1583 - return wacom_tpc_single_touch(wacom, len); 1584 - 1585 - case WACOM_PKGLEN_TPC2FG: 1586 - return wacom_tpc_mt_touch(wacom); 1587 - 1588 - case WACOM_PKGLEN_PENABLED: 1589 - return wacom_tpc_pen(wacom); 1590 - 1591 - default: 1592 - switch (data[0]) { 1593 - case WACOM_REPORT_TPC1FG: 1594 - case WACOM_REPORT_TPCHID: 1595 - case WACOM_REPORT_TPCST: 1596 - case WACOM_REPORT_TPC1FGE: 1586 + switch (len) { 1587 + case WACOM_PKGLEN_TPC1FG: 1597 1588 return wacom_tpc_single_touch(wacom, len); 1598 1589 1599 - case WACOM_REPORT_TPCMT: 1600 - case WACOM_REPORT_TPCMT2: 1601 - return wacom_mt_touch(wacom); 1590 + case WACOM_PKGLEN_TPC2FG: 1591 + return wacom_tpc_mt_touch(wacom); 1602 1592 1603 - case WACOM_REPORT_PENABLED: 1604 - return wacom_tpc_pen(wacom); 1593 + default: 1594 + switch (data[0]) { 1595 + case WACOM_REPORT_TPC1FG: 1596 + case WACOM_REPORT_TPCHID: 1597 + case WACOM_REPORT_TPCST: 1598 + case WACOM_REPORT_TPC1FGE: 1599 + return wacom_tpc_single_touch(wacom, len); 1600 + 1601 + case WACOM_REPORT_TPCMT: 1602 + case WACOM_REPORT_TPCMT2: 1603 + return wacom_mt_touch(wacom); 1604 + 1605 + } 1605 1606 } 1606 1607 } 1607 1608
+1
drivers/hwmon/Kconfig
··· 343 343 344 344 config SENSORS_ASPEED 345 345 tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver" 346 + select REGMAP 346 347 help 347 348 This driver provides support for ASPEED AST2400/AST2500 PWM 348 349 and Fan Tacho controllers.
+35 -30
drivers/hwmon/aspeed-pwm-tacho.c
··· 7 7 */ 8 8 9 9 #include <linux/clk.h> 10 + #include <linux/errno.h> 10 11 #include <linux/gpio/consumer.h> 11 12 #include <linux/delay.h> 12 13 #include <linux/hwmon.h> ··· 495 494 return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit); 496 495 } 497 496 498 - static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, 497 + static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, 499 498 u8 fan_tach_ch) 500 499 { 501 500 u32 raw_data, tach_div, clk_source, sec, val; ··· 511 510 msleep(sec); 512 511 513 512 regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val); 513 + if (!(val & RESULT_STATUS_MASK)) 514 + return -ETIMEDOUT; 515 + 514 516 raw_data = val & RESULT_VALUE_MASK; 515 517 tach_div = priv->type_fan_tach_clock_division[type]; 516 518 tach_div = 0x4 << (tach_div * 2); ··· 565 561 { 566 562 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); 567 563 int index = sensor_attr->index; 568 - u32 rpm; 564 + int rpm; 569 565 struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev); 570 566 571 567 rpm = aspeed_get_fan_tach_ch_rpm(priv, index); 568 + if (rpm < 0) 569 + return rpm; 572 570 573 - return sprintf(buf, "%u\n", rpm); 571 + return sprintf(buf, "%d\n", rpm); 574 572 } 575 573 576 574 static umode_t pwm_is_visible(struct kobject *kobj, ··· 597 591 return a->mode; 598 592 } 599 593 600 - static SENSOR_DEVICE_ATTR(pwm0, 0644, 601 - show_pwm, set_pwm, 0); 602 594 static SENSOR_DEVICE_ATTR(pwm1, 0644, 603 - show_pwm, set_pwm, 1); 595 + show_pwm, set_pwm, 0); 604 596 static SENSOR_DEVICE_ATTR(pwm2, 0644, 605 - show_pwm, set_pwm, 2); 597 + show_pwm, set_pwm, 1); 606 598 static SENSOR_DEVICE_ATTR(pwm3, 0644, 607 - show_pwm, set_pwm, 3); 599 + show_pwm, set_pwm, 2); 608 600 static SENSOR_DEVICE_ATTR(pwm4, 0644, 609 - show_pwm, set_pwm, 4); 601 + show_pwm, set_pwm, 3); 610 602 static SENSOR_DEVICE_ATTR(pwm5, 0644, 611 - show_pwm, set_pwm, 5); 603 + show_pwm, set_pwm, 4); 612 604 static SENSOR_DEVICE_ATTR(pwm6, 0644, 613 - show_pwm, set_pwm, 6); 605 + show_pwm, set_pwm, 5); 614 606 static SENSOR_DEVICE_ATTR(pwm7, 0644, 607 + show_pwm, set_pwm, 6); 608 + static SENSOR_DEVICE_ATTR(pwm8, 0644, 615 609 show_pwm, set_pwm, 7); 616 610 static struct attribute *pwm_dev_attrs[] = { 617 - &sensor_dev_attr_pwm0.dev_attr.attr, 618 611 &sensor_dev_attr_pwm1.dev_attr.attr, 619 612 &sensor_dev_attr_pwm2.dev_attr.attr, 620 613 &sensor_dev_attr_pwm3.dev_attr.attr, ··· 621 616 &sensor_dev_attr_pwm5.dev_attr.attr, 622 617 &sensor_dev_attr_pwm6.dev_attr.attr, 623 618 &sensor_dev_attr_pwm7.dev_attr.attr, 619 + &sensor_dev_attr_pwm8.dev_attr.attr, 624 620 NULL, 625 621 }; 626 622 ··· 630 624 .is_visible = pwm_is_visible, 631 625 }; 632 626 633 - static SENSOR_DEVICE_ATTR(fan0_input, 0444, 634 - show_rpm, NULL, 0); 635 627 static SENSOR_DEVICE_ATTR(fan1_input, 0444, 636 - show_rpm, NULL, 1); 628 + show_rpm, NULL, 0); 637 629 static SENSOR_DEVICE_ATTR(fan2_input, 0444, 638 - show_rpm, NULL, 2); 630 + show_rpm, NULL, 1); 639 631 static SENSOR_DEVICE_ATTR(fan3_input, 0444, 640 - show_rpm, NULL, 3); 632 + show_rpm, NULL, 2); 641 633 static SENSOR_DEVICE_ATTR(fan4_input, 0444, 642 - show_rpm, NULL, 4); 634 + show_rpm, NULL, 3); 643 635 static SENSOR_DEVICE_ATTR(fan5_input, 0444, 644 - show_rpm, NULL, 5); 636 + show_rpm, NULL, 4); 645 637 static SENSOR_DEVICE_ATTR(fan6_input, 0444, 646 - show_rpm, NULL, 6); 638 + show_rpm, NULL, 5); 647 639 static SENSOR_DEVICE_ATTR(fan7_input, 0444, 648 - show_rpm, NULL, 7); 640 + show_rpm, NULL, 6); 649 641 static SENSOR_DEVICE_ATTR(fan8_input, 0444, 650 - show_rpm, NULL, 8); 642 + show_rpm, NULL, 7); 651 643 static SENSOR_DEVICE_ATTR(fan9_input, 0444, 652 - show_rpm, NULL, 9); 644 + show_rpm, NULL, 8); 653 645 static SENSOR_DEVICE_ATTR(fan10_input, 0444, 654 - show_rpm, NULL, 10); 646 + show_rpm, NULL, 9); 655 647 static SENSOR_DEVICE_ATTR(fan11_input, 0444, 656 - show_rpm, NULL, 11); 648 + show_rpm, NULL, 10); 657 649 static SENSOR_DEVICE_ATTR(fan12_input, 0444, 658 - show_rpm, NULL, 12); 650 + show_rpm, NULL, 11); 659 651 static SENSOR_DEVICE_ATTR(fan13_input, 0444, 660 - show_rpm, NULL, 13); 652 + show_rpm, NULL, 12); 661 653 static SENSOR_DEVICE_ATTR(fan14_input, 0444, 662 - show_rpm, NULL, 14); 654 + show_rpm, NULL, 13); 663 655 static SENSOR_DEVICE_ATTR(fan15_input, 0444, 656 + show_rpm, NULL, 14); 657 + static SENSOR_DEVICE_ATTR(fan16_input, 0444, 664 658 show_rpm, NULL, 15); 665 659 static struct attribute *fan_dev_attrs[] = { 666 - &sensor_dev_attr_fan0_input.dev_attr.attr, 667 660 &sensor_dev_attr_fan1_input.dev_attr.attr, 668 661 &sensor_dev_attr_fan2_input.dev_attr.attr, 669 662 &sensor_dev_attr_fan3_input.dev_attr.attr, ··· 678 673 &sensor_dev_attr_fan13_input.dev_attr.attr, 679 674 &sensor_dev_attr_fan14_input.dev_attr.attr, 680 675 &sensor_dev_attr_fan15_input.dev_attr.attr, 676 + &sensor_dev_attr_fan16_input.dev_attr.attr, 681 677 NULL 682 678 }; 683 679 ··· 808 802 if (ret) 809 803 return ret; 810 804 } 811 - of_node_put(np); 812 805 813 806 priv->groups[0] = &pwm_dev_group; 814 807 priv->groups[1] = &fan_dev_group;
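Two independent things happen in the aspeed hunk: the sysfs names move from 0-based (fan0_input, pwm0) to the 1-based numbering the hwmon ABI expects while the driver keeps its channel indexes 0-based internally, and the RPM read now fails with -ETIMEDOUT when the RESULT status bit never sets instead of reporting a bogus value. A standalone sketch of the status-checked read; the mask positions here are assumptions for illustration, not the ASPEED register layout:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define RESULT_STATUS_MASK (1u << 20)   /* assumed bit position */
#define RESULT_VALUE_MASK  0xfffffu     /* assumed field width  */

static int read_rpm(uint32_t result_reg)
{
        if (!(result_reg & RESULT_STATUS_MASK))
                return -ETIMEDOUT;      /* measurement never completed */
        return (int)(result_reg & RESULT_VALUE_MASK);
}

int main(void)
{
        printf("%d\n", read_rpm(0));                            /* -110 */
        printf("%d\n", read_rpm(RESULT_STATUS_MASK | 1200));    /* 1200 */
        return 0;
}

Returning the negative errno straight out of the show callback is what lets userspace distinguish a stalled measurement from a genuine 0 RPM.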
+2 -2
drivers/infiniband/core/cm.c
··· 1429 1429 primary_path->packet_life_time = 1430 1430 cm_req_get_primary_local_ack_timeout(req_msg); 1431 1431 primary_path->packet_life_time -= (primary_path->packet_life_time > 0); 1432 - sa_path_set_service_id(primary_path, req_msg->service_id); 1432 + primary_path->service_id = req_msg->service_id; 1433 1433 1434 1434 if (req_msg->alt_local_lid) { 1435 1435 alt_path->dgid = req_msg->alt_local_gid; ··· 1452 1452 alt_path->packet_life_time = 1453 1453 cm_req_get_alt_local_ack_timeout(req_msg); 1454 1454 alt_path->packet_life_time -= (alt_path->packet_life_time > 0); 1455 - sa_path_set_service_id(alt_path, req_msg->service_id); 1455 + alt_path->service_id = req_msg->service_id; 1456 1456 } 1457 1457 } 1458 1458
+6 -7
drivers/infiniband/core/cma.c
··· 1140 1140 ib->sib_pkey = path->pkey; 1141 1141 ib->sib_flowinfo = path->flow_label; 1142 1142 memcpy(&ib->sib_addr, &path->sgid, 16); 1143 - ib->sib_sid = sa_path_get_service_id(path); 1143 + ib->sib_sid = path->service_id; 1144 1144 ib->sib_scope_id = 0; 1145 1145 } else { 1146 1146 ib->sib_pkey = listen_ib->sib_pkey; ··· 1274 1274 memcpy(&req->local_gid, &req_param->primary_path->sgid, 1275 1275 sizeof(req->local_gid)); 1276 1276 req->has_gid = true; 1277 - req->service_id = 1278 - sa_path_get_service_id(req_param->primary_path); 1277 + req->service_id = req_param->primary_path->service_id; 1279 1278 req->pkey = be16_to_cpu(req_param->primary_path->pkey); 1280 1279 if (req->pkey != req_param->bth_pkey) 1281 1280 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" ··· 1826 1827 struct rdma_route *rt; 1827 1828 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1828 1829 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; 1829 - const __be64 service_id = sa_path_get_service_id(path); 1830 + const __be64 service_id = 1831 + ib_event->param.req_rcvd.primary_path->service_id; 1830 1832 int ret; 1831 1833 1832 1834 id = rdma_create_id(listen_id->route.addr.dev_addr.net, ··· 2345 2345 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2346 2346 path_rec.numb_path = 1; 2347 2347 path_rec.reversible = 1; 2348 - sa_path_set_service_id(&path_rec, 2349 - rdma_get_service_id(&id_priv->id, 2350 - cma_dst_addr(id_priv))); 2348 + path_rec.service_id = rdma_get_service_id(&id_priv->id, 2349 + cma_dst_addr(id_priv)); 2351 2350 2352 2351 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2353 2352 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
+10
drivers/infiniband/core/core_priv.h
··· 169 169 int ib_sa_init(void); 170 170 void ib_sa_cleanup(void); 171 171 172 + int ibnl_init(void); 173 + void ibnl_cleanup(void); 174 + 175 + /** 176 + * Check if there are any listeners to the netlink group 177 + * @group: the netlink group ID 178 + * Returns 0 on success or a negative for no listeners. 179 + */ 180 + int ibnl_chk_listeners(unsigned int group); 181 + 172 182 int ib_nl_handle_resolve_resp(struct sk_buff *skb, 173 183 struct netlink_callback *cb); 174 184 int ib_nl_handle_set_timeout(struct sk_buff *skb,
+1 -1
drivers/infiniband/core/netlink.c
··· 37 37 #include <net/net_namespace.h> 38 38 #include <net/sock.h> 39 39 #include <rdma/rdma_netlink.h> 40 + #include "core_priv.h" 40 41 41 42 struct ibnl_client { 42 43 struct list_head list; ··· 56 55 return -1; 57 56 return 0; 58 57 } 59 - EXPORT_SYMBOL(ibnl_chk_listeners); 60 58 61 59 int ibnl_add_client(int index, int nops, 62 60 const struct ibnl_client_cbs cb_table[])
+3 -3
drivers/infiniband/core/sa_query.c
··· 194 194 .field_name = "sa_path_rec:" #field 195 195 196 196 static const struct ib_field path_rec_table[] = { 197 - { PATH_REC_FIELD(ib.service_id), 197 + { PATH_REC_FIELD(service_id), 198 198 .offset_words = 0, 199 199 .offset_bits = 0, 200 200 .size_bits = 64 }, ··· 296 296 .field_name = "sa_path_rec:" #field 297 297 298 298 static const struct ib_field opa_path_rec_table[] = { 299 - { OPA_PATH_REC_FIELD(opa.service_id), 299 + { OPA_PATH_REC_FIELD(service_id), 300 300 .offset_words = 0, 301 301 .offset_bits = 0, 302 302 .size_bits = 64 }, ··· 774 774 775 775 /* Now build the attributes */ 776 776 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { 777 - val64 = be64_to_cpu(sa_path_get_service_id(sa_rec)); 777 + val64 = be64_to_cpu(sa_rec->service_id); 778 778 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, 779 779 sizeof(val64), &val64); 780 780 }
+1 -1
drivers/infiniband/core/umem.c
··· 58 58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { 59 59 60 60 page = sg_page(sg); 61 - if (umem->writable && dirty) 61 + if (!PageDirty(page) && umem->writable && dirty) 62 62 set_page_dirty_lock(page); 63 63 put_page(page); 64 64 }
+5 -1
drivers/infiniband/core/umem_odp.c
··· 321 321 struct vm_area_struct *vma; 322 322 struct hstate *h; 323 323 324 + down_read(&mm->mmap_sem); 324 325 vma = find_vma(mm, ib_umem_start(umem)); 325 - if (!vma || !is_vm_hugetlb_page(vma)) 326 + if (!vma || !is_vm_hugetlb_page(vma)) { 327 + up_read(&mm->mmap_sem); 326 328 return -EINVAL; 329 + } 327 330 h = hstate_vma(vma); 328 331 umem->page_shift = huge_page_shift(h); 332 + up_read(&mm->mmap_sem); 329 333 umem->hugetlb = 1; 330 334 } else { 331 335 umem->hugetlb = 0;
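The umem_odp fix is a plain locking-discipline change: find_vma() is only valid under mmap_sem, and the early-return error path must drop the lock too. A generic standalone sketch of the same read-lock bracket, with pthreads standing in for the kernel rwsem:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t maps_lock = PTHREAD_RWLOCK_INITIALIZER;

static int lookup(int key, int *out)
{
        pthread_rwlock_rdlock(&maps_lock);
        if (key < 0) {
                pthread_rwlock_unlock(&maps_lock);  /* error path unlocks too */
                return -EINVAL;
        }
        *out = key * 2;         /* use the data the lock protects */
        pthread_rwlock_unlock(&maps_lock);
        return 0;
}

int main(void)
{
        int v = 0;

        printf("%d\n", lookup(21, &v) ? -1 : v);        /* 42 */
        return 0;
}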
+4 -4
drivers/infiniband/core/uverbs_marshall.c
··· 96 96 } 97 97 EXPORT_SYMBOL(ib_copy_qp_attr_to_user); 98 98 99 - void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 100 - struct sa_path_rec *src) 99 + static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 100 + struct sa_path_rec *src) 101 101 { 102 - memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid); 103 - memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid); 102 + memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid)); 103 + memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid)); 104 104 105 105 dst->dlid = htons(ntohl(sa_path_get_dlid(src))); 106 106 dst->slid = htons(ntohl(sa_path_get_slid(src)));
+7 -2
drivers/infiniband/hw/cxgb4/cm.c
··· 488 488 489 489 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 490 490 release_ep_resources(ep); 491 + kfree_skb(skb); 491 492 return 0; 492 493 } 493 494 ··· 499 498 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 500 499 c4iw_put_ep(&ep->parent_ep->com); 501 500 release_ep_resources(ep); 501 + kfree_skb(skb); 502 502 return 0; 503 503 } 504 504 ··· 571 569 572 570 pr_debug("%s rdev %p\n", __func__, rdev); 573 571 req->cmd = CPL_ABORT_NO_RST; 572 + skb_get(skb); 574 573 ret = c4iw_ofld_send(rdev, skb); 575 574 if (ret) { 576 575 __state_set(&ep->com, DEAD); 577 576 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 578 - } 577 + } else 578 + kfree_skb(skb); 579 579 } 580 580 581 581 static int send_flowc(struct c4iw_ep *ep) ··· 2521 2517 goto reject; 2522 2518 } 2523 2519 2524 - hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + 2520 + hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + 2521 + sizeof(struct tcphdr) + 2525 2522 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); 2526 2523 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2527 2524 child_ep->mtu = peer_mss + hdrs;
+1 -1
drivers/infiniband/hw/cxgb4/device.c
··· 971 971 devp->rdev.lldi.sge_egrstatuspagesize); 972 972 973 973 devp->rdev.hw_queue.t4_eq_status_entries = 974 - devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1; 974 + devp->rdev.lldi.sge_egrstatuspagesize / 64; 975 975 devp->rdev.hw_queue.t4_max_eq_size = 65520; 976 976 devp->rdev.hw_queue.t4_max_iq_size = 65520; 977 977 devp->rdev.hw_queue.t4_max_rq_size = 8192 -
+50 -17
drivers/infiniband/hw/hfi1/chip.c
··· 6312 6312 } 6313 6313 } 6314 6314 6315 - static void write_global_credit(struct hfi1_devdata *dd, 6316 - u8 vau, u16 total, u16 shared) 6315 + /* 6316 + * Set up allocation unit value. 6317 + */ 6318 + void set_up_vau(struct hfi1_devdata *dd, u8 vau) 6317 6319 { 6318 - write_csr(dd, SEND_CM_GLOBAL_CREDIT, 6319 - ((u64)total << 6320 - SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) | 6321 - ((u64)shared << 6322 - SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) | 6323 - ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); 6320 + u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 6321 + 6322 + /* do not modify other values in the register */ 6323 + reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; 6324 + reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; 6325 + write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 6324 6326 } 6325 6327 6326 6328 /* 6327 6329 * Set up initial VL15 credits of the remote. Assumes the rest of 6328 - * the CM credit registers are zero from a previous global or credit reset . 6330 + * the CM credit registers are zero from a previous global or credit reset. 6331 + * Shared limit for VL15 will always be 0. 6329 6332 */ 6330 - void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) 6333 + void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) 6331 6334 { 6332 - /* leave shared count at zero for both global and VL15 */ 6333 - write_global_credit(dd, vau, vl15buf, 0); 6335 + u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 6336 + 6337 + /* set initial values for total and shared credit limit */ 6338 + reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | 6339 + SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); 6340 + 6341 + /* 6342 + * Set total limit to be equal to VL15 credits. 6343 + * Leave shared limit at 0. 6344 + */ 6345 + reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; 6346 + write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 6334 6347 6335 6348 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6336 6349 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); ··· 6361 6348 for (i = 0; i < TXE_NUM_DATA_VL; i++) 6362 6349 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); 6363 6350 write_csr(dd, SEND_CM_CREDIT_VL15, 0); 6364 - write_global_credit(dd, 0, 0, 0); 6351 + write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); 6365 6352 /* reset the CM block */ 6366 6353 pio_send_control(dd, PSC_CM_RESET); 6354 + /* reset cached value */ 6355 + dd->vl15buf_cached = 0; 6367 6356 } 6368 6357 6369 6358 /* convert a vCU to a CU */ ··· 6854 6839 { 6855 6840 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6856 6841 link_up_work); 6842 + struct hfi1_devdata *dd = ppd->dd; 6843 + 6857 6844 set_link_state(ppd, HLS_UP_INIT); 6858 6845 6859 6846 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ 6860 - read_ltp_rtt(ppd->dd); 6847 + read_ltp_rtt(dd); 6861 6848 /* 6862 6849 * OPA specifies that certain counters are cleared on a transition 6863 6850 * to link up, so do that. 6864 6851 */ 6865 - clear_linkup_counters(ppd->dd); 6852 + clear_linkup_counters(dd); 6866 6853 /* 6867 6854 * And (re)set link up default values. 6868 6855 */ 6869 6856 set_linkup_defaults(ppd); 6870 6857 6858 + /* 6859 + * Set VL15 credits. Use cached value from verify cap interrupt. 6860 + * In case of quick linkup or simulator, vl15 value will be set by 6861 + * handle_linkup_change. VerifyCap interrupt handler will not be 6862 + * called in those scenarios. 6863 + */ 6864 + if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) 6865 + set_up_vl15(dd, dd->vl15buf_cached); 6866 + 6871 6867 /* enforce link speed enabled */ 6872 6868 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { 6873 6869 /* oops - current speed is not enabled, bounce */ 6874 - dd_dev_err(ppd->dd, 6870 + dd_dev_err(dd, 6875 6871 "Link speed active 0x%x is outside enabled 0x%x, downing link\n", 6876 6872 ppd->link_speed_active, ppd->link_speed_enabled); 6877 6873 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, ··· 7383 7357 */ 7384 7358 if (vau == 0) 7385 7359 vau = 1; 7386 - set_up_vl15(dd, vau, vl15buf); 7360 + set_up_vau(dd, vau); 7361 + 7362 + /* 7363 + * Set VL15 credits to 0 in global credit register. Cache remote VL15 7364 + * credits value and wait for link-up interrupt to set it. 7365 + */ 7366 + set_up_vl15(dd, 0); 7367 + dd->vl15buf_cached = vl15buf; 7387 7368 7388 7369 /* set up the LCB CRC mode */ 7389 7370 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
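The common thread in set_up_vau()/set_up_vl15() above is replacing a blind write of SEND_CM_GLOBAL_CREDIT with a read-modify-write that touches only the caller's own field. Using the AU constants and RESETCSR value that appear in this diff (see the chip_registers.h hunk just below), a standalone sketch of that mask/shift idiom:

#include <stdint.h>
#include <stdio.h>

#define AU_SHIFT 16                     /* SEND_CM_GLOBAL_CREDIT_AU_SHIFT */
#define AU_SMASK 0x70000ull             /* SEND_CM_GLOBAL_CREDIT_AU_SMASK */

static uint64_t set_au(uint64_t reg, uint8_t vau)
{
        reg &= ~AU_SMASK;               /* clear only the AU field    */
        reg |= (uint64_t)vau << AU_SHIFT;
        return reg;                     /* other fields are untouched */
}

int main(void)
{
        uint64_t reg = 0x0000094000030000ull;   /* RESETCSR value from the hunk */

        printf("0x%llx\n", (unsigned long long)set_au(reg, 1)); /* 0x94000010000 */
        return 0;
}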
+2
drivers/infiniband/hw/hfi1/chip_registers.h
··· 839 839 #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull 840 840 #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull 841 841 #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) 842 + #define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull 842 843 #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 844 + #define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull 843 845 #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull 844 846 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull 845 847 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
+10 -1
drivers/infiniband/hw/hfi1/hfi.h
··· 1045 1045 /* initial vl15 credits to use */ 1046 1046 u16 vl15_init; 1047 1047 1048 + /* 1049 + * Cached value for vl15buf, read during verify cap interrupt. VL15 1050 + * credits are to be kept at 0 and set when handling the link-up 1051 + * interrupt. This removes the possibility of receiving VL15 MAD 1052 + * packets before this HFI is ready. 1053 + */ 1054 + u16 vl15buf_cached; 1055 + 1048 1056 /* Misc small ints */ 1049 1057 u8 n_krcv_queues; 1050 1058 u8 qos_shift; ··· 1606 1598 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); 1607 1599 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); 1608 1600 1609 - void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); 1601 + void set_up_vau(struct hfi1_devdata *dd, u8 vau); 1602 + void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf); 1610 1603 void reset_link_credits(struct hfi1_devdata *dd); 1611 1604 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); 1612 1605
+2 -1
drivers/infiniband/hw/hfi1/intr.c
··· 130 130 * the remote values. Both sides must be using the values. 131 131 */ 132 132 if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { 133 - set_up_vl15(dd, dd->vau, dd->vl15_init); 133 + set_up_vau(dd, dd->vau); 134 + set_up_vl15(dd, dd->vl15_init); 134 135 assign_remote_cm_au_table(dd, dd->vcu); 135 136 } 136 137
+2 -2
drivers/infiniband/hw/hfi1/pcie.c
··· 207 207 /* 208 208 * Save BARs and command to rewrite after device reset. 209 209 */ 210 - dd->pcibar0 = addr; 211 - dd->pcibar1 = addr >> 32; 210 + pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0); 211 + pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1); 212 212 pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); 213 213 pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); 214 214 pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
+4 -1
drivers/infiniband/hw/hfi1/rc.c
··· 2159 2159 ret = hfi1_rvt_get_rwqe(qp, 1); 2160 2160 if (ret < 0) 2161 2161 goto nack_op_err; 2162 - if (!ret) 2162 + if (!ret) { 2163 + /* peer will send again */ 2164 + rvt_put_ss(&qp->r_sge); 2163 2165 goto rnr_nak; 2166 + } 2164 2167 wc.ex.imm_data = ohdr->u.rc.imm_data; 2165 2168 wc.wc_flags = IB_WC_WITH_IMM; 2166 2169 goto send_last;
+2 -1
drivers/infiniband/hw/hfi1/sysfs.c
··· 196 196 }; 197 197 198 198 static struct attribute *port_cc_default_attributes[] = { 199 - &cc_prescan_attr.attr 199 + &cc_prescan_attr.attr, 200 + NULL 200 201 }; 201 202 202 203 static struct kobj_type port_cc_ktype = {
+1 -2
drivers/infiniband/hw/i40iw/i40iw_cm.c
··· 784 784 } 785 785 786 786 ctrl_ird |= IETF_PEER_TO_PEER; 787 - ctrl_ird |= IETF_FLPDU_ZERO_LEN; 788 787 789 788 switch (mpa_key) { 790 789 case MPA_KEY_REQUEST: ··· 2445 2446 } else { 2446 2447 type = I40IW_CM_EVENT_CONNECTED; 2447 2448 cm_node->state = I40IW_CM_STATE_OFFLOADED; 2448 - i40iw_send_ack(cm_node); 2449 2449 } 2450 + i40iw_send_ack(cm_node); 2450 2451 break; 2451 2452 default: 2452 2453 pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
+1 -11
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
··· 285 285 struct i40iw_sc_dev *dev = vsi->dev; 286 286 struct i40iw_sc_qp *qp = NULL; 287 287 bool qs_handle_change = false; 288 - bool mss_change = false; 289 288 unsigned long flags; 290 289 u16 qs_handle; 291 290 int i; 292 291 293 - if (vsi->mss != l2params->mss) { 294 - mss_change = true; 295 - vsi->mss = l2params->mss; 296 - } 292 + vsi->mss = l2params->mss; 297 293 298 294 i40iw_fill_qos_list(l2params->qs_handle_list); 299 295 for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { 300 296 qs_handle = l2params->qs_handle_list[i]; 301 297 if (vsi->qos[i].qs_handle != qs_handle) 302 298 qs_handle_change = true; 303 - else if (!mss_change) 304 - continue; /* no MSS nor qs handle change */ 305 299 spin_lock_irqsave(&vsi->qos[i].lock, flags); 306 300 qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); 307 301 while (qp) { 308 - if (mss_change) 309 - i40iw_qp_mss_modify(dev, qp); 310 302 if (qs_handle_change) { 311 303 qp->qs_handle = qs_handle; 312 304 /* issue cqp suspend command */ ··· 2387 2395 2388 2396 set_64bit_val(wqe, 2389 2397 8, 2390 - LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) | 2391 2398 LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); 2392 2399 2393 2400 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); ··· 2401 2410 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | 2402 2411 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | 2403 2412 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | 2404 - LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) | 2405 2413 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | 2406 2414 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | 2407 2415 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
+13 -7
drivers/infiniband/hw/i40iw/i40iw_main.c
··· 1319 1319 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, 1320 1320 I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); 1321 1321 if (status) 1322 - goto exit; 1322 + goto error; 1323 1323 info.fpm_query_buf_pa = mem.pa; 1324 1324 info.fpm_query_buf = mem.va; 1325 1325 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, 1326 1326 I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); 1327 1327 if (status) 1328 - goto exit; 1328 + goto error; 1329 1329 info.fpm_commit_buf_pa = mem.pa; 1330 1330 info.fpm_commit_buf = mem.va; 1331 1331 info.hmc_fn_id = ldev->fid; ··· 1347 1347 info.exception_lan_queue = 1; 1348 1348 info.vchnl_send = i40iw_virtchnl_send; 1349 1349 status = i40iw_device_init(&iwdev->sc_dev, &info); 1350 - exit: 1351 - if (status) { 1352 - kfree(iwdev->hmc_info_mem); 1353 - iwdev->hmc_info_mem = NULL; 1354 - } 1350 + 1351 + if (status) 1352 + goto error; 1355 1353 memset(&vsi_info, 0, sizeof(vsi_info)); 1356 1354 vsi_info.dev = &iwdev->sc_dev; 1357 1355 vsi_info.back_vsi = (void *)iwdev; ··· 1360 1362 memset(&stats_info, 0, sizeof(stats_info)); 1361 1363 stats_info.fcn_id = ldev->fid; 1362 1364 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); 1365 + if (!stats_info.pestat) { 1366 + status = I40IW_ERR_NO_MEMORY; 1367 + goto error; 1368 + } 1363 1369 stats_info.stats_initialize = true; 1364 1370 if (stats_info.pestat) 1365 1371 i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); 1366 1372 } 1373 + return status; 1374 + error: 1375 + kfree(iwdev->hmc_info_mem); 1376 + iwdev->hmc_info_mem = NULL; 1367 1377 return status; 1368 1378 } 1369 1379
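This hunk funnels every failure in the setup path through a single error label so hmc_info_mem cannot leak, and adds the missing NULL check on the pestat allocation. A generic sketch of the unwind idiom it converges on, with illustrative names only:

	/* Hedged sketch of the single-exit unwind idiom (names are made up). */
	static int setup_two_buffers(void **a, void **b)
	{
		int status;

		*a = kzalloc(64, GFP_KERNEL);
		if (!*a)
			return -ENOMEM;

		*b = kzalloc(64, GFP_KERNEL);
		if (!*b) {
			status = -ENOMEM;
			goto error;		/* every later failure funnels here */
		}
		return 0;

	error:
		kfree(*a);			/* undo prior allocations, newest first */
		*a = NULL;
		return status;
	}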
-1
drivers/infiniband/hw/i40iw/i40iw_osdep.h
··· 199 199 struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); 200 200 void *i40iw_remove_head(struct list_head *list); 201 201 void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); 202 - void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); 203 202 204 203 void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); 205 204 void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
-2
drivers/infiniband/hw/i40iw/i40iw_type.h
··· 541 541 struct i40iw_modify_qp_info { 542 542 u64 rx_win0; 543 543 u64 rx_win1; 544 - u16 new_mss; 545 544 u8 next_iwarp_state; 546 545 u8 termlen; 547 546 bool ord_valid; ··· 553 554 bool dont_send_term; 554 555 bool dont_send_fin; 555 556 bool cached_var_valid; 556 - bool mss_change; 557 557 bool force_loopback; 558 558 }; 559 559
-17
drivers/infiniband/hw/i40iw/i40iw_utils.c
··· 757 757 } 758 758 759 759 /** 760 - * i40iw_qp_mss_modify - modify mss for qp 761 - * @dev: hardware control device structure 762 - * @qp: hardware control qp 763 - */ 764 - void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) 765 - { 766 - struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; 767 - struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; 768 - struct i40iw_modify_qp_info info; 769 - 770 - memset(&info, 0, sizeof(info)); 771 - info.mss_change = true; 772 - info.new_mss = qp->vsi->mss; 773 - i40iw_hw_modify_qp(iwdev, iwqp, &info, false); 774 - } 775 - 776 - /** 777 760 * i40iw_term_modify_qp - modify qp for term message 778 761 * @qp: hardware control qp 779 762 * @next_state: qp's next state
+1 -4
drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
··· 443 443 if (!dev->vchnl_up) 444 444 return I40IW_ERR_NOT_READY; 445 445 if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { 446 - if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0) 447 - vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); 448 - else 449 - vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); 446 + vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); 450 447 return I40IW_SUCCESS; 451 448 } 452 449 for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
+1
drivers/infiniband/hw/mlx4/mad.c
··· 1578 1578 if (port < 0) 1579 1579 return; 1580 1580 ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); 1581 + ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port); 1581 1582 1582 1583 mlx4_ib_query_ah(&ah.ibah, &ah_attr); 1583 1584 if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
+14
drivers/infiniband/hw/mlx5/main.c
··· 2979 2979 return ret; 2980 2980 } 2981 2981 2982 + static u8 mlx5_get_umr_fence(u8 umr_fence_cap) 2983 + { 2984 + switch (umr_fence_cap) { 2985 + case MLX5_CAP_UMR_FENCE_NONE: 2986 + return MLX5_FENCE_MODE_NONE; 2987 + case MLX5_CAP_UMR_FENCE_SMALL: 2988 + return MLX5_FENCE_MODE_INITIATOR_SMALL; 2989 + default: 2990 + return MLX5_FENCE_MODE_STRONG_ORDERING; 2991 + } 2992 + } 2993 + 2982 2994 static int create_dev_resources(struct mlx5_ib_resources *devr) 2983 2995 { 2984 2996 struct ib_srq_init_attr attr; ··· 3704 3692 dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext; 3705 3693 3706 3694 mlx5_ib_internal_fill_odp_caps(dev); 3695 + 3696 + dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); 3707 3697 3708 3698 if (MLX5_CAP_GEN(mdev, imaicl)) { 3709 3699 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
+2 -1
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 349 349 struct mlx5_ib_wq rq; 350 350 351 351 u8 sq_signal_bits; 352 - u8 fm_cache; 352 + u8 next_fence; 353 353 struct mlx5_ib_wq sq; 354 354 355 355 /* serialize qp state modifications ··· 654 654 struct mlx5_ib_port *port; 655 655 struct mlx5_sq_bfreg bfreg; 656 656 struct mlx5_sq_bfreg fp_bfreg; 657 + u8 umr_fence; 657 658 }; 658 659 659 660 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
+23 -36
drivers/infiniband/hw/mlx5/qp.c
··· 3738 3738 } 3739 3739 } 3740 3740 3741 - static u8 get_fence(u8 fence, struct ib_send_wr *wr) 3742 - { 3743 - if (unlikely(wr->opcode == IB_WR_LOCAL_INV && 3744 - wr->send_flags & IB_SEND_FENCE)) 3745 - return MLX5_FENCE_MODE_STRONG_ORDERING; 3746 - 3747 - if (unlikely(fence)) { 3748 - if (wr->send_flags & IB_SEND_FENCE) 3749 - return MLX5_FENCE_MODE_SMALL_AND_FENCE; 3750 - else 3751 - return fence; 3752 - } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { 3753 - return MLX5_FENCE_MODE_FENCE; 3754 - } 3755 - 3756 - return 0; 3757 - } 3758 - 3759 3741 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, 3760 3742 struct mlx5_wqe_ctrl_seg **ctrl, 3761 3743 struct ib_send_wr *wr, unsigned *idx, ··· 3766 3784 static void finish_wqe(struct mlx5_ib_qp *qp, 3767 3785 struct mlx5_wqe_ctrl_seg *ctrl, 3768 3786 u8 size, unsigned idx, u64 wr_id, 3769 - int nreq, u8 fence, u8 next_fence, 3770 - u32 mlx5_opcode) 3787 + int nreq, u8 fence, u32 mlx5_opcode) 3771 3788 { 3772 3789 u8 opmod = 0; 3773 3790 ··· 3774 3793 mlx5_opcode | ((u32)opmod << 24)); 3775 3794 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); 3776 3795 ctrl->fm_ce_se |= fence; 3777 - qp->fm_cache = next_fence; 3778 3796 if (unlikely(qp->wq_sig)) 3779 3797 ctrl->signature = wq_sig(ctrl); 3780 3798 ··· 3833 3853 goto out; 3834 3854 } 3835 3855 3836 - fence = qp->fm_cache; 3837 3856 num_sge = wr->num_sge; 3838 3857 if (unlikely(num_sge > qp->sq.max_gs)) { 3839 3858 mlx5_ib_warn(dev, "\n"); ··· 3847 3868 err = -ENOMEM; 3848 3869 *bad_wr = wr; 3849 3870 goto out; 3871 + } 3872 + 3873 + if (wr->opcode == IB_WR_LOCAL_INV || 3874 + wr->opcode == IB_WR_REG_MR) { 3875 + fence = dev->umr_fence; 3876 + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3877 + } else if (wr->send_flags & IB_SEND_FENCE) { 3878 + if (qp->next_fence) 3879 + fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; 3880 + else 3881 + fence = MLX5_FENCE_MODE_FENCE; 3882 + } else { 3883 + fence = qp->next_fence; 3850 3884 } 3851 3885 3852 3886 switch (ibqp->qp_type) { ··· 3888 3896 goto out; 3889 3897 3890 3898 case IB_WR_LOCAL_INV: 3891 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3892 3899 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; 3893 3900 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); 3894 3901 set_linv_wr(qp, &seg, &size); ··· 3895 3904 break; 3896 3905 3897 3906 case IB_WR_REG_MR: 3898 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3899 3907 qp->sq.wr_data[idx] = IB_WR_REG_MR; 3900 3908 ctrl->imm = cpu_to_be32(reg_wr(wr)->key); 3901 3909 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); ··· 3917 3927 goto out; 3918 3928 } 3919 3929 3920 - finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3921 - nreq, get_fence(fence, wr), 3922 - next_fence, MLX5_OPCODE_UMR); 3930 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 3931 + fence, MLX5_OPCODE_UMR); 3923 3932 /* 3924 3933 * SET_PSV WQEs are not signaled and solicited 3925 3934 * on error ··· 3943 3954 goto out; 3944 3955 } 3945 3956 3946 - finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3947 - nreq, get_fence(fence, wr), 3948 - next_fence, MLX5_OPCODE_SET_PSV); 3957 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 3958 + fence, MLX5_OPCODE_SET_PSV); 3949 3959 err = begin_wqe(qp, &seg, &ctrl, wr, 3950 3960 &idx, &size, nreq); 3951 3961 if (err) { ··· 3954 3966 goto out; 3955 3967 } 3956 3968 3957 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3958 3969 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, 3959 3970 mr->sig->psv_wire.psv_idx, &seg, 3960 3971 &size); ··· 3963 3976 goto out; 3964 3977 } 3965 3978 3966 - finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3967 - nreq, get_fence(fence, wr), 3968 - next_fence, MLX5_OPCODE_SET_PSV); 3979 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 3980 + fence, MLX5_OPCODE_SET_PSV); 3981 + qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3969 3982 num_sge = 0; 3970 3983 goto skip_psv; 3971 3984 ··· 4076 4089 } 4077 4090 } 4078 4091 4079 - finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 4080 - get_fence(fence, wr), next_fence, 4092 + qp->next_fence = next_fence; 4093 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence, 4081 4094 mlx5_ib_opcode[wr->opcode]); 4082 4095 skip_psv: 4083 4096 if (0)
+1 -2
drivers/infiniband/hw/nes/nes_cm.c
··· 610 610 ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; 611 611 } 612 612 ctrl_ird |= IETF_PEER_TO_PEER; 613 - ctrl_ird |= IETF_FLPDU_ZERO_LEN; 614 613 615 614 switch (mpa_key) { 616 615 case MPA_KEY_REQUEST: ··· 1825 1826 type = NES_CM_EVENT_CONNECTED; 1826 1827 cm_node->state = NES_CM_STATE_TSA; 1827 1828 } 1828 - 1829 + send_ack(cm_node, NULL); 1829 1830 break; 1830 1831 default: 1831 1832 WARN_ON(1);
+6 -4
drivers/infiniband/hw/qedr/qedr_cm.c
··· 270 270 return rc; 271 271 } 272 272 273 - vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); 274 - if (vlan_id < VLAN_CFI_MASK) 275 - has_vlan = true; 276 - if (sgid_attr.ndev) 273 + if (sgid_attr.ndev) { 274 + vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); 275 + if (vlan_id < VLAN_CFI_MASK) 276 + has_vlan = true; 277 + 277 278 dev_put(sgid_attr.ndev); 279 + } 278 280 279 281 if (!memcmp(&sgid, &zgid, sizeof(sgid))) { 280 282 DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+3 -1
drivers/infiniband/hw/qib/qib_rc.c
··· 1956 1956 ret = qib_get_rwqe(qp, 1); 1957 1957 if (ret < 0) 1958 1958 goto nack_op_err; 1959 - if (!ret) 1959 + if (!ret) { 1960 + rvt_put_ss(&qp->r_sge); 1960 1961 goto rnr_nak; 1962 + } 1961 1963 wc.ex.imm_data = ohdr->u.rc.imm_data; 1962 1964 hdrsize += 4; 1963 1965 wc.wc_flags = IB_WC_WITH_IMM;
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
··· 178 178 static int ipoib_get_link_ksettings(struct net_device *netdev, 179 179 struct ethtool_link_ksettings *cmd) 180 180 { 181 - struct ipoib_dev_priv *priv = netdev_priv(netdev); 181 + struct ipoib_dev_priv *priv = ipoib_priv(netdev); 182 182 struct ib_port_attr attr; 183 183 int ret, speed, width; 184 184
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1590 1590 wait_for_completion(&priv->ntbl.deleted); 1591 1591 } 1592 1592 1593 - void ipoib_dev_uninit_default(struct net_device *dev) 1593 + static void ipoib_dev_uninit_default(struct net_device *dev) 1594 1594 { 1595 1595 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1596 1596
+2 -2
drivers/infiniband/ulp/srp/ib_srp.c
··· 320 320 ch->path.sgid = target->sgid; 321 321 ch->path.dgid = target->orig_dgid; 322 322 ch->path.pkey = target->pkey; 323 - sa_path_set_service_id(&ch->path, target->service_id); 323 + ch->path.service_id = target->service_id; 324 324 325 325 return 0; 326 326 } ··· 575 575 return 0; 576 576 577 577 err_qp: 578 - srp_destroy_qp(ch, qp); 578 + ib_destroy_qp(qp); 579 579 580 580 err_send_cq: 581 581 ib_free_cq(send_cq);
+1 -1
drivers/input/keyboard/tm2-touchkey.c
··· 213 213 /* led device */ 214 214 touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME; 215 215 touchkey->led_dev.brightness = LED_FULL; 216 - touchkey->led_dev.max_brightness = LED_FULL; 216 + touchkey->led_dev.max_brightness = LED_ON; 217 217 touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set; 218 218 219 219 error = devm_led_classdev_register(&client->dev, &touchkey->led_dev);
+37 -7
drivers/input/misc/axp20x-pek.c
··· 256 256 return 0; 257 257 } 258 258 259 + #ifdef CONFIG_ACPI 260 + static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek, 261 + struct platform_device *pdev) 262 + { 263 + unsigned long long hrv = 0; 264 + acpi_status status; 265 + 266 + if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) && 267 + axp20x_pek->axp20x->variant == AXP288_ID) { 268 + status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent), 269 + "_HRV", NULL, &hrv); 270 + if (ACPI_FAILURE(status)) 271 + dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n"); 272 + 273 + /* 274 + * On Cherry Trail platforms (hrv == 3), do not register the 275 + * input device if there is an "INTCFD9" or "ACPI0011" gpio 276 + * button ACPI device, as that handles the power button too, 277 + * and otherwise we end up reporting all presses twice. 278 + */ 279 + if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) || 280 + acpi_dev_present("ACPI0011", NULL, -1))) 281 + return false; 282 + 283 + } 284 + 285 + return true; 286 + } 287 + #else 288 + static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek, 289 + struct platform_device *pdev) 290 + { 291 + return true; 292 + } 293 + #endif 294 + 259 295 static int axp20x_pek_probe(struct platform_device *pdev) 260 296 { 261 297 struct axp20x_pek *axp20x_pek; ··· 304 268 305 269 axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent); 306 270 307 - /* 308 - * Do not register the input device if there is an "INTCFD9" 309 - * gpio button ACPI device, that handles the power button too, 310 - * and otherwise we end up reporting all presses twice. 311 - */ 312 - if (!acpi_dev_found("INTCFD9") || 313 - !IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY)) { 271 + if (axp20x_pek_should_register_input(axp20x_pek, pdev)) { 314 272 error = axp20x_pek_probe_input_device(axp20x_pek, pdev); 315 273 if (error) 316 274 return error;
+28 -9
drivers/input/mouse/synaptics.c
··· 176 176 NULL 177 177 }; 178 178 179 + static const char * const forcepad_pnp_ids[] = { 180 + "SYN300D", 181 + "SYN3014", 182 + NULL 183 + }; 184 + 179 185 /* 180 186 * Send a command to the synpatics touchpad by special commands 181 187 */ ··· 403 397 { 404 398 int error; 405 399 400 + memset(info, 0, sizeof(*info)); 401 + 406 402 error = synaptics_identify(psmouse, info); 407 403 if (error) 408 404 return error; ··· 486 478 1264, 5675, 1171, 4688 487 479 }, 488 480 { } 489 - }; 490 - 491 - /* This list has been kindly provided by Synaptics. */ 492 - static const char * const forcepad_pnp_ids[] = { 493 - "SYN300D", 494 - "SYN3014", 495 - NULL 496 481 }; 497 482 498 483 /***************************************************************************** ··· 1688 1687 SYNAPTICS_INTERTOUCH_ON, 1689 1688 }; 1690 1689 1691 - static int synaptics_intertouch = SYNAPTICS_INTERTOUCH_NOT_SET; 1690 + static int synaptics_intertouch = IS_ENABLED(CONFIG_RMI4_SMB) ? 1691 + SYNAPTICS_INTERTOUCH_NOT_SET : SYNAPTICS_INTERTOUCH_OFF; 1692 1692 module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644); 1693 1693 MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device."); 1694 1694 ··· 1739 1737 1740 1738 if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) { 1741 1739 if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && 1742 - !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) 1740 + !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) { 1741 + 1742 + if (!psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) 1743 + psmouse_info(psmouse, 1744 + "Your touchpad (%s) says it can support a different bus. " 1745 + "If i2c-hid and hid-rmi are not used, you might want to try setting psmouse.synaptics_intertouch to 1 and report this to linux-input@vger.kernel.org.\n", 1746 + psmouse->ps2dev.serio->firmware_id); 1747 + 1743 1748 return -ENXIO; 1749 + } 1744 1750 } 1745 1751 1746 1752 psmouse_info(psmouse, "Trying to set up SMBus access\n"); ··· 1820 1810 } 1821 1811 1822 1812 if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) { 1813 + if ((!IS_ENABLED(CONFIG_RMI4_SMB) || 1814 + !IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS)) && 1815 + /* Forcepads need F21, which is not ready */ 1816 + !psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) { 1817 + psmouse_warn(psmouse, 1818 + "The touchpad can support a better bus than the too old PS/2 protocol. " 1819 + "Make sure MOUSE_PS2_SYNAPTICS_SMBUS and RMI4_SMB are enabled to get a better touchpad experience.\n"); 1820 + } 1821 + 1823 1822 error = synaptics_setup_intertouch(psmouse, &info, true); 1824 1823 if (!error) 1825 1824 return PSMOUSE_SYNAPTICS_SMBUS;
+3
drivers/input/touchscreen/silead.c
··· 526 526 { 527 527 struct i2c_client *client = to_i2c_client(dev); 528 528 529 + disable_irq(client->irq); 529 530 silead_ts_set_power(client, SILEAD_POWER_OFF); 530 531 return 0; 531 532 } ··· 551 550 dev_err(dev, "Resume error, status: 0x%02x\n", status); 552 551 return -ENODEV; 553 552 } 553 + 554 + enable_irq(client->irq); 554 555 555 556 return 0; 556 557 }
+4 -4
drivers/md/bitmap.c
··· 485 485 pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); 486 486 pr_debug(" version: %d\n", le32_to_cpu(sb->version)); 487 487 pr_debug(" uuid: %08x.%08x.%08x.%08x\n", 488 - *(__u32 *)(sb->uuid+0), 489 - *(__u32 *)(sb->uuid+4), 490 - *(__u32 *)(sb->uuid+8), 491 - *(__u32 *)(sb->uuid+12)); 488 + le32_to_cpu(*(__u32 *)(sb->uuid+0)), 489 + le32_to_cpu(*(__u32 *)(sb->uuid+4)), 490 + le32_to_cpu(*(__u32 *)(sb->uuid+8)), 491 + le32_to_cpu(*(__u32 *)(sb->uuid+12))); 492 492 pr_debug(" events: %llu\n", 493 493 (unsigned long long) le64_to_cpu(sb->events)); 494 494 pr_debug("events cleared: %llu\n",
+1 -1
drivers/md/dm-bufio.c
··· 1334 1334 { 1335 1335 struct dm_io_request io_req = { 1336 1336 .bi_op = REQ_OP_WRITE, 1337 - .bi_op_flags = REQ_PREFLUSH, 1337 + .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, 1338 1338 .mem.type = DM_IO_KMEM, 1339 1339 .mem.ptr.addr = NULL, 1340 1340 .client = c->dm_io,
+8 -22
drivers/md/dm-integrity.c
··· 783 783 for (i = 0; i < commit_sections; i++) 784 784 rw_section_mac(ic, commit_start + i, true); 785 785 } 786 - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp); 786 + rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, 787 + commit_sections, &io_comp); 787 788 } else { 788 789 unsigned to_end; 789 790 io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); ··· 2375 2374 blk_queue_max_integrity_segments(disk->queue, UINT_MAX); 2376 2375 } 2377 2376 2378 - /* FIXME: use new kvmalloc */ 2379 - static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp) 2380 - { 2381 - void *ptr = NULL; 2382 - 2383 - if (size <= PAGE_SIZE) 2384 - ptr = kmalloc(size, GFP_KERNEL | gfp); 2385 - if (!ptr && size <= KMALLOC_MAX_SIZE) 2386 - ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp); 2387 - if (!ptr) 2388 - ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL); 2389 - 2390 - return ptr; 2391 - } 2392 - 2393 2377 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) 2394 2378 { 2395 2379 unsigned i; ··· 2393 2407 struct page_list *pl; 2394 2408 unsigned i; 2395 2409 2396 - pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO); 2410 + pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO); 2397 2411 if (!pl) 2398 2412 return NULL; 2399 2413 ··· 2423 2437 struct scatterlist **sl; 2424 2438 unsigned i; 2425 2439 2426 - sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO); 2440 + sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO); 2427 2441 if (!sl) 2428 2442 return NULL; 2429 2443 ··· 2439 2453 2440 2454 n_pages = (end_index - start_index + 1); 2441 2455 2442 - s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0); 2456 + s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL); 2443 2457 if (!s) { 2444 2458 dm_integrity_free_journal_scatterlist(ic, sl); 2445 2459 return NULL; ··· 2603 2617 goto bad; 2604 2618 } 2605 2619 2606 - sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0); 2620 + sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL); 2607 2621 if (!sg) { 2608 2622 *error = "Unable to allocate sg list"; 2609 2623 r = -ENOMEM; ··· 2659 2673 r = -ENOMEM; 2660 2674 goto bad; 2661 2675 } 2662 - ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO); 2676 + ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO); 2663 2677 if (!ic->sk_requests) { 2664 2678 *error = "Unable to allocate sk requests"; 2665 2679 r = -ENOMEM; ··· 2726 2740 r = -ENOMEM; 2727 2741 goto bad; 2728 2742 } 2729 - ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0); 2743 + ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); 2730 2744 if (!ic->journal_tree) { 2731 2745 *error = "Could not allocate memory for journal tree"; 2732 2746 r = -ENOMEM;
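With the FIXME resolved, the open-coded dm_integrity_kvmalloc() gives way to the mm-provided kvmalloc(). As a simplified sketch of its semantics only (not the mm/util.c implementation): try the slab allocator first, then fall back to vmalloc:

	/* Simplified sketch of kvmalloc() behavior; not the real implementation. */
	static void *kvmalloc_sketch(size_t size, gfp_t flags)
	{
		void *p = NULL;

		if (size <= KMALLOC_MAX_SIZE)	/* physically contiguous attempt */
			p = kmalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);

		return p ? p : __vmalloc(size, flags, PAGE_KERNEL); /* virtual fallback */
	}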
+3 -2
drivers/md/dm-ioctl.c
··· 1710 1710 } 1711 1711 1712 1712 /* 1713 - * Try to avoid low memory issues when a device is suspended. 1713 + * Use __GFP_HIGH to avoid low memory issues when a device is 1714 + * suspended and the ioctl is needed to resume it. 1714 1715 * Use kmalloc() rather than vmalloc() when we can. 1715 1716 */ 1716 1717 dmi = NULL; 1717 1718 noio_flag = memalloc_noio_save(); 1718 - dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL); 1719 + dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH); 1719 1720 memalloc_noio_restore(noio_flag); 1720 1721 1721 1722 if (!dmi) {
+1 -1
drivers/md/dm-raid1.c
··· 260 260 struct mirror *m; 261 261 struct dm_io_request io_req = { 262 262 .bi_op = REQ_OP_WRITE, 263 - .bi_op_flags = REQ_PREFLUSH, 263 + .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, 264 264 .mem.type = DM_IO_KMEM, 265 265 .mem.ptr.addr = NULL, 266 266 .client = ms->io_client,
+2 -1
drivers/md/dm-snap-persistent.c
··· 741 741 /* 742 742 * Commit exceptions to disk. 743 743 */ 744 - if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA)) 744 + if (ps->valid && area_io(ps, REQ_OP_WRITE, 745 + REQ_PREFLUSH | REQ_FUA | REQ_SYNC)) 745 746 ps->valid = 0; 746 747 747 748 /*
+2 -2
drivers/md/dm-verity-target.c
··· 166 166 return r; 167 167 } 168 168 169 - if (likely(v->version >= 1)) 169 + if (likely(v->salt_size && (v->version >= 1))) 170 170 r = verity_hash_update(v, req, v->salt, v->salt_size, res); 171 171 172 172 return r; ··· 177 177 { 178 178 int r; 179 179 180 - if (unlikely(!v->version)) { 180 + if (unlikely(v->salt_size && (!v->version))) { 181 181 r = verity_hash_update(v, req, v->salt, v->salt_size, res); 182 182 183 183 if (r < 0) {
+1 -1
drivers/md/dm.c
··· 1657 1657 1658 1658 bio_init(&md->flush_bio, NULL, 0); 1659 1659 md->flush_bio.bi_bdev = md->bdev; 1660 - md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 1660 + md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1661 1661 1662 1662 dm_stats_init(&md->stats); 1663 1663
+3 -1
drivers/md/md-cluster.c
··· 1311 1311 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); 1312 1312 lock_comm(cinfo, 1); 1313 1313 ret = __sendmsg(cinfo, &cmsg); 1314 - if (ret) 1314 + if (ret) { 1315 + unlock_comm(cinfo); 1315 1316 return ret; 1317 + } 1316 1318 cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; 1317 1319 ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); 1318 1320 cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
+1 -1
drivers/md/md.c
··· 765 765 test_bit(FailFast, &rdev->flags) && 766 766 !test_bit(LastDev, &rdev->flags)) 767 767 ff = MD_FAILFAST; 768 - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff; 768 + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; 769 769 770 770 atomic_inc(&mddev->pending_writes); 771 771 submit_bio(bio);
+2 -2
drivers/md/raid5-cache.c
··· 1782 1782 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 1783 1783 mb, PAGE_SIZE)); 1784 1784 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, 1785 - REQ_FUA, false)) { 1785 + REQ_SYNC | REQ_FUA, false)) { 1786 1786 __free_page(page); 1787 1787 return -EIO; 1788 1788 } ··· 2388 2388 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 2389 2389 mb, PAGE_SIZE)); 2390 2390 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, 2391 - REQ_OP_WRITE, REQ_FUA, false); 2391 + REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); 2392 2392 sh->log_start = ctx->pos; 2393 2393 list_add_tail(&sh->r5c, &log->stripe_in_journal_list); 2394 2394 atomic_inc(&log->stripe_in_journal_count);
+2 -2
drivers/md/raid5-ppl.c
··· 907 907 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); 908 908 909 909 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, 910 - PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0, 911 - false)) { 910 + PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC | 911 + REQ_FUA, 0, false)) { 912 912 md_error(rdev->mddev, rdev); 913 913 ret = -EIO; 914 914 }
+14 -4
drivers/md/raid5.c
··· 4085 4085 set_bit(STRIPE_INSYNC, &sh->state); 4086 4086 else { 4087 4087 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4088 - if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4088 + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { 4089 4089 /* don't try to repair!! */ 4090 4090 set_bit(STRIPE_INSYNC, &sh->state); 4091 - else { 4091 + pr_warn_ratelimited("%s: mismatch sector in range " 4092 + "%llu-%llu\n", mdname(conf->mddev), 4093 + (unsigned long long) sh->sector, 4094 + (unsigned long long) sh->sector + 4095 + STRIPE_SECTORS); 4096 + } else { 4092 4097 sh->check_state = check_state_compute_run; 4093 4098 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4094 4099 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); ··· 4242 4237 } 4243 4238 } else { 4244 4239 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4245 - if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4240 + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { 4246 4241 /* don't try to repair!! */ 4247 4242 set_bit(STRIPE_INSYNC, &sh->state); 4248 - else { 4243 + pr_warn_ratelimited("%s: mismatch sector in range " 4244 + "%llu-%llu\n", mdname(conf->mddev), 4245 + (unsigned long long) sh->sector, 4246 + (unsigned long long) sh->sector + 4247 + STRIPE_SECTORS); 4248 + } else { 4249 4249 int *target = &sh->ops.target; 4250 4250 4251 4251 sh->ops.target = -1;
+4 -4
drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
··· 493 493 } 494 494 495 495 static struct vdec_common_if vdec_h264_if = { 496 - vdec_h264_init, 497 - vdec_h264_decode, 498 - vdec_h264_get_param, 499 - vdec_h264_deinit, 496 + .init = vdec_h264_init, 497 + .decode = vdec_h264_decode, 498 + .get_param = vdec_h264_get_param, 499 + .deinit = vdec_h264_deinit, 500 500 }; 501 501 502 502 struct vdec_common_if *get_h264_dec_comm_if(void);
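This hunk, and the two mtk-vcodec hunks below, convert positional struct initializers to C99 designated initializers, so each callback is bound to its field by name rather than by position. An illustrative, stand-alone example of the difference (not driver code):

	/* Designated initializers survive field reordering; positional ones don't. */
	struct codec_ops {
		int (*init)(void);
		void (*deinit)(void);
	};

	static int demo_init(void) { return 0; }
	static void demo_deinit(void) { }

	static const struct codec_ops demo_ops = {
		.init = demo_init,
		.deinit = demo_deinit,
	};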
+4 -4
drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
··· 620 620 } 621 621 622 622 static struct vdec_common_if vdec_vp8_if = { 623 - vdec_vp8_init, 624 - vdec_vp8_decode, 625 - vdec_vp8_get_param, 626 - vdec_vp8_deinit, 623 + .init = vdec_vp8_init, 624 + .decode = vdec_vp8_decode, 625 + .get_param = vdec_vp8_get_param, 626 + .deinit = vdec_vp8_deinit, 627 627 }; 628 628 629 629 struct vdec_common_if *get_vp8_dec_comm_if(void);
+4 -4
drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
··· 979 979 } 980 980 981 981 static struct vdec_common_if vdec_vp9_if = { 982 - vdec_vp9_init, 983 - vdec_vp9_decode, 984 - vdec_vp9_get_param, 985 - vdec_vp9_deinit, 982 + .init = vdec_vp9_init, 983 + .decode = vdec_vp9_decode, 984 + .get_param = vdec_vp9_get_param, 985 + .deinit = vdec_vp9_deinit, 986 986 }; 987 987 988 988 struct vdec_common_if *get_vp9_dec_comm_if(void);
+11 -1
drivers/misc/sgi-xp/xp.h
··· 309 309 xpc_send(short partid, int ch_number, u32 flags, void *payload, 310 310 u16 payload_size) 311 311 { 312 + if (!xpc_interface.send) 313 + return xpNotLoaded; 314 + 312 315 return xpc_interface.send(partid, ch_number, flags, payload, 313 316 payload_size); 314 317 } ··· 320 317 xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, 321 318 u16 payload_size, xpc_notify_func func, void *key) 322 319 { 320 + if (!xpc_interface.send_notify) 321 + return xpNotLoaded; 322 + 323 323 return xpc_interface.send_notify(partid, ch_number, flags, payload, 324 324 payload_size, func, key); 325 325 } ··· 330 324 static inline void 331 325 xpc_received(short partid, int ch_number, void *payload) 332 326 { 333 - return xpc_interface.received(partid, ch_number, payload); 327 + if (xpc_interface.received) 328 + xpc_interface.received(partid, ch_number, payload); 334 329 } 335 330 336 331 static inline enum xp_retval 337 332 xpc_partid_to_nasids(short partid, void *nasids) 338 333 { 334 + if (!xpc_interface.partid_to_nasids) 335 + return xpNotLoaded; 336 + 339 337 return xpc_interface.partid_to_nasids(partid, nasids); 340 338 } 341 339
+7 -29
drivers/misc/sgi-xp/xp_main.c
··· 69 69 EXPORT_SYMBOL_GPL(xpc_registrations); 70 70 71 71 /* 72 - * Initialize the XPC interface to indicate that XPC isn't loaded. 72 + * Initialize the XPC interface to NULL to indicate that XPC isn't loaded. 73 73 */ 74 - static enum xp_retval 75 - xpc_notloaded(void) 76 - { 77 - return xpNotLoaded; 78 - } 79 - 80 - struct xpc_interface xpc_interface = { 81 - (void (*)(int))xpc_notloaded, 82 - (void (*)(int))xpc_notloaded, 83 - (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, 84 - (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, 85 - void *))xpc_notloaded, 86 - (void (*)(short, int, void *))xpc_notloaded, 87 - (enum xp_retval(*)(short, void *))xpc_notloaded 88 - }; 74 + struct xpc_interface xpc_interface = { }; 89 75 EXPORT_SYMBOL_GPL(xpc_interface); 90 76 91 77 /* ··· 101 115 void 102 116 xpc_clear_interface(void) 103 117 { 104 - xpc_interface.connect = (void (*)(int))xpc_notloaded; 105 - xpc_interface.disconnect = (void (*)(int))xpc_notloaded; 106 - xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16)) 107 - xpc_notloaded; 108 - xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *, 109 - u16, xpc_notify_func, 110 - void *))xpc_notloaded; 111 - xpc_interface.received = (void (*)(short, int, void *)) 112 - xpc_notloaded; 113 - xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) 114 - xpc_notloaded; 118 + memset(&xpc_interface, 0, sizeof(xpc_interface)); 115 119 } 116 120 EXPORT_SYMBOL_GPL(xpc_clear_interface); 117 121 ··· 164 188 165 189 mutex_unlock(&registration->mutex); 166 190 167 - xpc_interface.connect(ch_number); 191 + if (xpc_interface.connect) 192 + xpc_interface.connect(ch_number); 168 193 169 194 return xpSuccess; 170 195 } ··· 214 237 registration->assigned_limit = 0; 215 238 registration->idle_limit = 0; 216 239 217 - xpc_interface.disconnect(ch_number); 240 + if (xpc_interface.disconnect) 241 + xpc_interface.disconnect(ch_number); 218 242 219 243 mutex_unlock(&registration->mutex); 220 244
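Together with the xp.h hunk above, XPC's not-loaded state is now an all-NULL ops structure guarded at each call site, instead of one stub function cast to six incompatible function-pointer types. A small stand-alone sketch of that guarded-optional-callback pattern, with made-up names:

	/* Hedged sketch: a NULL function pointer means the provider isn't loaded. */
	struct demo_iface {
		int (*send)(int chan, const void *buf, unsigned int len);
	};

	static struct demo_iface demo;	/* zero-initialized: nothing registered */

	static int demo_send(int chan, const void *buf, unsigned int len)
	{
		if (!demo.send)
			return -ENODEV;	/* plays the role of xpNotLoaded */
		return demo.send(chan, buf, len);
	}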
+35 -11
drivers/mtd/nand/nand_base.c
··· 202 202 return 0; 203 203 } 204 204 205 - const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { 205 + static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { 206 206 .ecc = nand_ooblayout_ecc_lp_hamming, 207 207 .free = nand_ooblayout_free_lp_hamming, 208 208 }; ··· 4361 4361 /* Initialize the ->data_interface field. */ 4362 4362 ret = nand_init_data_interface(chip); 4363 4363 if (ret) 4364 - return ret; 4364 + goto err_nand_init; 4365 4365 4366 4366 /* 4367 4367 * Setup the data interface correctly on the chip and controller side. ··· 4373 4373 */ 4374 4374 ret = nand_setup_data_interface(chip); 4375 4375 if (ret) 4376 - return ret; 4376 + goto err_nand_init; 4377 4377 4378 4378 nand_maf_id = chip->id.data[0]; 4379 4379 nand_dev_id = chip->id.data[1]; ··· 4404 4404 mtd->size = i * chip->chipsize; 4405 4405 4406 4406 return 0; 4407 + 4408 + err_nand_init: 4409 + /* Free manufacturer priv data. */ 4410 + nand_manufacturer_cleanup(chip); 4411 + 4412 + return ret; 4407 4413 } 4408 4414 EXPORT_SYMBOL(nand_scan_ident); 4409 4415 ··· 4580 4574 4581 4575 /* New bad blocks should be marked in OOB, flash-based BBT, or both */ 4582 4576 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && 4583 - !(chip->bbt_options & NAND_BBT_USE_FLASH))) 4584 - return -EINVAL; 4577 + !(chip->bbt_options & NAND_BBT_USE_FLASH))) { 4578 + ret = -EINVAL; 4579 + goto err_ident; 4580 + } 4585 4581 4586 4582 if (invalid_ecc_page_accessors(chip)) { 4587 4583 pr_err("Invalid ECC page accessors setup\n"); 4588 - return -EINVAL; 4584 + ret = -EINVAL; 4585 + goto err_ident; 4589 4586 } 4590 4587 4591 4588 if (!(chip->options & NAND_OWN_BUFFERS)) { 4592 4589 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); 4593 - if (!nbuf) 4594 - return -ENOMEM; 4590 + if (!nbuf) { 4591 + ret = -ENOMEM; 4592 + goto err_ident; 4593 + } 4595 4594 4596 4595 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); 4597 4596 if (!nbuf->ecccalc) { ··· 4619 4608 4620 4609 chip->buffers = nbuf; 4621 4610 } else { 4622 - if (!chip->buffers) 4623 - return -ENOMEM; 4611 + if (!chip->buffers) { 4612 + ret = -ENOMEM; 4613 + goto err_ident; 4614 + } 4624 4615 } 4625 4616 4626 4617 /* Set the internal oob buffer location, just after the page data */ ··· 4855 4842 return 0; 4856 4843 4857 4844 /* Build bad block table */ 4858 - return chip->scan_bbt(mtd); 4845 + ret = chip->scan_bbt(mtd); 4846 + if (ret) 4847 + goto err_free; 4848 + return 0; 4849 + 4859 4850 err_free: 4860 4851 if (nbuf) { 4861 4852 kfree(nbuf->databuf); ··· 4867 4850 kfree(nbuf->ecccalc); 4868 4851 kfree(nbuf); 4869 4852 } 4853 + 4854 + err_ident: 4855 + /* Clean up nand_scan_ident(). */ 4856 + 4857 + /* Free manufacturer priv data. */ 4858 + nand_manufacturer_cleanup(chip); 4859 + 4870 4860 return ret; 4871 4861 } 4872 4862 EXPORT_SYMBOL(nand_scan_tail);
-1
drivers/mtd/nand/nand_ids.c
··· 6 6 * published by the Free Software Foundation. 7 7 * 8 8 */ 9 - #include <linux/module.h> 10 9 #include <linux/mtd/nand.h> 11 10 #include <linux/sizes.h> 12 11
+3
drivers/mtd/nand/nand_samsung.c
··· 84 84 case 7: 85 85 chip->ecc_strength_ds = 60; 86 86 break; 87 + default: 88 + WARN(1, "Could not decode ECC info"); 89 + chip->ecc_step_ds = 0; 87 90 } 88 91 } 89 92 } else {
+16 -7
drivers/mtd/nand/tango_nand.c
··· 55 55 * byte 1 for other packets in the page (PKT_N, for N > 0) 56 56 * ERR_COUNT_PKT_N is the max error count over all but the first packet. 57 57 */ 58 - #define DECODE_OK_PKT_0(v) ((v) & BIT(7)) 59 - #define DECODE_OK_PKT_N(v) ((v) & BIT(15)) 60 58 #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f) 61 59 #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f) 60 + #define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0) 61 + #define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0) 62 62 63 63 /* Offsets relative to pbus_base */ 64 64 #define PBUS_CS_CTRL 0x83c ··· 193 193 chip->ecc.strength); 194 194 if (res < 0) 195 195 mtd->ecc_stats.failed++; 196 + else 197 + mtd->ecc_stats.corrected += res; 196 198 197 199 bitflips = max(res, bitflips); 198 200 buf += pkt_size; ··· 204 202 return bitflips; 205 203 } 206 204 207 - static int decode_error_report(struct tango_nfc *nfc) 205 + static int decode_error_report(struct nand_chip *chip) 208 206 { 209 207 u32 status, res; 208 + struct mtd_info *mtd = nand_to_mtd(chip); 209 + struct tango_nfc *nfc = to_tango_nfc(chip->controller); 210 210 211 211 status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS); 212 212 if (status & PAGE_IS_EMPTY) ··· 216 212 217 213 res = readl_relaxed(nfc->mem_base + ERROR_REPORT); 218 214 219 - if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res)) 220 - return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); 215 + if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res)) 216 + return -EBADMSG; 221 217 222 - return -EBADMSG; 218 + /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */ 219 + mtd->ecc_stats.corrected += 220 + ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res); 221 + 222 + return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); 223 223 } 224 224 225 225 static void tango_dma_callback(void *arg) ··· 290 282 if (err) 291 283 return err; 292 284 293 - res = decode_error_report(nfc); 285 + res = decode_error_report(chip); 294 286 if (res < 0) { 295 287 chip->ecc.read_oob_raw(mtd, chip, page); 296 288 res = check_erased_page(chip, buf); ··· 671 663 { .compatible = "sigma,smp8758-nand" }, 672 664 { /* sentinel */ } 673 665 }; 666 + MODULE_DEVICE_TABLE(of, tango_nand_ids); 674 667 675 668 static struct platform_driver tango_nand_driver = { 676 669 .probe = tango_nand_probe,
+11
drivers/perf/arm_pmu_acpi.c
··· 29 29 return -EINVAL; 30 30 31 31 gsi = gicc->performance_interrupt; 32 + 33 + /* 34 + * Per the ACPI spec, the MADT cannot describe a PMU that doesn't 35 + * have an interrupt. QEMU advertises this by using a GSI of zero, 36 + * which is not known to be valid on any hardware despite being 37 + * valid per the spec. Take the pragmatic approach and reject a 38 + * GSI of zero for now. 39 + */ 40 + if (!gsi) 41 + return 0; 42 + 32 43 if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) 33 44 trigger = ACPI_EDGE_SENSITIVE; 34 45 else
+3 -17
drivers/pinctrl/core.c
··· 680 680 * pinctrl_generic_free_groups() - removes all pin groups 681 681 * @pctldev: pin controller device 682 682 * 683 - * Note that the caller must take care of locking. 683 + * Note that the caller must take care of locking. The pinctrl groups 684 + * are allocated with devm_kzalloc() so no need to free them here. 684 685 */ 685 686 static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev) 686 687 { 687 688 struct radix_tree_iter iter; 688 - struct group_desc *group; 689 - unsigned long *indices; 690 689 void **slot; 691 - int i = 0; 692 - 693 - indices = devm_kzalloc(pctldev->dev, sizeof(*indices) * 694 - pctldev->num_groups, GFP_KERNEL); 695 - if (!indices) 696 - return; 697 690 698 691 radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0) 699 - indices[i++] = iter.index; 700 - 701 - for (i = 0; i < pctldev->num_groups; i++) { 702 - group = radix_tree_lookup(&pctldev->pin_group_tree, 703 - indices[i]); 704 - radix_tree_delete(&pctldev->pin_group_tree, indices[i]); 705 - devm_kfree(pctldev->dev, group); 706 - } 692 + radix_tree_delete(&pctldev->pin_group_tree, iter.index); 707 693 708 694 pctldev->num_groups = 0; 709 695 }
+12 -4
drivers/pinctrl/freescale/pinctrl-mxs.c
··· 194 194 return 0; 195 195 } 196 196 197 + static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg) 198 + { 199 + u32 tmp; 200 + 201 + tmp = readl(reg); 202 + tmp &= ~(mask << shift); 203 + tmp |= value << shift; 204 + writel(tmp, reg); 205 + } 206 + 197 207 static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, 198 208 unsigned group) 199 209 { ··· 221 211 reg += bank * 0x20 + pin / 16 * 0x10; 222 212 shift = pin % 16 * 2; 223 213 224 - writel(0x3 << shift, reg + CLR); 225 - writel(g->muxsel[i] << shift, reg + SET); 214 + mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg); 226 215 } 227 216 228 217 return 0; ··· 288 279 /* mA */ 289 280 if (config & MA_PRESENT) { 290 281 shift = pin % 8 * 4; 291 - writel(0x3 << shift, reg + CLR); 292 - writel(ma << shift, reg + SET); 282 + mxs_pinctrl_rmwl(ma, 0x3, shift, reg); 293 283 } 294 284 295 285 /* vol */
+19 -5
drivers/pinctrl/intel/pinctrl-cherryview.c
··· 1539 1539 * is not listed below. 1540 1540 */ 1541 1541 static const struct dmi_system_id chv_no_valid_mask[] = { 1542 + /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ 1542 1543 { 1543 - /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ 1544 - .ident = "Acer Chromebook (CYAN)", 1544 + .ident = "Intel_Strago based Chromebooks (All models)", 1545 1545 .matches = { 1546 1546 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1547 - DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"), 1548 - DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"), 1547 + DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), 1549 1548 }, 1550 - } 1549 + }, 1550 + { 1551 + .ident = "Acer Chromebook R11 (Cyan)", 1552 + .matches = { 1553 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1554 + DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), 1555 + }, 1556 + }, 1557 + { 1558 + .ident = "Samsung Chromebook 3 (Celes)", 1559 + .matches = { 1560 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1561 + DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), 1562 + }, 1563 + }, 1564 + {} 1551 1565 }; 1552 1566 1553 1567 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
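The DMI table now matches whole Chromebook families through DMI_PRODUCT_FAMILY rather than pinning one BIOS date, and gains the terminating empty entry it was missing. A hedged sketch of how such a table is typically consumed; the actual call site is outside this hunk:

	/* dmi_check_system() returns the number of matching entries. */
	static bool chv_skip_valid_mask_sketch(void)
	{
		return dmi_check_system(chv_no_valid_mask) > 0;
	}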
-3
drivers/pinctrl/pinconf-generic.c
··· 35 35 PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 36 36 "input bias pull to pin specific state", NULL, false), 37 37 PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), 38 - PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false), 39 38 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), 40 39 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), 41 40 PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), ··· 160 161 { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, 161 162 { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, 162 163 { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, 163 - { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 }, 164 164 { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, 165 165 { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, 166 166 { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, ··· 172 174 { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, 173 175 { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, 174 176 { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, 175 - { "output-enable", PIN_CONFIG_OUTPUT, 1, }, 176 177 { "output-high", PIN_CONFIG_OUTPUT, 1, }, 177 178 { "output-low", PIN_CONFIG_OUTPUT, 0, }, 178 179 { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
+4 -17
drivers/pinctrl/pinmux.c
··· 826 826 * pinmux_generic_free_functions() - removes all functions 827 827 * @pctldev: pin controller device 828 828 * 829 - * Note that the caller must take care of locking. 829 + * Note that the caller must take care of locking. The pinctrl 830 + * functions are allocated with devm_kzalloc() so no need to free 831 + * them here. 830 832 */ 831 833 void pinmux_generic_free_functions(struct pinctrl_dev *pctldev) 832 834 { 833 835 struct radix_tree_iter iter; 834 - struct function_desc *function; 835 - unsigned long *indices; 836 836 void **slot; 837 - int i = 0; 838 - 839 - indices = devm_kzalloc(pctldev->dev, sizeof(*indices) * 840 - pctldev->num_functions, GFP_KERNEL); 841 - if (!indices) 842 - return; 843 837 844 838 radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0) 845 - indices[i++] = iter.index; 846 - 847 - for (i = 0; i < pctldev->num_functions; i++) { 848 - function = radix_tree_lookup(&pctldev->pin_function_tree, 849 - indices[i]); 850 - radix_tree_delete(&pctldev->pin_function_tree, indices[i]); 851 - devm_kfree(pctldev->dev, function); 852 - } 839 + radix_tree_delete(&pctldev->pin_function_tree, iter.index); 853 840 854 841 pctldev->num_functions = 0; 855 842 }
+1 -1
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
··· 394 394 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18), 395 395 SUNXI_FUNCTION(0x0, "gpio_in"), 396 396 SUNXI_FUNCTION(0x1, "gpio_out"), 397 - SUNXI_FUNCTION(0x3, "owa")), /* DOUT */ 397 + SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */ 398 398 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19), 399 399 SUNXI_FUNCTION(0x0, "gpio_in"), 400 400 SUNXI_FUNCTION(0x1, "gpio_out")),
+18 -7
drivers/scsi/cxgbi/libcxgbi.c
··· 1873 1873 tcp_task->dd_data = tdata; 1874 1874 task->hdr = NULL; 1875 1875 1876 + if (tdata->skb) { 1877 + kfree_skb(tdata->skb); 1878 + tdata->skb = NULL; 1879 + } 1880 + 1876 1881 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 1877 1882 (opcode == ISCSI_OP_SCSI_DATA_OUT || 1878 1883 (opcode == ISCSI_OP_SCSI_CMD && ··· 1895 1890 return -ENOMEM; 1896 1891 } 1897 1892 1893 + skb_get(tdata->skb); 1898 1894 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 1899 1895 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1900 1896 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ ··· 2041 2035 unsigned int datalen; 2042 2036 int err; 2043 2037 2044 - if (!skb) { 2038 + if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) { 2045 2039 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2046 - "task 0x%p, skb NULL.\n", task); 2040 + "task 0x%p, skb 0x%p\n", task, skb); 2047 2041 return 0; 2048 2042 } 2049 2043 ··· 2056 2050 } 2057 2051 2058 2052 datalen = skb->data_len; 2059 - tdata->skb = NULL; 2060 2053 2061 2054 /* write ppod first if using ofldq to write ppod */ 2062 2055 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { ··· 2083 2078 pdulen += ISCSI_DIGEST_SIZE; 2084 2079 2085 2080 task->conn->txdata_octets += pdulen; 2081 + cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE); 2086 2082 return 0; 2087 2083 } 2088 2084 ··· 2092 2086 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2093 2087 task, skb, skb->len, skb->data_len, err); 2094 2088 /* reset skb to send when we are called again */ 2095 - tdata->skb = skb; 2096 2089 return err; 2097 2090 } 2098 2091 ··· 2099 2094 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2100 2095 task->itt, skb, skb->len, skb->data_len, err); 2101 2096 2102 - kfree_skb(skb); 2097 + __kfree_skb(tdata->skb); 2098 + tdata->skb = NULL; 2103 2099 2104 2100 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2105 2101 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); ··· 2119 2113 2120 2114 tcp_task->dd_data = NULL; 2121 2115 /* never reached the xmit task callout */ 2122 - if (tdata->skb) 2123 - __kfree_skb(tdata->skb); 2116 + if (tdata->skb) { 2117 + kfree_skb(tdata->skb); 2118 + tdata->skb = NULL; 2119 + } 2124 2120 2125 2121 task_release_itt(task, task->hdr_itt); 2126 2122 memset(tdata, 0, sizeof(*tdata)); ··· 2722 2714 static int __init libcxgbi_init_module(void) 2723 2715 { 2724 2716 pr_info("%s", version); 2717 + 2718 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < 2719 + sizeof(struct cxgbi_skb_cb)); 2725 2720 return 0; 2726 2721 } 2727 2722
+8 -8
drivers/scsi/cxgbi/libcxgbi.h
··· 195 195 }; 196 196 197 197 struct cxgbi_skb_tx_cb { 198 - void *l2t; 198 + void *handle; 199 + void *arp_err_handler; 199 200 struct sk_buff *wr_next; 200 201 }; 201 202 ··· 204 203 SKCBF_TX_NEED_HDR, /* packet needs a header */ 205 204 SKCBF_TX_MEM_WRITE, /* memory write */ 206 205 SKCBF_TX_FLAG_COMPL, /* wr completion flag */ 206 + SKCBF_TX_DONE, /* skb tx done */ 207 207 SKCBF_RX_COALESCED, /* received whole pdu */ 208 208 SKCBF_RX_HDR, /* received pdu header */ 209 209 SKCBF_RX_DATA, /* received pdu payload */ ··· 217 215 }; 218 216 219 217 struct cxgbi_skb_cb { 220 - unsigned char ulp_mode; 221 - unsigned long flags; 222 - unsigned int seq; 223 218 union { 224 219 struct cxgbi_skb_rx_cb rx; 225 220 struct cxgbi_skb_tx_cb tx; 226 221 }; 222 + unsigned char ulp_mode; 223 + unsigned long flags; 224 + unsigned int seq; 227 225 }; 228 226 229 227 #define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) ··· 376 374 cxgbi_skcb_tx_wr_next(skb) = NULL; 377 375 /* 378 376 * We want to take an extra reference since both us and the driver 379 - * need to free the packet before it's really freed. We know there's 380 - * just one user currently so we use atomic_set rather than skb_get 381 - * to avoid the atomic op. 377 + * need to free the packet before it's really freed. 382 378 */ 383 - atomic_set(&skb->users, 2); 379 + skb_get(skb); 384 380 385 381 if (!csk->wr_pending_head) 386 382 csk->wr_pending_head = skb;
+4 -6
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 265 265 struct list_head *list, 266 266 unsigned char *cdb) 267 267 { 268 - struct scsi_device *sdev = ctlr->ms_sdev; 269 - struct rdac_dh_data *h = sdev->handler_data; 270 268 struct rdac_mode_common *common; 271 269 unsigned data_size; 272 270 struct rdac_queue_data *qdata; 273 271 u8 *lun_table; 274 272 275 - if (h->ctlr->use_ms10) { 273 + if (ctlr->use_ms10) { 276 274 struct rdac_pg_expanded *rdac_pg; 277 275 278 276 data_size = sizeof(struct rdac_pg_expanded); 279 - rdac_pg = &h->ctlr->mode_select.expanded; 277 + rdac_pg = &ctlr->mode_select.expanded; 280 278 memset(rdac_pg, 0, data_size); 281 279 common = &rdac_pg->common; 282 280 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; ··· 286 288 struct rdac_pg_legacy *rdac_pg; 287 289 288 290 data_size = sizeof(struct rdac_pg_legacy); 289 - rdac_pg = &h->ctlr->mode_select.legacy; 291 + rdac_pg = &ctlr->mode_select.legacy; 290 292 memset(rdac_pg, 0, data_size); 291 293 common = &rdac_pg->common; 292 294 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; ··· 302 304 } 303 305 304 306 /* Prepare the command. */ 305 - if (h->ctlr->use_ms10) { 307 + if (ctlr->use_ms10) { 306 308 cdb[0] = MODE_SELECT_10; 307 309 cdb[7] = data_size >> 8; 308 310 cdb[8] = data_size & 0xff;
+23 -4
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 1170 1170 cmd = list_first_entry_or_null(&vscsi->free_cmd, 1171 1171 struct ibmvscsis_cmd, list); 1172 1172 if (cmd) { 1173 + if (cmd->abort_cmd) 1174 + cmd->abort_cmd = NULL; 1173 1175 cmd->flags &= ~(DELAY_SEND); 1174 1176 list_del(&cmd->list); 1175 1177 cmd->iue = iue; ··· 1776 1774 if (cmd->abort_cmd) { 1777 1775 retry = true; 1778 1776 cmd->abort_cmd->flags &= ~(DELAY_SEND); 1777 + cmd->abort_cmd = NULL; 1779 1778 } 1780 1779 /* ··· 1791 1788 list_del(&cmd->list); 1792 1789 ibmvscsis_free_cmd_resources(vscsi, 1793 1790 cmd); 1791 + /* 1792 + * With a successfully aborted op 1793 + * through LIO we want to increment the 1794 + * vscsi credit so that when we don't 1795 + * send a rsp to the original scsi abort 1796 + * op (h_send_crq), but the tm rsp to 1797 + * the abort is sent, the credit is 1798 + * correctly sent with the abort tm rsp. 1799 + * We would need 1 for the abort tm rsp 1800 + * and 1 credit for the aborted scsi op. 1801 + * Thus we need to increment here. 1802 + * Also we want to increment the credit 1803 + * here because we want to make sure 1804 + * cmd is actually released first, 1805 + * otherwise the client will think it 1806 + * can send a new cmd, and we could 1807 + * find ourselves short of cmd elements. 1808 + */ 1809 + vscsi->credit += 1; 1794 1810 } else { 1795 1811 iue = cmd->iue; 1796 1812 ··· 2984 2962 2985 2963 rsp->opcode = SRP_RSP; 2986 2964 2987 - if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING) 2988 - rsp->req_lim_delta = cpu_to_be32(vscsi->credit); 2989 - else 2990 - rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); 2965 + rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); 2991 2966 rsp->tag = cmd->rsp.tag; 2992 2967 rsp->flags = 0;
+2 -1
drivers/scsi/qedi/qedi.h
··· 38 38 #define QEDI_MAX_ISCSI_TASK 4096 39 39 #define QEDI_MAX_TASK_NUM 0x0FFF 40 40 #define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 41 - #define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */ 41 + #define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */ 42 42 #define MAX_OUSTANDING_TASKS_PER_CON 1024 43 43 44 44 #define QEDI_MAX_BD_LEN 0xffff ··· 63 63 #define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) 64 64 65 65 #define QEDI_PAGE_SIZE 4096 66 + #define QEDI_HW_DMA_BOUNDARY 0xfff 66 67 #define QEDI_PATH_HANDLE 0xFE0000000UL 67 68 68 69 struct qedi_uio_ctrl {
+2
drivers/scsi/qedi/qedi_fw.c
··· 1494 1494 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 1495 1495 qedi_cmd = (struct qedi_cmd *)mtask->dd_data; 1496 1496 ep = qedi_conn->ep; 1497 + if (!ep) 1498 + return -ENODEV; 1497 1499 1498 1500 tid = qedi_get_task_idx(qedi); 1499 1501 if (tid == -1)
+6 -1
drivers/scsi/qedi/qedi_iscsi.c
··· 59 59 .this_id = -1, 60 60 .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, 61 61 .max_sectors = 0xffff, 62 + .dma_boundary = QEDI_HW_DMA_BOUNDARY, 62 63 .cmd_per_lun = 128, 63 64 .use_clustering = ENABLE_CLUSTERING, 64 65 .shost_attrs = qedi_shost_attrs, ··· 1224 1223 1225 1224 iscsi_cid = (u32)path_data->handle; 1226 1225 qedi_ep = qedi->ep_tbl[iscsi_cid]; 1227 - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 1226 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1228 1227 "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); 1228 + if (!qedi_ep) { 1229 + ret = -EINVAL; 1230 + goto set_path_exit; 1231 + } 1229 1232 1230 1233 if (!is_valid_ether_addr(&path_data->mac_addr[0])) { 1231 1234 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+13 -12
drivers/scsi/qedi/qedi_main.c
··· 151 151 152 152 static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) 153 153 { 154 + if (udev->uctrl) { 155 + free_page((unsigned long)udev->uctrl); 156 + udev->uctrl = NULL; 157 + } 158 + 154 159 if (udev->ll2_ring) { 155 160 free_page((unsigned long)udev->ll2_ring); 156 161 udev->ll2_ring = NULL; ··· 174 169 __qedi_free_uio_rings(udev); 175 170 176 171 pci_dev_put(udev->pdev); 177 - kfree(udev->uctrl); 178 172 kfree(udev); 179 173 } 180 174 ··· 212 208 if (udev->ll2_ring || udev->ll2_buf) 213 209 return rc; 214 210 211 + /* Memory for control area. */ 212 + udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL); 213 + if (!udev->uctrl) 214 + return -ENOMEM; 215 + 215 216 /* Allocating memory for LL2 ring */ 216 217 udev->ll2_ring_size = QEDI_PAGE_SIZE; 217 218 udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); ··· 246 237 static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) 247 238 { 248 239 struct qedi_uio_dev *udev = NULL; 249 - struct qedi_uio_ctrl *uctrl = NULL; 250 240 int rc = 0; 251 241 252 242 list_for_each_entry(udev, &qedi_udev_list, list) { ··· 266 258 goto err_udev; 267 259 } 268 260 269 - uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL); 270 - if (!uctrl) { 271 - rc = -ENOMEM; 272 - goto err_uctrl; 273 - } 274 - 275 261 udev->uio_dev = -1; 276 262 277 263 udev->qedi = qedi; 278 264 udev->pdev = qedi->pdev; 279 - udev->uctrl = uctrl; 280 265 281 266 rc = __qedi_alloc_uio_rings(udev); 282 267 if (rc) 283 - goto err_uio_rings; 268 + goto err_uctrl; 284 269 285 270 list_add(&udev->list, &qedi_udev_list); 286 271 ··· 284 283 udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; 285 284 return 0; 286 285 287 - err_uio_rings: 288 - kfree(uctrl); 289 286 err_uctrl: 290 287 kfree(udev); 291 288 err_udev: ··· 827 828 qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; 828 829 qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; 829 830 qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; 831 + qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000; 832 + qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; 830 833 831 834 for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { 832 835 if ((1 << log_page_size) == PAGE_SIZE)
+24 -6
drivers/target/iscsi/iscsi_target.c
··· 3790 3790 { 3791 3791 int ret = 0; 3792 3792 struct iscsi_conn *conn = arg; 3793 + bool conn_freed = false; 3794 + 3793 3795 /* 3794 3796 * Allow ourselves to be interrupted by SIGINT so that a 3795 3797 * connection recovery / failure event can be triggered externally. ··· 3817 3815 goto transport_err; 3818 3816 3819 3817 ret = iscsit_handle_response_queue(conn); 3820 - if (ret == 1) 3818 + if (ret == 1) { 3821 3819 goto get_immediate; 3822 - else if (ret == -ECONNRESET) 3820 + } else if (ret == -ECONNRESET) { 3821 + conn_freed = true; 3823 3822 goto out; 3824 - else if (ret < 0) 3823 + } else if (ret < 0) { 3825 3824 goto transport_err; 3825 + } 3826 3826 } 3827 3827 3828 3828 transport_err: ··· 3834 3830 * responsible for cleaning up the early connection failure. 3835 3831 */ 3836 3832 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) 3837 - iscsit_take_action_for_connection_exit(conn); 3833 + iscsit_take_action_for_connection_exit(conn, &conn_freed); 3838 3834 out: 3835 + if (!conn_freed) { 3836 + while (!kthread_should_stop()) { 3837 + msleep(100); 3838 + } 3839 + } 3839 3840 return 0; 3840 3841 } 3841 3842 ··· 4013 4004 { 4014 4005 int rc; 4015 4006 struct iscsi_conn *conn = arg; 4007 + bool conn_freed = false; 4016 4008 4017 4009 /* 4018 4010 * Allow ourselves to be interrupted by SIGINT so that a ··· 4026 4016 */ 4027 4017 rc = wait_for_completion_interruptible(&conn->rx_login_comp); 4028 4018 if (rc < 0 || iscsi_target_check_conn_state(conn)) 4029 - return 0; 4019 + goto out; 4030 4020 4031 4021 if (!conn->conn_transport->iscsit_get_rx_pdu) 4032 4022 return 0; ··· 4035 4025 4036 4026 if (!signal_pending(current)) 4037 4027 atomic_set(&conn->transport_failed, 1); 4038 - iscsit_take_action_for_connection_exit(conn); 4028 + iscsit_take_action_for_connection_exit(conn, &conn_freed); 4029 + 4030 + out: 4031 + if (!conn_freed) { 4032 + while (!kthread_should_stop()) { 4033 + msleep(100); 4034 + } 4035 + } 4036 + 4039 4037 return 0; 4040 4038 } 4041 4039
+5 -1
drivers/target/iscsi/iscsi_target_erl0.c
··· 930 930 } 931 931 } 932 932 933 - void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) 933 + void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed) 934 934 { 935 + *conn_freed = false; 936 + 935 937 spin_lock_bh(&conn->state_lock); 936 938 if (atomic_read(&conn->connection_exit)) { 937 939 spin_unlock_bh(&conn->state_lock); ··· 944 942 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { 945 943 spin_unlock_bh(&conn->state_lock); 946 944 iscsit_close_connection(conn); 945 + *conn_freed = true; 947 946 return; 948 947 } 949 948 ··· 958 955 spin_unlock_bh(&conn->state_lock); 959 956 960 957 iscsit_handle_connection_cleanup(conn); 958 + *conn_freed = true; 961 959 }
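Editor's note: the new bool out-parameter lets iscsit_take_action_for_connection_exit() report whether it freed the connection, so callers know not to touch conn afterwards. The idiom in isolation (all names hypothetical):

	#include <linux/types.h>

	struct conn;					/* opaque, hypothetical */
	bool conn_should_close(struct conn *c);		/* hypothetical predicate */
	void conn_close_and_free(struct conn *c);	/* hypothetical teardown */

	/* Callee reports ownership transfer: *freed tells the caller
	 * whether the object is gone and must not be touched again.
	 */
	static void maybe_close(struct conn *c, bool *freed)
	{
		*freed = false;

		if (conn_should_close(c)) {
			conn_close_and_free(c);
			*freed = true;
		}
	}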
+1 -1
drivers/target/iscsi/iscsi_target_erl0.h
··· 15 15 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); 16 16 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); 17 17 extern void iscsit_fall_back_to_erl0(struct iscsi_session *); 18 - extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); 18 + extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *); 19 19 20 20 #endif /*** ISCSI_TARGET_ERL0_H ***/
+4
drivers/target/iscsi/iscsi_target_login.c
··· 1464 1464 break; 1465 1465 } 1466 1466 1467 + while (!kthread_should_stop()) { 1468 + msleep(100); 1469 + } 1470 + 1467 1471 return 0; 1468 1472 }
+133 -63
drivers/target/iscsi/iscsi_target_nego.c
··· 493 493 494 494 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); 495 495 496 - static bool iscsi_target_sk_state_check(struct sock *sk) 496 + static bool __iscsi_target_sk_check_close(struct sock *sk) 497 497 { 498 498 if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { 499 - pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," 499 + pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," 500 500 "returning FALSE\n"); 501 - return false; 501 + return true; 502 502 } 503 - return true; 503 + return false; 504 + } 505 + 506 + static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) 507 + { 508 + bool state = false; 509 + 510 + if (conn->sock) { 511 + struct sock *sk = conn->sock->sk; 512 + 513 + read_lock_bh(&sk->sk_callback_lock); 514 + state = (__iscsi_target_sk_check_close(sk) || 515 + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); 516 + read_unlock_bh(&sk->sk_callback_lock); 517 + } 518 + return state; 519 + } 520 + 521 + static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) 522 + { 523 + bool state = false; 524 + 525 + if (conn->sock) { 526 + struct sock *sk = conn->sock->sk; 527 + 528 + read_lock_bh(&sk->sk_callback_lock); 529 + state = test_bit(flag, &conn->login_flags); 530 + read_unlock_bh(&sk->sk_callback_lock); 531 + } 532 + return state; 533 + } 534 + 535 + static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) 536 + { 537 + bool state = false; 538 + 539 + if (conn->sock) { 540 + struct sock *sk = conn->sock->sk; 541 + 542 + write_lock_bh(&sk->sk_callback_lock); 543 + state = (__iscsi_target_sk_check_close(sk) || 544 + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); 545 + if (!state) 546 + clear_bit(flag, &conn->login_flags); 547 + write_unlock_bh(&sk->sk_callback_lock); 548 + } 549 + return state; 504 550 } 505 551 506 552 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) ··· 586 540 587 541 pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", 588 542 conn, current->comm, current->pid); 543 + /* 544 + * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() 545 + * before initial PDU processing in iscsi_target_start_negotiation() 546 + * has completed, go ahead and retry until it's cleared. 547 + * 548 + * Otherwise if the TCP connection drops while this is occurring, 549 + * iscsi_target_start_negotiation() will detect the failure, call 550 + * cancel_delayed_work_sync(&conn->login_work), and clean up the 551 + * remaining iscsi connection resources from iscsi_np process context.
552 + */ 553 + if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { 554 + schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); 555 + return; 556 + } 589 557 590 558 spin_lock(&tpg->tpg_state_lock); 591 559 state = (tpg->tpg_state == TPG_STATE_ACTIVE); ··· 607 547 608 548 if (!state) { 609 549 pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); 610 - iscsi_target_restore_sock_callbacks(conn); 611 - iscsi_target_login_drop(conn, login); 612 - iscsit_deaccess_np(np, tpg, tpg_np); 613 - return; 550 + goto err; 614 551 } 615 552 616 - if (conn->sock) { 617 - struct sock *sk = conn->sock->sk; 618 - 619 - read_lock_bh(&sk->sk_callback_lock); 620 - state = iscsi_target_sk_state_check(sk); 621 - read_unlock_bh(&sk->sk_callback_lock); 622 - 623 - if (!state) { 624 - pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); 625 - iscsi_target_restore_sock_callbacks(conn); 626 - iscsi_target_login_drop(conn, login); 627 - iscsit_deaccess_np(np, tpg, tpg_np); 628 - return; 629 - } 553 + if (iscsi_target_sk_check_close(conn)) { 554 + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); 555 + goto err; 630 556 } 631 557 632 558 conn->login_kworker = current; ··· 630 584 flush_signals(current); 631 585 conn->login_kworker = NULL; 632 586 633 - if (rc < 0) { 634 - iscsi_target_restore_sock_callbacks(conn); 635 - iscsi_target_login_drop(conn, login); 636 - iscsit_deaccess_np(np, tpg, tpg_np); 637 - return; 638 - } 587 + if (rc < 0) 588 + goto err; 639 589 640 590 pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", 641 591 conn, current->comm, current->pid); 642 592 643 593 rc = iscsi_target_do_login(conn, login); 644 594 if (rc < 0) { 645 - iscsi_target_restore_sock_callbacks(conn); 646 - iscsi_target_login_drop(conn, login); 647 - iscsit_deaccess_np(np, tpg, tpg_np); 595 + goto err; 648 596 } else if (!rc) { 649 - if (conn->sock) { 650 - struct sock *sk = conn->sock->sk; 651 - 652 - write_lock_bh(&sk->sk_callback_lock); 653 - clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); 654 - write_unlock_bh(&sk->sk_callback_lock); 655 - } 597 + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) 598 + goto err; 656 599 } else if (rc == 1) { 657 600 iscsi_target_nego_release(conn); 658 601 iscsi_post_login_handler(np, conn, zero_tsih); 659 602 iscsit_deaccess_np(np, tpg, tpg_np); 660 603 } 604 + return; 605 + 606 + err: 607 + iscsi_target_restore_sock_callbacks(conn); 608 + iscsi_target_login_drop(conn, login); 609 + iscsit_deaccess_np(np, tpg, tpg_np); 661 610 } 662 611 663 612 static void iscsi_target_do_cleanup(struct work_struct *work) ··· 700 659 orig_state_change(sk); 701 660 return; 702 661 } 662 + state = __iscsi_target_sk_check_close(sk); 663 + pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); 664 + 703 665 if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { 704 666 pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" 705 667 " conn: %p\n", conn); 668 + if (state) 669 + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); 706 670 write_unlock_bh(&sk->sk_callback_lock); 707 671 orig_state_change(sk); 708 672 return; 709 673 } 710 - if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { 674 + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { 711 675 pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", 712 676 conn); 713 677 write_unlock_bh(&sk->sk_callback_lock); 714 678 orig_state_change(sk); 715 679 return; 716 680 } 717 - 718 - state = iscsi_target_sk_state_check(sk); 719 - 
write_unlock_bh(&sk->sk_callback_lock); 720 - 721 - pr_debug("iscsi_target_sk_state_change: state: %d\n", state); 722 - 723 - if (!state) { 681 + /* 682 + * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, 683 + * but only queue conn->login_work -> iscsi_target_do_login_rx() 684 + * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. 685 + * 686 + * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() 687 + * will detect the dropped TCP connection from delayed workqueue context. 688 + * 689 + * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial 690 + * iscsi_target_start_negotiation() is running, iscsi_target_do_login() 691 + * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() 692 + * via iscsi_target_sk_check_and_clear() is responsible for detecting the 693 + * dropped TCP connection in iscsi_np process context, and cleaning up 694 + * the remaining iscsi connection resources. 695 + */ 696 + if (state) { 724 697 pr_debug("iscsi_target_sk_state_change got failed state\n"); 725 - schedule_delayed_work(&conn->login_cleanup_work, 0); 698 + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); 699 + state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); 700 + write_unlock_bh(&sk->sk_callback_lock); 701 + 702 + orig_state_change(sk); 703 + 704 + if (!state) 705 + schedule_delayed_work(&conn->login_work, 0); 726 706 return; 727 707 } 708 + write_unlock_bh(&sk->sk_callback_lock); 709 + 728 710 orig_state_change(sk); 729 711 } 730 712 ··· 1010 946 if (iscsi_target_handle_csg_one(conn, login) < 0) 1011 947 return -1; 1012 948 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 949 + /* 950 + * Check to make sure the TCP connection has not 951 + * dropped asynchronously while session reinstatement 952 + * was occurring in this kthread context, before 953 + * transitioning to full feature phase operation. 954 + */ 955 + if (iscsi_target_sk_check_close(conn)) 956 + return -1; 957 + 1013 958 login->tsih = conn->sess->tsih; 1014 959 login->login_complete = 1; 1015 960 iscsi_target_restore_sock_callbacks(conn); ··· 1043 970 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; 1044 971 } 1045 972 break; 1046 - } 1047 - 1048 - if (conn->sock) { 1049 - struct sock *sk = conn->sock->sk; 1050 - bool state; 1051 - 1052 - read_lock_bh(&sk->sk_callback_lock); 1053 - state = iscsi_target_sk_state_check(sk); 1054 - read_unlock_bh(&sk->sk_callback_lock); 1055 - 1056 - if (!state) { 1057 - pr_debug("iscsi_target_do_login() failed state for" 1058 - " conn: %p\n", conn); 1059 - return -1; 1060 - } 1061 973 } 1062 974 1063 975 return 0; ··· 1313 1255 1314 1256 write_lock_bh(&sk->sk_callback_lock); 1315 1257 set_bit(LOGIN_FLAGS_READY, &conn->login_flags); 1258 + set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); 1316 1259 write_unlock_bh(&sk->sk_callback_lock); 1317 1260 } 1318 - 1261 + /* 1262 + * If iscsi_target_do_login returns zero to signal more PDU 1263 + * exchanges are required to complete the login, go ahead and 1264 + * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection 1265 + * is still active. 1266 + * 1267 + * Otherwise if TCP connection dropped asynchronously, go ahead 1268 + * and perform connection cleanup now.
1269 + */ 1319 1270 ret = iscsi_target_do_login(conn, login); 1271 + if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) 1272 + ret = -1; 1273 + 1320 1274 if (ret < 0) { 1321 1275 cancel_delayed_work_sync(&conn->login_work); 1322 1276 cancel_delayed_work_sync(&conn->login_cleanup_work);
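Editor's note: the pivot of this fix is the new check-and-clear helper: LOGIN_FLAGS_INITIAL_PDU is only cleared while the TCP connection is verifiably still open, with ->sk_callback_lock serializing against ->sk_state_change(). A reduced sketch of that idiom, with the flag word passed in rather than taken from a struct iscsi_conn:

	#include <net/sock.h>
	#include <net/tcp_states.h>
	#include <linux/bitops.h>

	/* Returns true (and leaves @flag set) if the TCP connection already
	 * dropped; otherwise clears @flag atomically w.r.t. ->sk_state_change().
	 */
	static bool sk_check_and_clear(struct sock *sk, unsigned long *flags,
				       unsigned int flag)
	{
		bool closed;

		write_lock_bh(&sk->sk_callback_lock);
		closed = (sk->sk_state == TCP_CLOSE_WAIT ||
			  sk->sk_state == TCP_CLOSE);
		if (!closed)
			clear_bit(flag, flags);
		write_unlock_bh(&sk->sk_callback_lock);

		return closed;
	}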
+18 -5
drivers/target/target_core_transport.c
··· 1160 1160 if (cmd->unknown_data_length) { 1161 1161 cmd->data_length = size; 1162 1162 } else if (size != cmd->data_length) { 1163 - pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1163 + pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1164 1164 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1165 1165 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1166 1166 cmd->data_length, size, cmd->t_task_cdb[0]); 1167 1167 1168 - if (cmd->data_direction == DMA_TO_DEVICE && 1169 - cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1170 - pr_err("Rejecting underflow/overflow WRITE data\n"); 1171 - return TCM_INVALID_CDB_FIELD; 1168 + if (cmd->data_direction == DMA_TO_DEVICE) { 1169 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1170 + pr_err_ratelimited("Rejecting underflow/overflow" 1171 + " for WRITE data CDB\n"); 1172 + return TCM_INVALID_CDB_FIELD; 1173 + } 1174 + /* 1175 + * Some fabric drivers like iscsi-target still expect to 1176 + * always reject overflow writes. Reject this case until 1177 + * full fabric driver level support for overflow writes 1178 + * is introduced tree-wide. 1179 + */ 1180 + if (size > cmd->data_length) { 1181 + pr_err_ratelimited("Rejecting overflow for" 1182 + " WRITE control CDB\n"); 1183 + return TCM_INVALID_CDB_FIELD; 1184 + } 1172 1185 } 1173 1186 /* 1174 1187 * Reject READ_* or WRITE_* with overflow/underflow for
+33 -13
drivers/target/target_core_user.c
··· 97 97 98 98 struct tcmu_dev { 99 99 struct list_head node; 100 - 100 + struct kref kref; 101 101 struct se_device se_dev; 102 102 103 103 char *name; ··· 969 969 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 970 970 if (!udev) 971 971 return NULL; 972 + kref_init(&udev->kref); 972 973 973 974 udev->name = kstrdup(name, GFP_KERNEL); 974 975 if (!udev->name) { ··· 1146 1145 return 0; 1147 1146 } 1148 1147 1148 + static void tcmu_dev_call_rcu(struct rcu_head *p) 1149 + { 1150 + struct se_device *dev = container_of(p, struct se_device, rcu_head); 1151 + struct tcmu_dev *udev = TCMU_DEV(dev); 1152 + 1153 + kfree(udev->uio_info.name); 1154 + kfree(udev->name); 1155 + kfree(udev); 1156 + } 1157 + 1158 + static void tcmu_dev_kref_release(struct kref *kref) 1159 + { 1160 + struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1161 + struct se_device *dev = &udev->se_dev; 1162 + 1163 + call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1164 + } 1165 + 1149 1166 static int tcmu_release(struct uio_info *info, struct inode *inode) 1150 1167 { 1151 1168 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); ··· 1171 1152 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1172 1153 1173 1154 pr_debug("close\n"); 1174 - 1155 + /* release ref from configure */ 1156 + kref_put(&udev->kref, tcmu_dev_kref_release); 1175 1157 return 0; 1176 1158 } 1177 1159 ··· 1292 1272 dev->dev_attrib.hw_max_sectors = 128; 1293 1273 dev->dev_attrib.hw_queue_depth = 128; 1294 1274 1275 + /* 1276 + * Get a ref in case userspace does a close on the uio device before 1277 + * LIO has initiated tcmu_free_device. 1278 + */ 1279 + kref_get(&udev->kref); 1280 + 1295 1281 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, 1296 1282 udev->uio_info.uio_dev->minor); 1297 1283 if (ret) ··· 1310 1284 return 0; 1311 1285 1312 1286 err_netlink: 1287 + kref_put(&udev->kref, tcmu_dev_kref_release); 1313 1288 uio_unregister_device(&udev->uio_info); 1314 1289 err_register: 1315 1290 vfree(udev->mb_addr); 1316 1291 err_vzalloc: 1317 1292 kfree(info->name); 1293 + info->name = NULL; 1318 1294 1319 1295 return ret; 1320 1296 } ··· 1328 1300 return 0; 1329 1301 } 1330 1302 return -EINVAL; 1331 - } 1332 - 1333 - static void tcmu_dev_call_rcu(struct rcu_head *p) 1334 - { 1335 - struct se_device *dev = container_of(p, struct se_device, rcu_head); 1336 - struct tcmu_dev *udev = TCMU_DEV(dev); 1337 - 1338 - kfree(udev); 1339 1303 } 1340 1304 1341 1305 static bool tcmu_dev_configured(struct tcmu_dev *udev) ··· 1384 1364 udev->uio_info.uio_dev->minor); 1385 1365 1386 1366 uio_unregister_device(&udev->uio_info); 1387 - kfree(udev->uio_info.name); 1388 - kfree(udev->name); 1389 1367 } 1390 - 1368 + 1369 + /* release ref from init */ 1370 + kref_put(&udev->kref, tcmu_dev_kref_release); 1391 1371 } 1392 1372 1393 1373 enum {
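Editor's note: the tcmu conversion is a standard two-owner kref lifetime: one reference for the LIO core (dropped in tcmu_free_device()) and one for the open uio fd (taken at configure time, dropped in tcmu_release()), with the final put deferring the free through RCU. A self-contained sketch of the shape, not the driver's actual types:

	#include <linux/kref.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct obj {
		struct kref kref;
		struct rcu_head rcu;
	};

	static void obj_free_rcu(struct rcu_head *p)
	{
		kfree(container_of(p, struct obj, rcu));
	}

	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, kref);

		/* defer the actual free until RCU readers are done */
		call_rcu(&o->rcu, obj_free_rcu);
	}

	/*
	 * creator:     kref_init(&o->kref);		first reference
	 * second user: kref_get(&o->kref);		e.g. an open file
	 * each owner:  kref_put(&o->kref, obj_release);
	 */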
-2
drivers/tty/tty_port.c
··· 34 34 if (!disc) 35 35 return 0; 36 36 37 - mutex_lock(&tty->atomic_write_lock); 38 37 ret = tty_ldisc_receive_buf(disc, p, (char *)f, count); 39 - mutex_unlock(&tty->atomic_write_lock); 40 38 41 39 tty_ldisc_deref(disc); 42 40
+23
fs/dax.c
··· 1155 1155 } 1156 1156 1157 1157 /* 1158 + * It is possible, particularly with mixed reads & writes to private 1159 + * mappings, that we have raced with a PMD fault that overlaps with 1160 + * the PTE we need to set up. If so just return and the fault will be 1161 + * retried. 1162 + */ 1163 + if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1164 + vmf_ret = VM_FAULT_NOPAGE; 1165 + goto unlock_entry; 1166 + } 1167 + 1168 + /* 1158 1169 * Note that we don't bother to use iomap_apply here: DAX required 1159 1170 * the file system block size to be equal the page size, which means 1160 1171 * that we never have to deal with more than a single extent here. ··· 1407 1396 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); 1408 1397 if (IS_ERR(entry)) 1409 1398 goto fallback; 1399 + 1400 + /* 1401 + * It is possible, particularly with mixed reads & writes to private 1402 + * mappings, that we have raced with a PTE fault that overlaps with 1403 + * the PMD we need to set up. If so just return and the fault will be 1404 + * retried. 1405 + */ 1406 + if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1407 + !pmd_devmap(*vmf->pmd)) { 1408 + result = 0; 1409 + goto unlock_entry; 1410 + } 1410 1411 1411 1412 /* 1412 1413 * Note that we don't use iomap_apply here. We aren't doing I/O, only
+1 -1
fs/gfs2/log.c
··· 659 659 struct gfs2_log_header *lh; 660 660 unsigned int tail; 661 661 u32 hash; 662 - int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META; 662 + int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC; 663 663 struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); 664 664 enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); 665 665 lh = page_address(page);
+1
fs/nfs/flexfilelayout/flexfilelayout.c
··· 454 454 goto out_err_free; 455 455 456 456 /* fh */ 457 + rc = -EIO; 457 458 p = xdr_inline_decode(&stream, 4); 458 459 if (!p) 459 460 goto out_err_free;
-2
fs/nfs/internal.h
··· 398 398 bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t); 399 399 struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *, 400 400 struct nfs_subversion *); 401 - void nfs_initialise_sb(struct super_block *); 402 401 int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); 403 402 int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); 404 403 struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *, ··· 457 458 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); 458 459 459 460 /* super.c */ 460 - void nfs_clone_super(struct super_block *, struct nfs_mount_info *); 461 461 void nfs_umount_begin(struct super_block *); 462 462 int nfs_statfs(struct dentry *, struct kstatfs *); 463 463 int nfs_show_options(struct seq_file *, struct dentry *);
+1 -1
fs/nfs/namespace.c
··· 246 246 247 247 devname = nfs_devname(dentry, page, PAGE_SIZE); 248 248 if (IS_ERR(devname)) 249 - mnt = (struct vfsmount *)devname; 249 + mnt = ERR_CAST(devname); 250 250 else 251 251 mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata); 252 252
+1 -1
fs/nfs/nfs42proc.c
··· 177 177 if (status) 178 178 goto out; 179 179 180 - if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier, 180 + if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier, 181 181 &res->commit_res.verf->verifier)) { 182 182 status = -EAGAIN; 183 183 goto out;
-1
fs/nfs/nfs4client.c
··· 582 582 */ 583 583 nfs4_schedule_path_down_recovery(pos); 584 584 default: 585 - spin_lock(&nn->nfs_client_lock); 586 585 goto out; 587 586 } 588 587
+17 -8
fs/nfs/pnfs.c
··· 2094 2094 } 2095 2095 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout); 2096 2096 2097 + /* 2098 + * Check for any intersection between the request and the pgio->pg_lseg, 2099 + * and if none, put this pgio->pg_lseg away. 2100 + */ 2101 + static void 2102 + pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 2103 + { 2104 + if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) { 2105 + pnfs_put_lseg(pgio->pg_lseg); 2106 + pgio->pg_lseg = NULL; 2107 + } 2108 + } 2109 + 2097 2110 void 2098 2111 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 2099 2112 { 2100 2113 u64 rd_size = req->wb_bytes; 2101 2114 2102 2115 pnfs_generic_pg_check_layout(pgio); 2116 + pnfs_generic_pg_check_range(pgio, req); 2103 2117 if (pgio->pg_lseg == NULL) { 2104 2118 if (pgio->pg_dreq == NULL) 2105 2119 rd_size = i_size_read(pgio->pg_inode) - req_offset(req); ··· 2145 2131 struct nfs_page *req, u64 wb_size) 2146 2132 { 2147 2133 pnfs_generic_pg_check_layout(pgio); 2134 + pnfs_generic_pg_check_range(pgio, req); 2148 2135 if (pgio->pg_lseg == NULL) { 2149 2136 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 2150 2137 req->wb_context, ··· 2206 2191 seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset, 2207 2192 pgio->pg_lseg->pls_range.length); 2208 2193 req_start = req_offset(req); 2209 - WARN_ON_ONCE(req_start >= seg_end); 2194 + 2210 2195 /* start of request is past the last byte of this segment */ 2211 - if (req_start >= seg_end) { 2212 - /* reference the new lseg */ 2213 - if (pgio->pg_ops->pg_cleanup) 2214 - pgio->pg_ops->pg_cleanup(pgio); 2215 - if (pgio->pg_ops->pg_init) 2216 - pgio->pg_ops->pg_init(pgio, req); 2196 + if (req_start >= seg_end) 2217 2197 return 0; 2218 - } 2219 2198 2220 2199 /* adjust 'size' iff there are fewer bytes left in the 2221 2200 * segment than what nfs_generic_pg_test returned */
+10
fs/nfs/pnfs.h
··· 593 593 return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2); 594 594 } 595 595 596 + static inline bool 597 + pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req) 598 + { 599 + u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length); 600 + u64 req_last = req_offset(req) + req->wb_bytes; 601 + 602 + return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last, 603 + req_offset(req), req_last); 604 + } 605 + 596 606 extern unsigned int layoutstats_timer; 597 607 598 608 #ifdef NFS_DEBUG
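Editor's note: pnfs_lseg_request_intersecting() boils down to a byte-range overlap test between the layout segment and the request. A hypothetical standalone version, ignoring the "length extends to EOF" sentinel that pnfs_end_offset() takes care of:

	#include <linux/types.h>

	/* Two half-open byte ranges [s1, e1) and [s2, e2) overlap iff each
	 * one starts before the other ends.
	 */
	static bool ranges_overlap(u64 s1, u64 e1, u64 s2, u64 e2)
	{
		return s1 < e2 && s2 < e1;
	}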
+3 -2
fs/nfs/super.c
··· 2301 2301 /* 2302 2302 * Initialise the common bits of the superblock 2303 2303 */ 2304 - inline void nfs_initialise_sb(struct super_block *sb) 2304 + static void nfs_initialise_sb(struct super_block *sb) 2305 2305 { 2306 2306 struct nfs_server *server = NFS_SB(sb); 2307 2307 ··· 2348 2348 /* 2349 2349 * Finish setting up a cloned NFS2/3/4 superblock 2350 2350 */ 2351 - void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info) 2351 + static void nfs_clone_super(struct super_block *sb, 2352 + struct nfs_mount_info *mount_info) 2352 2353 { 2353 2354 const struct super_block *old_sb = mount_info->cloned->sb; 2354 2355 struct nfs_server *server = NFS_SB(sb);
+6 -17
fs/nfsd/nfs3xdr.c
··· 334 334 if (!p) 335 335 return 0; 336 336 p = xdr_decode_hyper(p, &args->offset); 337 + 337 338 args->count = ntohl(*p++); 338 - 339 - if (!xdr_argsize_check(rqstp, p)) 340 - return 0; 341 - 342 339 len = min(args->count, max_blocksize); 343 340 344 341 /* set up the kvec */ ··· 349 352 v++; 350 353 } 351 354 args->vlen = v; 352 - return 1; 355 + return xdr_argsize_check(rqstp, p); 353 356 } 354 357 355 358 int ··· 541 544 p = decode_fh(p, &args->fh); 542 545 if (!p) 543 546 return 0; 544 - if (!xdr_argsize_check(rqstp, p)) 545 - return 0; 546 547 args->buffer = page_address(*(rqstp->rq_next_page++)); 547 548 548 - return 1; 549 + return xdr_argsize_check(rqstp, p); 549 550 } 550 551 551 552 int ··· 569 574 args->verf = p; p += 2; 570 575 args->dircount = ~0; 571 576 args->count = ntohl(*p++); 572 - 573 - if (!xdr_argsize_check(rqstp, p)) 574 - return 0; 575 - 576 577 args->count = min_t(u32, args->count, PAGE_SIZE); 577 578 args->buffer = page_address(*(rqstp->rq_next_page++)); 578 579 579 - return 1; 580 + return xdr_argsize_check(rqstp, p); 580 581 } 581 582 582 583 int ··· 590 599 args->dircount = ntohl(*p++); 591 600 args->count = ntohl(*p++); 592 601 593 - if (!xdr_argsize_check(rqstp, p)) 594 - return 0; 595 - 596 602 len = args->count = min(args->count, max_blocksize); 597 603 while (len > 0) { 598 604 struct page *p = *(rqstp->rq_next_page++); ··· 597 609 args->buffer = page_address(p); 598 610 len -= PAGE_SIZE; 599 611 } 600 - return 1; 612 + 613 + return xdr_argsize_check(rqstp, p); 601 614 } 602 615 603 616 int
+6 -7
fs/nfsd/nfs4proc.c
··· 1769 1769 opdesc->op_get_currentstateid(cstate, &op->u); 1770 1770 op->status = opdesc->op_func(rqstp, cstate, &op->u); 1771 1771 1772 + /* Only from SEQUENCE */ 1773 + if (cstate->status == nfserr_replay_cache) { 1774 + dprintk("%s NFS4.1 replay from cache\n", __func__); 1775 + status = op->status; 1776 + goto out; 1777 + } 1772 1778 if (!op->status) { 1773 1779 if (opdesc->op_set_currentstateid) 1774 1780 opdesc->op_set_currentstateid(cstate, &op->u); ··· 1785 1779 if (need_wrongsec_check(rqstp)) 1786 1780 op->status = check_nfsd_access(current_fh->fh_export, rqstp); 1787 1781 } 1788 - 1789 1782 encode_op: 1790 - /* Only from SEQUENCE */ 1791 - if (cstate->status == nfserr_replay_cache) { 1792 - dprintk("%s NFS4.1 replay from cache\n", __func__); 1793 - status = op->status; 1794 - goto out; 1795 - } 1796 1783 if (op->status == nfserr_replay_me) { 1797 1784 op->replay = &cstate->replay_owner->so_replay; 1798 1785 nfsd4_encode_replay(&resp->xdr, op);
+3 -10
fs/nfsd/nfsxdr.c
··· 257 257 len = args->count = ntohl(*p++); 258 258 p++; /* totalcount - unused */ 259 259 260 - if (!xdr_argsize_check(rqstp, p)) 261 - return 0; 262 - 263 260 len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); 264 261 265 262 /* set up somewhere to store response. ··· 272 275 v++; 273 276 } 274 277 args->vlen = v; 275 - return 1; 278 + return xdr_argsize_check(rqstp, p); 276 279 } 277 280 278 281 int ··· 362 365 p = decode_fh(p, &args->fh); 363 366 if (!p) 364 367 return 0; 365 - if (!xdr_argsize_check(rqstp, p)) 366 - return 0; 367 368 args->buffer = page_address(*(rqstp->rq_next_page++)); 368 369 369 - return 1; 370 + return xdr_argsize_check(rqstp, p); 370 371 } 371 372 372 373 int ··· 402 407 args->cookie = ntohl(*p++); 403 408 args->count = ntohl(*p++); 404 409 args->count = min_t(u32, args->count, PAGE_SIZE); 405 - if (!xdr_argsize_check(rqstp, p)) 406 - return 0; 407 410 args->buffer = page_address(*(rqstp->rq_next_page++)); 408 411 409 - return 1; 412 + return xdr_argsize_check(rqstp, p); 410 413 } 411 414 412 415 /*
+1 -1
fs/ntfs/namei.c
··· 159 159 PTR_ERR(dent_inode)); 160 160 kfree(name); 161 161 /* Return the error code. */ 162 - return (struct dentry *)dent_inode; 162 + return ERR_CAST(dent_inode); 163 163 } 164 164 /* It is guaranteed that @name is no longer allocated at this point. */ 165 165 if (MREF_ERR(mref) == -ENOENT) {
+1 -1
fs/ocfs2/export.c
··· 119 119 120 120 if (IS_ERR(inode)) { 121 121 mlog_errno(PTR_ERR(inode)); 122 - result = (void *)inode; 122 + result = ERR_CAST(inode); 123 123 goto bail; 124 124 } 125 125
+1
fs/overlayfs/Kconfig
··· 1 1 config OVERLAY_FS 2 2 tristate "Overlay filesystem support" 3 + select EXPORTFS 3 4 help 4 5 An overlay filesystem combines two filesystems - an 'upper' filesystem 5 6 and a 'lower' filesystem. When a name exists in both filesystems, the
+17 -7
fs/overlayfs/copy_up.c
··· 300 300 return PTR_ERR(fh); 301 301 } 302 302 303 - err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0); 303 + /* 304 + * Do not fail when upper doesn't support xattrs. 305 + */ 306 + err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh, 307 + fh ? fh->len : 0, 0); 304 308 kfree(fh); 305 309 306 310 return err; ··· 346 342 if (tmpfile) 347 343 temp = ovl_do_tmpfile(upperdir, stat->mode); 348 344 else 349 - temp = ovl_lookup_temp(workdir, dentry); 350 - err = PTR_ERR(temp); 351 - if (IS_ERR(temp)) 352 - goto out1; 353 - 345 + temp = ovl_lookup_temp(workdir); 354 346 err = 0; 355 - if (!tmpfile) 347 + if (IS_ERR(temp)) { 348 + err = PTR_ERR(temp); 349 + temp = NULL; 350 + } 351 + 352 + if (!err && !tmpfile) 356 353 err = ovl_create_real(wdir, temp, &cattr, NULL, true); 357 354 358 355 if (new_creds) { ··· 458 453 459 454 ovl_path_upper(parent, &parentpath); 460 455 upperdir = parentpath.dentry; 456 + 457 + /* Mark parent "impure" because it may now contain non-pure upper */ 458 + err = ovl_set_impure(parent, upperdir); 459 + if (err) 460 + return err; 461 461 462 462 err = vfs_getattr(&parentpath, &pstat, 463 463 STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
+47 -14
fs/overlayfs/dir.c
··· 41 41 } 42 42 } 43 43 44 - struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) 44 + struct dentry *ovl_lookup_temp(struct dentry *workdir) 45 45 { 46 46 struct dentry *temp; 47 47 char name[20]; ··· 68 68 struct dentry *whiteout; 69 69 struct inode *wdir = workdir->d_inode; 70 70 71 - whiteout = ovl_lookup_temp(workdir, dentry); 71 + whiteout = ovl_lookup_temp(workdir); 72 72 if (IS_ERR(whiteout)) 73 73 return whiteout; 74 74 ··· 127 127 return err; 128 128 } 129 129 130 - static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) 130 + static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper, 131 + int xerr) 131 132 { 132 133 int err; 133 134 134 - err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0); 135 + err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr); 135 136 if (!err) 136 137 ovl_dentry_set_opaque(dentry); 137 138 138 139 return err; 140 + } 141 + 142 + static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) 143 + { 144 + /* 145 + * Fail with -EIO when trying to create opaque dir and upper doesn't 146 + * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to 147 + * return a specific error for noxattr case. 148 + */ 149 + return ovl_set_opaque_xerr(dentry, upperdentry, -EIO); 139 150 } 140 151 141 152 /* Common operations required to be done after creation of file on upper */ ··· 171 160 static bool ovl_type_merge(struct dentry *dentry) 172 161 { 173 162 return OVL_TYPE_MERGE(ovl_path_type(dentry)); 163 + } 164 + 165 + static bool ovl_type_origin(struct dentry *dentry) 166 + { 167 + return OVL_TYPE_ORIGIN(ovl_path_type(dentry)); 174 168 } 175 169 176 170 static int ovl_create_upper(struct dentry *dentry, struct inode *inode, ··· 266 250 if (upper->d_parent->d_inode != udir) 267 251 goto out_unlock; 268 252 269 - opaquedir = ovl_lookup_temp(workdir, dentry); 253 + opaquedir = ovl_lookup_temp(workdir); 270 254 err = PTR_ERR(opaquedir); 271 255 if (IS_ERR(opaquedir)) 272 256 goto out_unlock; ··· 398 382 if (err) 399 383 goto out; 400 384 401 - newdentry = ovl_lookup_temp(workdir, dentry); 385 + newdentry = ovl_lookup_temp(workdir); 402 386 err = PTR_ERR(newdentry); 403 387 if (IS_ERR(newdentry)) 404 388 goto out_unlock; ··· 862 846 if (IS_ERR(redirect)) 863 847 return PTR_ERR(redirect); 864 848 865 - err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT, 866 - redirect, strlen(redirect), 0); 849 + err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry), 850 + OVL_XATTR_REDIRECT, 851 + redirect, strlen(redirect), -EXDEV); 867 852 if (!err) { 868 853 spin_lock(&dentry->d_lock); 869 854 ovl_dentry_set_redirect(dentry, redirect); 870 855 spin_unlock(&dentry->d_lock); 871 856 } else { 872 857 kfree(redirect); 873 - if (err == -EOPNOTSUPP) 874 - ovl_clear_redirect_dir(dentry->d_sb); 875 - else 876 - pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); 858 + pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); 877 859 /* Fall back to userspace copy-up */ 878 860 err = -EXDEV; 879 861 } ··· 957 943 old_upperdir = ovl_dentry_upper(old->d_parent); 958 944 new_upperdir = ovl_dentry_upper(new->d_parent); 959 945 946 + if (!samedir) { 947 + /* 948 + * When moving a merge dir or non-dir with copy up origin into 949 + * a new parent, we are marking the new parent dir "impure". 950 + * When ovl_iterate() iterates an "impure" upper dir, it will 951 + * lookup the origin inodes of the entries to fill d_ino. 
952 + */ 953 + if (ovl_type_origin(old)) { 954 + err = ovl_set_impure(new->d_parent, new_upperdir); 955 + if (err) 956 + goto out_revert_creds; 957 + } 958 + if (!overwrite && ovl_type_origin(new)) { 959 + err = ovl_set_impure(old->d_parent, old_upperdir); 960 + if (err) 961 + goto out_revert_creds; 962 + } 963 + } 964 + 960 965 trap = lock_rename(new_upperdir, old_upperdir); 961 966 962 967 olddentry = lookup_one_len(old->d_name.name, old_upperdir, ··· 1025 992 if (ovl_type_merge_or_lower(old)) 1026 993 err = ovl_set_redirect(old, samedir); 1027 994 else if (!old_opaque && ovl_type_merge(new->d_parent)) 1028 - err = ovl_set_opaque(old, olddentry); 995 + err = ovl_set_opaque_xerr(old, olddentry, -EXDEV); 1029 996 if (err) 1030 997 goto out_dput; 1031 998 } ··· 1033 1000 if (ovl_type_merge_or_lower(new)) 1034 1001 err = ovl_set_redirect(new, samedir); 1035 1002 else if (!new_opaque && ovl_type_merge(old->d_parent)) 1036 - err = ovl_set_opaque(new, newdentry); 1003 + err = ovl_set_opaque_xerr(new, newdentry, -EXDEV); 1037 1004 if (err) 1038 1005 goto out_dput; 1039 1006 }
+11 -1
fs/overlayfs/inode.c
··· 240 240 return res; 241 241 } 242 242 243 + static bool ovl_can_list(const char *s) 244 + { 245 + /* List all non-trusted xattrs */ 246 + if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0) 247 + return true; 248 + 249 + /* Never list trusted.overlay, list other trusted for superuser only */ 250 + return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN); 251 + } 252 + 243 253 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) 244 254 { 245 255 struct dentry *realdentry = ovl_dentry_real(dentry); ··· 273 263 return -EIO; 274 264 275 265 len -= slen; 276 - if (ovl_is_private_xattr(s)) { 266 + if (!ovl_can_list(s)) { 277 267 res -= slen; 278 268 memmove(s, s + slen, len); 279 269 } else {
+5 -11
fs/overlayfs/namei.c
··· 169 169 170 170 static bool ovl_is_opaquedir(struct dentry *dentry) 171 171 { 172 - int res; 173 - char val; 174 - 175 - if (!d_is_dir(dentry)) 176 - return false; 177 - 178 - res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1); 179 - if (res == 1 && val == 'y') 180 - return true; 181 - 182 - return false; 172 + return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE); 183 173 } 184 174 185 175 static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, ··· 341 351 unsigned int ctr = 0; 342 352 struct inode *inode = NULL; 343 353 bool upperopaque = false; 354 + bool upperimpure = false; 344 355 char *upperredirect = NULL; 345 356 struct dentry *this; 346 357 unsigned int i; ··· 386 395 poe = roe; 387 396 } 388 397 upperopaque = d.opaque; 398 + if (upperdentry && d.is_dir) 399 + upperimpure = ovl_is_impuredir(upperdentry); 389 400 } 390 401 391 402 if (!d.stop && poe->numlower) { ··· 456 463 457 464 revert_creds(old_cred); 458 465 oe->opaque = upperopaque; 466 + oe->impure = upperimpure; 459 467 oe->redirect = upperredirect; 460 468 oe->__upperdentry = upperdentry; 461 469 memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
+14 -2
fs/overlayfs/overlayfs.h
··· 24 24 #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque" 25 25 #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect" 26 26 #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin" 27 + #define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure" 27 28 28 29 /* 29 30 * The tuple (fh,uuid) is a universal unique identifier for a copy up origin, ··· 204 203 struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry); 205 204 void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache); 206 205 bool ovl_dentry_is_opaque(struct dentry *dentry); 206 + bool ovl_dentry_is_impure(struct dentry *dentry); 207 207 bool ovl_dentry_is_whiteout(struct dentry *dentry); 208 208 void ovl_dentry_set_opaque(struct dentry *dentry); 209 209 bool ovl_redirect_dir(struct super_block *sb); 210 - void ovl_clear_redirect_dir(struct super_block *sb); 211 210 const char *ovl_dentry_get_redirect(struct dentry *dentry); 212 211 void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect); 213 212 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); ··· 220 219 struct file *ovl_path_open(struct path *path, int flags); 221 220 int ovl_copy_up_start(struct dentry *dentry); 222 221 void ovl_copy_up_end(struct dentry *dentry); 222 + bool ovl_check_dir_xattr(struct dentry *dentry, const char *name); 223 + int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry, 224 + const char *name, const void *value, size_t size, 225 + int xerr); 226 + int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry); 227 + 228 + static inline bool ovl_is_impuredir(struct dentry *dentry) 229 + { 230 + return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE); 231 + } 232 + 223 233 224 234 /* namei.c */ 225 235 int ovl_path_next(int idx, struct dentry *dentry, struct path *path); ··· 275 263 276 264 /* dir.c */ 277 265 extern const struct inode_operations ovl_dir_inode_operations; 278 - struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry); 266 + struct dentry *ovl_lookup_temp(struct dentry *workdir); 279 267 struct cattr { 280 268 dev_t rdev; 281 269 umode_t mode;
+2
fs/overlayfs/ovl_entry.h
··· 28 28 /* creds of process who forced instantiation of super block */ 29 29 const struct cred *creator_cred; 30 30 bool tmpfile; 31 + bool noxattr; 31 32 wait_queue_head_t copyup_wq; 32 33 /* sb common to all layers */ 33 34 struct super_block *same_sb; ··· 43 42 u64 version; 44 43 const char *redirect; 45 44 bool opaque; 45 + bool impure; 46 46 bool copying; 47 47 }; 48 48 struct rcu_head rcu;
+17 -1
fs/overlayfs/super.c
··· 891 891 dput(temp); 892 892 else 893 893 pr_warn("overlayfs: upper fs does not support tmpfile.\n"); 894 + 895 + /* 896 + * Check if upper/work fs supports trusted.overlay.* 897 + * xattr 898 + */ 899 + err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE, 900 + "0", 1, 0); 901 + if (err) { 902 + ufs->noxattr = true; 903 + pr_warn("overlayfs: upper fs does not support xattr.\n"); 904 + } else { 905 + vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE); 906 + } 894 907 } 895 908 } 896 909 ··· 974 961 path_put(&workpath); 975 962 kfree(lowertmp); 976 963 977 - oe->__upperdentry = upperpath.dentry; 964 + if (upperpath.dentry) { 965 + oe->__upperdentry = upperpath.dentry; 966 + oe->impure = ovl_is_impuredir(upperpath.dentry); 967 + } 978 968 for (i = 0; i < numlower; i++) { 979 969 oe->lowerstack[i].dentry = stack[i].dentry; 980 970 oe->lowerstack[i].mnt = ufs->lower_mnt[i];
+64 -8
fs/overlayfs/util.c
··· 175 175 return oe->opaque; 176 176 } 177 177 178 + bool ovl_dentry_is_impure(struct dentry *dentry) 179 + { 180 + struct ovl_entry *oe = dentry->d_fsdata; 181 + 182 + return oe->impure; 183 + } 184 + 178 185 bool ovl_dentry_is_whiteout(struct dentry *dentry) 179 186 { 180 187 return !dentry->d_inode && ovl_dentry_is_opaque(dentry); ··· 198 191 { 199 192 struct ovl_fs *ofs = sb->s_fs_info; 200 193 201 - return ofs->config.redirect_dir; 202 - } 203 - 204 - void ovl_clear_redirect_dir(struct super_block *sb) 205 - { 206 - struct ovl_fs *ofs = sb->s_fs_info; 207 - 208 - ofs->config.redirect_dir = false; 194 + return ofs->config.redirect_dir && !ofs->noxattr; 209 195 } 210 196 211 197 const char *ovl_dentry_get_redirect(struct dentry *dentry) ··· 302 302 oe->copying = false; 303 303 wake_up_locked(&ofs->copyup_wq); 304 304 spin_unlock(&ofs->copyup_wq.lock); 305 + } 306 + 307 + bool ovl_check_dir_xattr(struct dentry *dentry, const char *name) 308 + { 309 + int res; 310 + char val; 311 + 312 + if (!d_is_dir(dentry)) 313 + return false; 314 + 315 + res = vfs_getxattr(dentry, name, &val, 1); 316 + if (res == 1 && val == 'y') 317 + return true; 318 + 319 + return false; 320 + } 321 + 322 + int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry, 323 + const char *name, const void *value, size_t size, 324 + int xerr) 325 + { 326 + int err; 327 + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; 328 + 329 + if (ofs->noxattr) 330 + return xerr; 331 + 332 + err = ovl_do_setxattr(upperdentry, name, value, size, 0); 333 + 334 + if (err == -EOPNOTSUPP) { 335 + pr_warn("overlayfs: cannot set %s xattr on upper\n", name); 336 + ofs->noxattr = true; 337 + return xerr; 338 + } 339 + 340 + return err; 341 + } 342 + 343 + int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry) 344 + { 345 + int err; 346 + struct ovl_entry *oe = dentry->d_fsdata; 347 + 348 + if (oe->impure) 349 + return 0; 350 + 351 + /* 352 + * Do not fail when upper doesn't support xattrs. 353 + * Upper inodes won't have origin nor redirect xattr anyway. 354 + */ 355 + err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE, 356 + "y", 1, 0); 357 + if (!err) 358 + oe->impure = true; 359 + 360 + return err; 305 361 }
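Editor's note: ovl_check_setxattr() is a probe-once, degrade-gracefully policy: the first EOPNOTSUPP sets a sticky superblock-wide noxattr flag, and later callers short-circuit to a caller-chosen fallback error (0, -EIO or -EXDEV above). The same pattern in isolation, with a hypothetical backend op:

	#include <linux/errno.h>
	#include <linux/types.h>

	struct fs_state {
		bool noxattr;		/* sticky: set on first EOPNOTSUPP */
	};

	int backend_setxattr(struct fs_state *s);	/* hypothetical op */

	static int try_setxattr(struct fs_state *s, int xerr)
	{
		int err;

		if (s->noxattr)
			return xerr;	/* already known unsupported */

		err = backend_setxattr(s);
		if (err == -EOPNOTSUPP) {
			s->noxattr = true;
			return xerr;	/* caller decides the fallback */
		}

		return err;
	}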
+1 -1
fs/proc/base.c
··· 821 821 if (!mmget_not_zero(mm)) 822 822 goto free; 823 823 824 - flags = write ? FOLL_WRITE : 0; 824 + flags = FOLL_FORCE | (write ? FOLL_WRITE : 0); 825 825 826 826 while (count > 0) { 827 827 int this_len = min_t(int, count, PAGE_SIZE);
+2 -2
fs/reiserfs/journal.c
··· 1112 1112 depth = reiserfs_write_unlock_nested(s); 1113 1113 if (reiserfs_barrier_flush(s)) 1114 1114 __sync_dirty_buffer(jl->j_commit_bh, 1115 - REQ_PREFLUSH | REQ_FUA); 1115 + REQ_SYNC | REQ_PREFLUSH | REQ_FUA); 1116 1116 else 1117 1117 sync_dirty_buffer(jl->j_commit_bh); 1118 1118 reiserfs_write_lock_nested(s, depth); ··· 1271 1271 1272 1272 if (reiserfs_barrier_flush(sb)) 1273 1273 __sync_dirty_buffer(journal->j_header_bh, 1274 - REQ_PREFLUSH | REQ_FUA); 1274 + REQ_SYNC | REQ_PREFLUSH | REQ_FUA); 1275 1275 else 1276 1276 sync_dirty_buffer(journal->j_header_bh); 1277 1277
+2 -3
fs/ufs/super.c
··· 812 812 uspi->s_dirblksize = UFS_SECTOR_SIZE; 813 813 super_block_offset=UFS_SBLOCK; 814 814 815 - /* Keep 2Gig file limit. Some UFS variants need to override 816 - this but as I don't know which I'll let those in the know loosen 817 - the rules */ 815 + sb->s_maxbytes = MAX_LFS_FILESIZE; 816 + 818 817 switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) { 819 818 case UFS_MOUNT_UFSTYPE_44BSD: 820 819 UFSD("ufstype=44bsd\n");
+26 -12
fs/xfs/xfs_buf.c
··· 97 97 xfs_buf_ioacct_inc( 98 98 struct xfs_buf *bp) 99 99 { 100 - if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT)) 100 + if (bp->b_flags & XBF_NO_IOACCT) 101 101 return; 102 102 103 103 ASSERT(bp->b_flags & XBF_ASYNC); 104 - bp->b_flags |= _XBF_IN_FLIGHT; 105 - percpu_counter_inc(&bp->b_target->bt_io_count); 104 + spin_lock(&bp->b_lock); 105 + if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { 106 + bp->b_state |= XFS_BSTATE_IN_FLIGHT; 107 + percpu_counter_inc(&bp->b_target->bt_io_count); 108 + } 109 + spin_unlock(&bp->b_lock); 106 110 } 107 111 108 112 /* ··· 114 110 * freed and unaccount from the buftarg. 115 111 */ 116 112 static inline void 113 + __xfs_buf_ioacct_dec( 114 + struct xfs_buf *bp) 115 + { 116 + ASSERT(spin_is_locked(&bp->b_lock)); 117 + 118 + if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { 119 + bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; 120 + percpu_counter_dec(&bp->b_target->bt_io_count); 121 + } 122 + } 123 + 124 + static inline void 117 125 xfs_buf_ioacct_dec( 118 126 struct xfs_buf *bp) 119 127 { 120 - if (!(bp->b_flags & _XBF_IN_FLIGHT)) 121 - return; 122 - 123 - bp->b_flags &= ~_XBF_IN_FLIGHT; 124 - percpu_counter_dec(&bp->b_target->bt_io_count); 128 + spin_lock(&bp->b_lock); 129 + __xfs_buf_ioacct_dec(bp); 130 + spin_unlock(&bp->b_lock); 125 131 } 126 132 127 133 /* ··· 163 149 * unaccounted (released to LRU) before that occurs. Drop in-flight 164 150 * status now to preserve accounting consistency. 165 151 */ 166 - xfs_buf_ioacct_dec(bp); 167 - 168 152 spin_lock(&bp->b_lock); 153 + __xfs_buf_ioacct_dec(bp); 154 + 169 155 atomic_set(&bp->b_lru_ref, 0); 170 156 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && 171 157 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) ··· 993 979 * ensures the decrement occurs only once per-buf. 994 980 */ 995 981 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) 996 - xfs_buf_ioacct_dec(bp); 982 + __xfs_buf_ioacct_dec(bp); 997 983 goto out_unlock; 998 984 } 999 985 1000 986 /* the last reference has been dropped ... */ 1001 - xfs_buf_ioacct_dec(bp); 987 + __xfs_buf_ioacct_dec(bp); 1002 988 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { 1003 989 /* 1004 990 * If the buffer is added to the LRU take a new reference to the
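Editor's note: the xfs change makes the in-flight bit and the percpu counter move together under ->b_lock, so a concurrent set+increment and clear+decrement can no longer interleave and skew bt_io_count. Reduced to its shape (types hypothetical):

	#include <linux/spinlock.h>
	#include <linux/percpu_counter.h>

	#define EB_IN_FLIGHT	(1 << 1)	/* hypothetical state bit */

	struct ebuf {				/* hypothetical, mirrors the shape */
		spinlock_t		lock;
		unsigned int		state;
		struct percpu_counter	*io_count;
	};

	static void ebuf_ioacct_inc(struct ebuf *bp)
	{
		spin_lock(&bp->lock);
		if (!(bp->state & EB_IN_FLIGHT)) {
			bp->state |= EB_IN_FLIGHT;
			percpu_counter_inc(bp->io_count);
		}
		spin_unlock(&bp->lock);
	}

	static void ebuf_ioacct_dec(struct ebuf *bp)
	{
		spin_lock(&bp->lock);
		if (bp->state & EB_IN_FLIGHT) {
			bp->state &= ~EB_IN_FLIGHT;
			percpu_counter_dec(bp->io_count);
		}
		spin_unlock(&bp->lock);
	}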
+2 -3
fs/xfs/xfs_buf.h
··· 63 63 #define _XBF_KMEM (1 << 21)/* backed by heap memory */ 64 64 #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ 65 65 #define _XBF_COMPOUND (1 << 23)/* compound buffer */ 66 - #define _XBF_IN_FLIGHT (1 << 25) /* I/O in flight, for accounting purposes */ 67 66 68 67 typedef unsigned int xfs_buf_flags_t; 69 68 ··· 83 84 { _XBF_PAGES, "PAGES" }, \ 84 85 { _XBF_KMEM, "KMEM" }, \ 85 86 { _XBF_DELWRI_Q, "DELWRI_Q" }, \ 86 - { _XBF_COMPOUND, "COMPOUND" }, \ 87 - { _XBF_IN_FLIGHT, "IN_FLIGHT" } 87 + { _XBF_COMPOUND, "COMPOUND" } 88 88 89 89 90 90 /* 91 91 * Internal state flags. 92 92 */ 93 93 #define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */ 94 + #define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */ 94 95 95 96 /* 96 97 * The xfs_buftarg contains 2 notions of "sector size" -
+51
include/drm/drm_dp_helper.h
··· 913 913 int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); 914 914 int drm_dp_stop_crc(struct drm_dp_aux *aux); 915 915 916 + struct drm_dp_dpcd_ident { 917 + u8 oui[3]; 918 + u8 device_id[6]; 919 + u8 hw_rev; 920 + u8 sw_major_rev; 921 + u8 sw_minor_rev; 922 + } __packed; 923 + 924 + /** 925 + * struct drm_dp_desc - DP branch/sink device descriptor 926 + * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). 927 + * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. 928 + */ 929 + struct drm_dp_desc { 930 + struct drm_dp_dpcd_ident ident; 931 + u32 quirks; 932 + }; 933 + 934 + int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, 935 + bool is_branch); 936 + 937 + /** 938 + * enum drm_dp_quirk - Display Port sink/branch device specific quirks 939 + * 940 + * Display Port sink and branch devices in the wild have a variety of bugs, try 941 + * to collect them here. The quirks are shared, but it's up to the drivers to 942 + * implement workarounds for them. 943 + */ 944 + enum drm_dp_quirk { 945 + /** 946 + * @DP_DPCD_QUIRK_LIMITED_M_N: 947 + * 948 + * The device requires main link attributes Mvid and Nvid to be limited 949 + * to 16 bits. 950 + */ 951 + DP_DPCD_QUIRK_LIMITED_M_N, 952 + }; 953 + 954 + /** 955 + * drm_dp_has_quirk() - does the DP device have a specific quirk 956 + * @desc: Device descriptor filled by drm_dp_read_desc() 957 + * @quirk: Quirk to query for 958 + * 959 + * Return true if DP device identified by @desc has @quirk. 960 + */ 961 + static inline bool 962 + drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) 963 + { 964 + return desc->quirks & BIT(quirk); 965 + } 966 + 916 967 #endif /* _DRM_DP_HELPER_H_ */
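Editor's note: a plausible consumer of the new descriptor API, where aux is the connector's struct drm_dp_aux (illustrative sketch, not a driver from this diff):

	#include <drm/drm_dp_helper.h>

	static bool sink_needs_limited_m_n(struct drm_dp_aux *aux)
	{
		struct drm_dp_desc desc;

		if (drm_dp_read_desc(aux, &desc, false /* sink, not branch */))
			return false;	/* unreadable: assume no quirk */

		return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N);
	}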
+1 -1
include/linux/gfp.h
··· 41 41 #define ___GFP_WRITE 0x800000u 42 42 #define ___GFP_KSWAPD_RECLAIM 0x1000000u 43 43 #ifdef CONFIG_LOCKDEP 44 - #define ___GFP_NOLOCKDEP 0x4000000u 44 + #define ___GFP_NOLOCKDEP 0x2000000u 45 45 #else 46 46 #define ___GFP_NOLOCKDEP 0 47 47 #endif
+7
include/linux/gpio/machine.h
··· 56 56 .flags = _flags, \ 57 57 } 58 58 59 + #ifdef CONFIG_GPIOLIB 59 60 void gpiod_add_lookup_table(struct gpiod_lookup_table *table); 60 61 void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); 62 + #else 63 + static inline 64 + void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} 65 + static inline 66 + void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} 67 + #endif 61 68 62 69 #endif /* __LINUX_GPIO_MACHINE_H */
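Editor's note: with the !CONFIG_GPIOLIB stubs in place, board code like the following builds in either configuration; the call simply compiles to a no-op when gpiolib is disabled (device and chip names hypothetical):

	#include <linux/gpio/machine.h>
	#include <linux/init.h>

	static struct gpiod_lookup_table board_gpios = {
		.dev_id = "foo-device",		/* hypothetical consumer */
		.table = {
			GPIO_LOOKUP("gpiochip0", 4, "reset", GPIO_ACTIVE_LOW),
			{ },
		},
	};

	static int __init board_gpios_init(void)
	{
		gpiod_add_lookup_table(&board_gpios);
		return 0;
	}
	device_initcall(board_gpios_init);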
+5 -1
include/linux/jiffies.h
··· 64 64 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ 65 65 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) 66 66 67 + #ifndef __jiffy_arch_data 68 + #define __jiffy_arch_data 69 + #endif 70 + 67 71 /* 68 72 * The 64-bit value is not atomic - you MUST NOT read it 69 73 * without sampling the sequence number in jiffies_lock. 70 74 * get_jiffies_64() will do this for you as appropriate. 71 75 */ 72 76 extern u64 __cacheline_aligned_in_smp jiffies_64; 73 - extern unsigned long volatile __cacheline_aligned_in_smp jiffies; 77 + extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; 74 78 75 79 #if (BITS_PER_LONG < 64) 76 80 u64 get_jiffies_64(void);
+8
include/linux/memblock.h
··· 425 425 } 426 426 #endif 427 427 428 + extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, 429 + phys_addr_t end_addr); 428 430 #else 429 431 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) 432 + { 433 + return 0; 434 + } 435 + 436 + static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, 437 + phys_addr_t end_addr) 430 438 { 431 439 return 0; 432 440 }
+9 -1
include/linux/mlx5/mlx5_ifc.h
··· 766 766 MLX5_CAP_PORT_TYPE_ETH = 0x1, 767 767 }; 768 768 769 + enum { 770 + MLX5_CAP_UMR_FENCE_STRONG = 0x0, 771 + MLX5_CAP_UMR_FENCE_SMALL = 0x1, 772 + MLX5_CAP_UMR_FENCE_NONE = 0x2, 773 + }; 774 + 769 775 struct mlx5_ifc_cmd_hca_cap_bits { 770 776 u8 reserved_at_0[0x80]; 771 777 ··· 881 875 u8 reserved_at_202[0x1]; 882 876 u8 ipoib_enhanced_offloads[0x1]; 883 877 u8 ipoib_basic_offloads[0x1]; 884 - u8 reserved_at_205[0xa]; 878 + u8 reserved_at_205[0x5]; 879 + u8 umr_fence[0x2]; 880 + u8 reserved_at_20c[0x3]; 885 881 u8 drain_sigerr[0x1]; 886 882 u8 cmdif_checksum[0x2]; 887 883 u8 sigerr_cqe[0x1];
+11
include/linux/mm.h
··· 2327 2327 #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2328 2328 #define FOLL_COW 0x4000 /* internal GUP flag */ 2329 2329 2330 + static inline int vm_fault_to_errno(int vm_fault, int foll_flags) 2331 + { 2332 + if (vm_fault & VM_FAULT_OOM) 2333 + return -ENOMEM; 2334 + if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 2335 + return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; 2336 + if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 2337 + return -EFAULT; 2338 + return 0; 2339 + } 2340 + 2330 2341 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2331 2342 void *data); 2332 2343 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
+1
include/linux/mmzone.h
··· 678 678 * is the first PFN that needs to be initialised. 679 679 */ 680 680 unsigned long first_deferred_pfn; 681 + unsigned long static_init_size; 681 682 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 682 683 683 684 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+1
include/linux/mod_devicetable.h
··· 467 467 DMI_PRODUCT_VERSION, 468 468 DMI_PRODUCT_SERIAL, 469 469 DMI_PRODUCT_UUID, 470 + DMI_PRODUCT_FAMILY, 470 471 DMI_BOARD_VENDOR, 471 472 DMI_BOARD_NAME, 472 473 DMI_BOARD_VERSION,
-3
include/linux/pinctrl/pinconf-generic.h
··· 42 42 * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high 43 43 * impedance to VDD). If the argument is != 0 pull-up is enabled, 44 44 * if it is 0, pull-up is total, i.e. the pin is connected to VDD. 45 - * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous 46 - * input and output operations. 47 45 * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open 48 46 * collector) which means it is usually wired with other output ports 49 47 * which are then pulled up with an external resistor. Setting this ··· 96 98 PIN_CONFIG_BIAS_PULL_DOWN, 97 99 PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 98 100 PIN_CONFIG_BIAS_PULL_UP, 99 - PIN_CONFIG_BIDIRECTIONAL, 100 101 PIN_CONFIG_DRIVE_OPEN_DRAIN, 101 102 PIN_CONFIG_DRIVE_OPEN_SOURCE, 102 103 PIN_CONFIG_DRIVE_PUSH_PULL,
+2 -1
include/linux/sunrpc/svc.h
··· 336 336 { 337 337 char *cp = (char *)p; 338 338 struct kvec *vec = &rqstp->rq_arg.head[0]; 339 - return cp == (char *)vec->iov_base + vec->iov_len; 339 + return cp >= (char*)vec->iov_base 340 + && cp <= (char*)vec->iov_base + vec->iov_len; 340 341 } 341 342 342 343 static inline int
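Editor's note: this svc.h hunk relaxes xdr_argsize_check() from "the decode pointer must land exactly at the end of the head buffer" to "it must still lie within it", which is what lets the nfsd decoders above run the check once, after all fields have been consumed. The test in isolation:

	#include <linux/types.h>

	/* A decode that advanced to @p is valid iff @p never left the
	 * received buffer [base, base + len].
	 */
	static bool decoded_in_bounds(const char *p, const char *base, size_t len)
	{
		return p >= base && p <= base + len;
	}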
+3 -22
include/rdma/ib_sa.h
··· 158 158 }; 159 159 160 160 struct sa_path_rec_ib { 161 - __be64 service_id; 162 161 __be16 dlid; 163 162 __be16 slid; 164 163 u8 raw_traffic; ··· 173 174 }; 174 175 175 176 struct sa_path_rec_opa { 176 - __be64 service_id; 177 177 __be32 dlid; 178 178 __be32 slid; 179 179 u8 raw_traffic; ··· 187 189 struct sa_path_rec { 188 190 union ib_gid dgid; 189 191 union ib_gid sgid; 192 + __be64 service_id; 190 193 /* reserved */ 191 194 __be32 flow_label; 192 195 u8 hop_limit; ··· 261 262 ib->ib.dlid = htons(ntohl(opa->opa.dlid)); 262 263 ib->ib.slid = htons(ntohl(opa->opa.slid)); 263 264 } 264 - ib->ib.service_id = opa->opa.service_id; 265 + ib->service_id = opa->service_id; 265 266 ib->ib.raw_traffic = opa->opa.raw_traffic; 266 267 } 267 268 ··· 280 281 } 281 282 opa->opa.slid = slid; 282 283 opa->opa.dlid = dlid; 283 - opa->opa.service_id = ib->ib.service_id; 284 + opa->service_id = ib->service_id; 284 285 opa->opa.raw_traffic = ib->ib.raw_traffic; 285 286 } 286 287 ··· 590 591 (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2)); 591 592 } 592 593 593 - static inline void sa_path_set_service_id(struct sa_path_rec *rec, 594 - __be64 service_id) 595 - { 596 - if (rec->rec_type == SA_PATH_REC_TYPE_IB) 597 - rec->ib.service_id = service_id; 598 - else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) 599 - rec->opa.service_id = service_id; 600 - } 601 - 602 594 static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid) 603 595 { 604 596 if (rec->rec_type == SA_PATH_REC_TYPE_IB) ··· 613 623 rec->ib.raw_traffic = raw_traffic; 614 624 else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) 615 625 rec->opa.raw_traffic = raw_traffic; 616 - } 617 - 618 - static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec) 619 - { 620 - if (rec->rec_type == SA_PATH_REC_TYPE_IB) 621 - return rec->ib.service_id; 622 - else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) 623 - return rec->opa.service_id; 624 - return 0; 625 626 } 626 627 627 628 static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
-10
include/rdma/rdma_netlink.h
··· 10 10 struct module *module; 11 11 }; 12 12 13 - int ibnl_init(void); 14 - void ibnl_cleanup(void); 15 - 16 13 /** 17 14 * Add a client to the list of IB netlink exporters. 18 15 * @index: Index of the added client ··· 73 76 */ 74 77 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, 75 78 unsigned int group, gfp_t flags); 76 - 77 - /** 78 - * Check if there are any listeners to the netlink group 79 - * @group: the netlink group ID 80 - * Returns 0 on success or a negative for no listeners. 81 - */ 82 - int ibnl_chk_listeners(unsigned int group); 83 79 84 80 #endif /* _RDMA_NETLINK_H */
+1
include/target/iscsi/iscsi_target_core.h
··· 557 557 #define LOGIN_FLAGS_READ_ACTIVE 1 558 558 #define LOGIN_FLAGS_CLOSED 2 559 559 #define LOGIN_FLAGS_READY 4 560 + #define LOGIN_FLAGS_INITIAL_PDU 8 560 561 unsigned long login_flags; 561 562 struct delayed_work login_work; 562 563 struct delayed_work login_cleanup_work;
+1
kernel/livepatch/Kconfig
··· 10 10 depends on SYSFS 11 11 depends on KALLSYMS_ALL 12 12 depends on HAVE_LIVEPATCH 13 + depends on !TRIM_UNUSED_KSYMS 13 14 help 14 15 Say Y here if you want to support kernel live patching. 15 16 This option has no runtime impact until a kernel "patch"
+8 -12
mm/gup.c
··· 407 407 408 408 ret = handle_mm_fault(vma, address, fault_flags); 409 409 if (ret & VM_FAULT_ERROR) { 410 - if (ret & VM_FAULT_OOM) 411 - return -ENOMEM; 412 - if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 413 - return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; 414 - if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 415 - return -EFAULT; 410 + int err = vm_fault_to_errno(ret, *flags); 411 + 412 + if (err) 413 + return err; 416 414 BUG(); 417 415 } 418 416 ··· 721 723 ret = handle_mm_fault(vma, address, fault_flags); 722 724 major |= ret & VM_FAULT_MAJOR; 723 725 if (ret & VM_FAULT_ERROR) { 724 - if (ret & VM_FAULT_OOM) 725 - return -ENOMEM; 726 - if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 727 - return -EHWPOISON; 728 - if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 729 - return -EFAULT; 726 + int err = vm_fault_to_errno(ret, 0); 727 + 728 + if (err) 729 + return err; 730 730 BUG(); 731 731 } 732 732
+5
mm/hugetlb.c
··· 4170 4170 } 4171 4171 ret = hugetlb_fault(mm, vma, vaddr, fault_flags); 4172 4172 if (ret & VM_FAULT_ERROR) { 4173 + int err = vm_fault_to_errno(ret, flags); 4174 + 4175 + if (err) 4176 + return err; 4177 + 4173 4178 remainder = 0; 4174 4179 break; 4175 4180 }
+1 -2
mm/ksm.c
··· 1028 1028 goto out; 1029 1029 1030 1030 if (PageTransCompound(page)) { 1031 - err = split_huge_page(page); 1032 - if (err) 1031 + if (split_huge_page(page)) 1033 1032 goto out_unlock; 1034 1033 } 1035 1034
+23
mm/memblock.c
··· 1739 1739 } 1740 1740 } 1741 1741 1742 + extern unsigned long __init_memblock 1743 + memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr) 1744 + { 1745 + struct memblock_region *rgn; 1746 + unsigned long size = 0; 1747 + int idx; 1748 + 1749 + for_each_memblock_type((&memblock.reserved), rgn) { 1750 + phys_addr_t start, end; 1751 + 1752 + if (rgn->base + rgn->size < start_addr) 1753 + continue; 1754 + if (rgn->base > end_addr) 1755 + continue; 1756 + 1757 + start = rgn->base; 1758 + end = start + rgn->size; 1759 + size += end - start; 1760 + } 1761 + 1762 + return size; 1763 + } 1764 + 1742 1765 void __init_memblock __memblock_dump_all(void) 1743 1766 { 1744 1767 pr_info("MEMBLOCK configuration:\n");
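One behavioral detail of the new helper: a reserved region that overlaps [start_addr, end_addr] at all is counted at its full rgn->size, with no clipping to the window, so for example a 512MB crash-kernel reservation whose tail extends past end_addr still contributes all 512MB. The caller added in mm/page_alloc.c below treats the result as an estimate, so the over-count only makes the deferred-init window conservatively larger.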
+2 -6
mm/memory-failure.c
··· 1595 1595 if (ret) { 1596 1596 pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n", 1597 1597 pfn, ret, page->flags, &page->flags); 1598 - /* 1599 - * We know that soft_offline_huge_page() tries to migrate 1600 - * only one hugepage pointed to by hpage, so we need not 1601 - * run through the pagelist here. 1602 - */ 1603 - putback_active_hugepage(hpage); 1598 + if (!list_empty(&pagelist)) 1599 + putback_movable_pages(&pagelist); 1604 1600 if (ret > 0) 1605 1601 ret = -EIO; 1606 1602 } else {
+30 -10
mm/memory.c
··· 3029 3029 return ret; 3030 3030 } 3031 3031 3032 + /* 3033 + * The ordering of these checks is important for pmds with _PAGE_DEVMAP set. 3034 + * If we check pmd_trans_unstable() first we will trip the bad_pmd() check 3035 + * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly 3036 + * returning 1 but not before it spams dmesg with the pmd_clear_bad() output. 3037 + */ 3038 + static int pmd_devmap_trans_unstable(pmd_t *pmd) 3039 + { 3040 + return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); 3041 + } 3042 + 3032 3043 static int pte_alloc_one_map(struct vm_fault *vmf) 3033 3044 { 3034 3045 struct vm_area_struct *vma = vmf->vma; ··· 3063 3052 map_pte: 3064 3053 /* 3065 3054 * If a huge pmd materialized under us just retry later. Use 3066 - * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd 3067 - * didn't become pmd_trans_huge under us and then back to pmd_none, as 3068 - * a result of MADV_DONTNEED running immediately after a huge pmd fault 3069 - * in a different thread of this mm, in turn leading to a misleading 3070 - * pmd_trans_huge() retval. All we have to ensure is that it is a 3071 - * regular pmd that we can walk with pte_offset_map() and we can do that 3072 - * through an atomic read in C, which is what pmd_trans_unstable() 3073 - * provides. 3055 + * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of 3056 + * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge 3057 + * under us and then back to pmd_none, as a result of MADV_DONTNEED 3058 + * running immediately after a huge pmd fault in a different thread of 3059 + * this mm, in turn leading to a misleading pmd_trans_huge() retval. 3060 + * All we have to ensure is that it is a regular pmd that we can walk 3061 + * with pte_offset_map() and we can do that through an atomic read in 3062 + * C, which is what pmd_trans_unstable() provides. 3074 3063 */ 3075 - if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd)) 3064 + if (pmd_devmap_trans_unstable(vmf->pmd)) 3076 3065 return VM_FAULT_NOPAGE; 3077 3066 3067 + /* 3068 + * At this point we know that our vmf->pmd points to a page of ptes 3069 + * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge() 3070 + * for the duration of the fault. If a racing MADV_DONTNEED runs and 3071 + * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still 3072 + * be valid and we will re-check to make sure the vmf->pte isn't 3073 + * pte_none() under vmf->ptl protection when we return to 3074 + * alloc_set_pte(). 3075 + */ 3078 3076 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 3079 3077 &vmf->ptl); 3080 3078 return 0; ··· 3710 3690 vmf->pte = NULL; 3711 3691 } else { 3712 3692 /* See comment in pte_alloc_one_map() */ 3713 - if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd)) 3693 + if (pmd_devmap_trans_unstable(vmf->pmd)) 3714 3694 return 0; 3715 3695 /* 3716 3696 * A regular pmd is established and it can't morph into a huge
+3 -2
mm/mlock.c
··· 284 284 { 285 285 int i; 286 286 int nr = pagevec_count(pvec); 287 - int delta_munlocked; 287 + int delta_munlocked = -nr; 288 288 struct pagevec pvec_putback; 289 289 int pgrescued = 0; 290 290 ··· 304 304 continue; 305 305 else 306 306 __munlock_isolation_failed(page); 307 + } else { 308 + delta_munlocked++; 307 309 } 308 310 309 311 /* ··· 317 315 pagevec_add(&pvec_putback, pvec->pages[i]); 318 316 pvec->pages[i] = NULL; 319 317 } 320 - delta_munlocked = -nr + pagevec_count(&pvec_putback); 321 318 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); 322 319 spin_unlock_irq(zone_lru_lock(zone)); 323 320
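To illustrate the corrected accounting: for a pagevec of nr = 8 pages of which 2 fail TestClearPageMlocked() (they were never mlocked), delta_munlocked ends at -8 + 2 = -6, so NR_MLOCK drops by exactly the 6 pages whose mlocked flag was cleared, regardless of how many of them later fail isolation and land in pvec_putback. The deleted end-of-loop recomputation instead credited back every putback page.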
+25 -12
mm/page_alloc.c
··· 292 292 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
293 293 static inline void reset_deferred_meminit(pg_data_t *pgdat)
294 294 {
295 + unsigned long max_initialise;
296 + unsigned long reserved_lowmem;
297 + 
298 + /*
299 + * Initialise at least 2G of a node but also take into account that
300 + * two large system hashes can take up 1GB for 0.25TB/node.
301 + */
302 + max_initialise = max(2UL << (30 - PAGE_SHIFT),
303 + (pgdat->node_spanned_pages >> 8));
304 + 
305 + /*
306 + * Compensate for all the memblock reservations (e.g. crash kernel)
307 + * in the initial estimation to make sure we will initialize enough
308 + * memory to boot.
309 + */
310 + reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
311 + pgdat->node_start_pfn + max_initialise);
312 + max_initialise += reserved_lowmem;
313 + 
314 + pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
295 315 pgdat->first_deferred_pfn = ULONG_MAX;
296 316 }
297 317 
··· 334 314 unsigned long pfn, unsigned long zone_end,
335 315 unsigned long *nr_initialised)
336 316 {
337 - unsigned long max_initialise;
338 - 
339 317 /* Always populate low zones for address-constrained allocations */
340 318 if (zone_end < pgdat_end_pfn(pgdat))
341 319 return true;
342 - /*
343 - * Initialise at least 2G of a node but also take into account that
344 - * two large system hashes that can take up 1GB for 0.25TB/node.
345 - */
346 - max_initialise = max(2UL << (30 - PAGE_SHIFT),
347 - (pgdat->node_spanned_pages >> 8));
348 - 
349 320 (*nr_initialised)++;
350 - if ((*nr_initialised > max_initialise) &&
321 + if ((*nr_initialised > pgdat->static_init_size) &&
351 322 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
352 323 pgdat->first_deferred_pfn = pfn;
353 324 return false;
··· 3881 3870 goto got_pg;
3882 3871 
3883 3872 /* Avoid allocations with no watermarks from looping endlessly */
3884 - if (test_thread_flag(TIF_MEMDIE))
3873 + if (test_thread_flag(TIF_MEMDIE) &&
3874 + (alloc_flags == ALLOC_NO_WATERMARKS ||
3875 + (gfp_mask & __GFP_NOMEMALLOC)))
3885 3876 goto nopage;
3886 3877 
3887 3878 /* Retry as long as the OOM killer is making progress */
··· 6149 6136 /* pg_data_t should be reset to zero when it's allocated */
6150 6137 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
6151 6138 
6152 - reset_deferred_meminit(pgdat);
6153 6139 pgdat->node_id = nid;
6154 6140 pgdat->node_start_pfn = node_start_pfn;
6155 6141 pgdat->per_cpu_nodestats = NULL;
··· 6170 6158 (unsigned long)pgdat->node_mem_map);
6171 6159 #endif
6172 6160 
6161 + reset_deferred_meminit(pgdat);
6173 6162 free_area_init_core(pgdat);
6174 6163 
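For scale, assuming 4KiB pages (PAGE_SHIFT == 12) and, purely for illustration, a node spanning 1TB, the sizing in reset_deferred_meminit() works out as:

	max_initialise = max(2UL << (30 - 12), node_spanned_pages >> 8)
	               = max(524288, 1048576)	/* pages */
	               = 1048576 pages		/* 4GB */

which matches the comment's 1GB-per-0.25TB hash allowance; a 256MB crash-kernel reservation inside that window then lifts static_init_size by a further 65536 pages, clamped to the node span.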
+4 -2
mm/slub.c
··· 5512 5512 char mbuf[64]; 5513 5513 char *buf; 5514 5514 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); 5515 + ssize_t len; 5515 5516 5516 5517 if (!attr || !attr->store || !attr->show) 5517 5518 continue; ··· 5537 5536 buf = buffer; 5538 5537 } 5539 5538 5540 - attr->show(root_cache, buf); 5541 - attr->store(s, buf, strlen(buf)); 5539 + len = attr->show(root_cache, buf); 5540 + if (len > 0) 5541 + attr->store(s, buf, len); 5542 5542 } 5543 5543 5544 5544 if (buffer)
+5 -2
mm/util.c
··· 357 357 WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
358 358 
359 359 /*
360 - * Make sure that larger requests are not too disruptive - no OOM
361 - * killer and no allocation failure warnings as we have a fallback
360 + * We want to attempt a large physically contiguous block first because
361 + * it is less likely to fragment multiple larger blocks and therefore
362 + * contributes less to long-term fragmentation than the vmalloc fallback.
363 + * However, make sure that larger requests are not too disruptive - no
364 + * OOM killer and no allocation failure warnings as we have a fallback.
362 365 */
363 366 if (size > PAGE_SIZE) {
364 367 kmalloc_flags |= __GFP_NOWARN;
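A minimal sketch of the try-contiguous-then-vmalloc shape this comment describes, using the 4.12-era flag names; the function name is made up for illustration and the real kvmalloc_node() differs in detail:

	/* Illustrative only: try a physically contiguous allocation first,
	 * quietly and without invoking the OOM killer, then fall back. */
	static void *kvmalloc_sketch(size_t size, gfp_t flags)
	{
		gfp_t kmalloc_flags = flags;
		void *ret;

		if (size > PAGE_SIZE) {
			kmalloc_flags |= __GFP_NOWARN;		/* no failure warnings */
			if (!(kmalloc_flags & __GFP_REPEAT))
				kmalloc_flags |= __GFP_NORETRY;	/* no OOM killer */
		}

		ret = kmalloc(size, kmalloc_flags);
		if (ret || size <= PAGE_SIZE)
			return ret;

		return vmalloc(size);	/* virtually contiguous fallback */
	}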
+2 -4
net/sunrpc/xprtrdma/backchannel.c
··· 119 119 120 120 for (i = 0; i < (reqs << 1); i++) { 121 121 rqst = kzalloc(sizeof(*rqst), GFP_KERNEL); 122 - if (!rqst) { 123 - pr_err("RPC: %s: Failed to create bc rpc_rqst\n", 124 - __func__); 122 + if (!rqst) 125 123 goto out_free; 126 - } 124 + 127 125 dprintk("RPC: %s: new rqst %p\n", __func__, rqst); 128 126 129 127 rqst->rq_xprt = &r_xprt->rx_xprt;
+6 -1
net/sunrpc/xprtsock.c
··· 2432 2432 case -ENETUNREACH: 2433 2433 case -EADDRINUSE: 2434 2434 case -ENOBUFS: 2435 - /* retry with existing socket, after a delay */ 2435 + /* 2436 + * xs_tcp_force_close() wakes tasks with -EIO. 2437 + * We need to wake them first to ensure the 2438 + * correct error code. 2439 + */ 2440 + xprt_wake_pending_tasks(xprt, status); 2436 2441 xs_tcp_force_close(xprt); 2437 2442 goto out; 2438 2443 }
+5 -4
scripts/gdb/linux/dmesg.py
··· 23 23 super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) 24 24 25 25 def invoke(self, arg, from_tty): 26 - log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16) 27 - log_first_idx = int(gdb.parse_and_eval("log_first_idx")) 28 - log_next_idx = int(gdb.parse_and_eval("log_next_idx")) 29 - log_buf_len = int(gdb.parse_and_eval("log_buf_len")) 26 + log_buf_addr = int(str(gdb.parse_and_eval( 27 + "'printk.c'::log_buf")).split()[0], 16) 28 + log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx")) 29 + log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx")) 30 + log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len")) 30 31 31 32 inf = gdb.inferiors()[0] 32 33 start = log_buf_addr + log_first_idx
+2 -2
sound/pci/hda/patch_realtek.c
··· 2324 2324 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), 2325 2325 2326 2326 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 2327 - SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2328 - SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), 2329 2327 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), 2330 2328 SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), 2329 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2331 2330 SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), 2331 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), 2332 2332 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), 2333 2333 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), 2334 2334 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+12 -7
sound/usb/mixer_us16x08.c
··· 698 698 struct snd_usb_audio *chip = elem->head.mixer->chip; 699 699 struct snd_us16x08_meter_store *store = elem->private_data; 700 700 u8 meter_urb[64]; 701 - char tmp[sizeof(mix_init_msg2)] = {0}; 702 701 703 702 switch (kcontrol->private_value) { 704 - case 0: 705 - snd_us16x08_send_urb(chip, (char *)mix_init_msg1, 706 - sizeof(mix_init_msg1)); 703 + case 0: { 704 + char tmp[sizeof(mix_init_msg1)]; 705 + 706 + memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1)); 707 + snd_us16x08_send_urb(chip, tmp, 4); 707 708 snd_us16x08_recv_urb(chip, meter_urb, 708 709 sizeof(meter_urb)); 709 710 kcontrol->private_value++; 710 711 break; 712 + } 711 713 case 1: 712 714 snd_us16x08_recv_urb(chip, meter_urb, 713 715 sizeof(meter_urb)); ··· 720 718 sizeof(meter_urb)); 721 719 kcontrol->private_value++; 722 720 break; 723 - case 3: 721 + case 3: { 722 + char tmp[sizeof(mix_init_msg2)]; 723 + 724 724 memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2)); 725 725 tmp[2] = snd_get_meter_comp_index(store); 726 - snd_us16x08_send_urb(chip, tmp, sizeof(mix_init_msg2)); 726 + snd_us16x08_send_urb(chip, tmp, 10); 727 727 snd_us16x08_recv_urb(chip, meter_urb, 728 728 sizeof(meter_urb)); 729 729 kcontrol->private_value = 0; 730 730 break; 731 + } 731 732 } 732 733 733 734 for (set = 0; set < 6; set++) ··· 1140 1135 .control_id = SND_US16X08_ID_EQLOWMIDWIDTH, 1141 1136 .type = USB_MIXER_U8, 1142 1137 .num_channels = 16, 1143 - .name = "EQ MidQLow Q", 1138 + .name = "EQ MidLow Q", 1144 1139 }, 1145 1140 { /* EQ mid high gain */ 1146 1141 .kcontrol_new = &snd_us16x08_eq_gain_ctl,
+1
usr/Kconfig
··· 220 220 endchoice 221 221 222 222 config INITRAMFS_COMPRESSION 223 + depends on INITRAMFS_SOURCE!="" 223 224 string 224 225 default "" if INITRAMFS_COMPRESSION_NONE 225 226 default ".gz" if INITRAMFS_COMPRESSION_GZIP