Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'locking/urgent' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+82 -29
+1
arch/arm64/include/asm/kvm_arm.h
··· 182 182 #define CPTR_EL2_TCPAC (1 << 31) 183 183 #define CPTR_EL2_TTA (1 << 20) 184 184 #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) 185 + #define CPTR_EL2_DEFAULT 0x000033ff 185 186 186 187 /* Hyp Debug Configuration Register bits */ 187 188 #define MDCR_EL2_TDRA (1 << 11)
+6 -2
arch/arm64/include/asm/kvm_emulate.h
··· 127 127 128 128 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) 129 129 { 130 - u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; 130 + u32 mode; 131 131 132 - if (vcpu_mode_is_32bit(vcpu)) 132 + if (vcpu_mode_is_32bit(vcpu)) { 133 + mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; 133 134 return mode > COMPAT_PSR_MODE_USR; 135 + } 136 + 137 + mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; 134 138 135 139 return mode != PSR_MODE_EL0t; 136 140 }
+6 -2
arch/arm64/kvm/hyp/switch.c
··· 36 36 write_sysreg(val, hcr_el2); 37 37 /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ 38 38 write_sysreg(1 << 15, hstr_el2); 39 - write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2); 39 + 40 + val = CPTR_EL2_DEFAULT; 41 + val |= CPTR_EL2_TTA | CPTR_EL2_TFP; 42 + write_sysreg(val, cptr_el2); 43 + 40 44 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); 41 45 } 42 46 ··· 49 45 write_sysreg(HCR_RW, hcr_el2); 50 46 write_sysreg(0, hstr_el2); 51 47 write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); 52 - write_sysreg(0, cptr_el2); 48 + write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); 53 49 } 54 50 55 51 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
+35 -3
arch/arm64/kvm/inject_fault.c
··· 27 27 28 28 #define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \ 29 29 PSR_I_BIT | PSR_D_BIT) 30 - #define EL1_EXCEPT_SYNC_OFFSET 0x200 30 + 31 + #define CURRENT_EL_SP_EL0_VECTOR 0x0 32 + #define CURRENT_EL_SP_ELx_VECTOR 0x200 33 + #define LOWER_EL_AArch64_VECTOR 0x400 34 + #define LOWER_EL_AArch32_VECTOR 0x600 31 35 32 36 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) 33 37 { ··· 101 97 *fsr = 0x14; 102 98 } 103 99 100 + enum exception_type { 101 + except_type_sync = 0, 102 + except_type_irq = 0x80, 103 + except_type_fiq = 0x100, 104 + except_type_serror = 0x180, 105 + }; 106 + 107 + static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type) 108 + { 109 + u64 exc_offset; 110 + 111 + switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) { 112 + case PSR_MODE_EL1t: 113 + exc_offset = CURRENT_EL_SP_EL0_VECTOR; 114 + break; 115 + case PSR_MODE_EL1h: 116 + exc_offset = CURRENT_EL_SP_ELx_VECTOR; 117 + break; 118 + case PSR_MODE_EL0t: 119 + exc_offset = LOWER_EL_AArch64_VECTOR; 120 + break; 121 + default: 122 + exc_offset = LOWER_EL_AArch32_VECTOR; 123 + } 124 + 125 + return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type; 126 + } 127 + 104 128 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) 105 129 { 106 130 unsigned long cpsr = *vcpu_cpsr(vcpu); ··· 140 108 *vcpu_spsr(vcpu) = cpsr; 141 109 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); 142 110 111 + *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); 143 112 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; 144 - *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; 145 113 146 114 vcpu_sys_reg(vcpu, FAR_EL1) = addr; 147 115 ··· 175 143 *vcpu_spsr(vcpu) = cpsr; 176 144 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); 177 145 146 + *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); 178 147 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; 179 - *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; 180 148 181 149 /* 182 150 * Build an unknown exception, depending on the instruction
+4 -5
arch/arm64/kvm/sys_regs.c
··· 1007 1007 if (likely(r->access(vcpu, params, r))) { 1008 1008 /* Skip instruction, since it was emulated */ 1009 1009 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 1010 + /* Handled */ 1011 + return 0; 1010 1012 } 1011 - 1012 - /* Handled */ 1013 - return 0; 1014 1013 } 1015 1014 1016 1015 /* Not handled */ ··· 1042 1043 } 1043 1044 1044 1045 /** 1045 - * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access 1046 + * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access 1046 1047 * @vcpu: The VCPU pointer 1047 1048 * @run: The kvm_run struct 1048 1049 */ ··· 1094 1095 } 1095 1096 1096 1097 /** 1097 - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access 1098 + * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access 1098 1099 * @vcpu: The VCPU pointer 1099 1100 * @run: The kvm_run struct 1100 1101 */
+1
arch/mips/boot/dts/brcm/bcm6328.dtsi
··· 74 74 timer: timer@10000040 { 75 75 compatible = "syscon"; 76 76 reg = <0x10000040 0x2c>; 77 + little-endian; 77 78 }; 78 79 79 80 reboot {
+1
arch/mips/boot/dts/brcm/bcm7125.dtsi
··· 98 98 sun_top_ctrl: syscon@404000 { 99 99 compatible = "brcm,bcm7125-sun-top-ctrl", "syscon"; 100 100 reg = <0x404000 0x60c>; 101 + little-endian; 101 102 }; 102 103 103 104 reboot {
+1
arch/mips/boot/dts/brcm/bcm7346.dtsi
··· 118 118 sun_top_ctrl: syscon@404000 { 119 119 compatible = "brcm,bcm7346-sun-top-ctrl", "syscon"; 120 120 reg = <0x404000 0x51c>; 121 + little-endian; 121 122 }; 122 123 123 124 reboot {
+1
arch/mips/boot/dts/brcm/bcm7358.dtsi
··· 112 112 sun_top_ctrl: syscon@404000 { 113 113 compatible = "brcm,bcm7358-sun-top-ctrl", "syscon"; 114 114 reg = <0x404000 0x51c>; 115 + little-endian; 115 116 }; 116 117 117 118 reboot {
+1
arch/mips/boot/dts/brcm/bcm7360.dtsi
··· 112 112 sun_top_ctrl: syscon@404000 { 113 113 compatible = "brcm,bcm7360-sun-top-ctrl", "syscon"; 114 114 reg = <0x404000 0x51c>; 115 + little-endian; 115 116 }; 116 117 117 118 reboot {
+1
arch/mips/boot/dts/brcm/bcm7362.dtsi
··· 118 118 sun_top_ctrl: syscon@404000 { 119 119 compatible = "brcm,bcm7362-sun-top-ctrl", "syscon"; 120 120 reg = <0x404000 0x51c>; 121 + little-endian; 121 122 }; 122 123 123 124 reboot {
+1
arch/mips/boot/dts/brcm/bcm7420.dtsi
··· 99 99 sun_top_ctrl: syscon@404000 { 100 100 compatible = "brcm,bcm7420-sun-top-ctrl", "syscon"; 101 101 reg = <0x404000 0x60c>; 102 + little-endian; 102 103 }; 103 104 104 105 reboot {
+1
arch/mips/boot/dts/brcm/bcm7425.dtsi
··· 100 100 sun_top_ctrl: syscon@404000 { 101 101 compatible = "brcm,bcm7425-sun-top-ctrl", "syscon"; 102 102 reg = <0x404000 0x51c>; 103 + little-endian; 103 104 }; 104 105 105 106 reboot {
+1
arch/mips/boot/dts/brcm/bcm7435.dtsi
··· 114 114 sun_top_ctrl: syscon@404000 { 115 115 compatible = "brcm,bcm7425-sun-top-ctrl", "syscon"; 116 116 reg = <0x404000 0x51c>; 117 + little-endian; 117 118 }; 118 119 119 120 reboot {
+8 -8
drivers/base/regmap/regmap-mmio.c
··· 133 133 while (val_size) { 134 134 switch (ctx->val_bytes) { 135 135 case 1: 136 - __raw_writeb(*(u8 *)val, ctx->regs + offset); 136 + writeb(*(u8 *)val, ctx->regs + offset); 137 137 break; 138 138 case 2: 139 - __raw_writew(*(u16 *)val, ctx->regs + offset); 139 + writew(*(u16 *)val, ctx->regs + offset); 140 140 break; 141 141 case 4: 142 - __raw_writel(*(u32 *)val, ctx->regs + offset); 142 + writel(*(u32 *)val, ctx->regs + offset); 143 143 break; 144 144 #ifdef CONFIG_64BIT 145 145 case 8: 146 - __raw_writeq(*(u64 *)val, ctx->regs + offset); 146 + writeq(*(u64 *)val, ctx->regs + offset); 147 147 break; 148 148 #endif 149 149 default: ··· 193 193 while (val_size) { 194 194 switch (ctx->val_bytes) { 195 195 case 1: 196 - *(u8 *)val = __raw_readb(ctx->regs + offset); 196 + *(u8 *)val = readb(ctx->regs + offset); 197 197 break; 198 198 case 2: 199 - *(u16 *)val = __raw_readw(ctx->regs + offset); 199 + *(u16 *)val = readw(ctx->regs + offset); 200 200 break; 201 201 case 4: 202 - *(u32 *)val = __raw_readl(ctx->regs + offset); 202 + *(u32 *)val = readl(ctx->regs + offset); 203 203 break; 204 204 #ifdef CONFIG_64BIT 205 205 case 8: 206 - *(u64 *)val = __raw_readq(ctx->regs + offset); 206 + *(u64 *)val = readq(ctx->regs + offset); 207 207 break; 208 208 #endif 209 209 default:
+10 -6
kernel/locking/lockdep.c
··· 1822 1822 */ 1823 1823 static int 1824 1824 check_prev_add(struct task_struct *curr, struct held_lock *prev, 1825 - struct held_lock *next, int distance, int trylock_loop) 1825 + struct held_lock *next, int distance, int *stack_saved) 1826 1826 { 1827 1827 struct lock_list *entry; 1828 1828 int ret; ··· 1883 1883 } 1884 1884 } 1885 1885 1886 - if (!trylock_loop && !save_trace(&trace)) 1887 - return 0; 1886 + if (!*stack_saved) { 1887 + if (!save_trace(&trace)) 1888 + return 0; 1889 + *stack_saved = 1; 1890 + } 1888 1891 1889 1892 /* 1890 1893 * Ok, all validations passed, add the new lock ··· 1910 1907 * Debugging printouts: 1911 1908 */ 1912 1909 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { 1910 + /* We drop graph lock, so another thread can overwrite trace. */ 1911 + *stack_saved = 0; 1913 1912 graph_unlock(); 1914 1913 printk("\n new dependency: "); 1915 1914 print_lock_name(hlock_class(prev)); ··· 1934 1929 check_prevs_add(struct task_struct *curr, struct held_lock *next) 1935 1930 { 1936 1931 int depth = curr->lockdep_depth; 1937 - int trylock_loop = 0; 1932 + int stack_saved = 0; 1938 1933 struct held_lock *hlock; 1939 1934 1940 1935 /* ··· 1961 1956 */ 1962 1957 if (hlock->read != 2 && hlock->check) { 1963 1958 if (!check_prev_add(curr, hlock, next, 1964 - distance, trylock_loop)) 1959 + distance, &stack_saved)) 1965 1960 return 0; 1966 1961 /* 1967 1962 * Stop after the first non-trylock entry, ··· 1984 1979 if (curr->held_locks[depth].irq_context != 1985 1980 curr->held_locks[depth-1].irq_context) 1986 1981 break; 1987 - trylock_loop = 1; 1988 1982 } 1989 1983 return 1; 1990 1984 out_bug:
+3 -3
lib/scatterlist.c
··· 598 598 * 599 599 * Description: 600 600 * Stops mapping iterator @miter. @miter should have been started 601 - * started using sg_miter_start(). A stopped iteration can be 602 - * resumed by calling sg_miter_next() on it. This is useful when 603 - * resources (kmap) need to be released during iteration. 601 + * using sg_miter_start(). A stopped iteration can be resumed by 602 + * calling sg_miter_next() on it. This is useful when resources (kmap) 603 + * need to be released during iteration. 604 604 * 605 605 * Context: 606 606 * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care