Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"Another round of CR3/PCID related fixes (I think this addresses all
but one of the known problems with PCID support), an objtool fix plus
a Clang fix that (finally) solves all Clang quirks to build a bootable
x86 kernel as-is"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/asm: Fix inline asm call constraints for Clang
objtool: Handle another GCC stack pointer adjustment bug
x86/mm/32: Load a sane CR3 before cpu_init() on secondary CPUs
x86/mm/32: Move setup_clear_cpu_cap(X86_FEATURE_PCID) earlier
x86/mm/64: Stop using CR3.PCID == 0 in ASID-aware code
x86/mm: Factor out CR3-building code

+1 -2
arch/x86/include/asm/alternative.h
···
 #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
 	output, input...) \
 { \
-	register void *__sp asm(_ASM_SP); \
 	asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
 	"call %P[new2]", feature2) \
-	: output, "+r" (__sp) \
+	: output, ASM_CALL_CONSTRAINT \
 	: [old] "i" (oldfunc), [new1] "i" (newfunc1), \
 	[new2] "i" (newfunc2), ## input); \
 }
+11
arch/x86/include/asm/asm.h
···
 /* For C file, we already have NOKPROBE_SYMBOL macro */
 #endif

+#ifndef __ASSEMBLY__
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction. Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function. If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned long __asm_call_sp asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (__asm_call_sp)
+#endif
+
 #endif /* _ASM_X86_ASM_H */
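
For illustration only, a minimal sketch of how a call site uses the new
constraint (the callee "example_func" is a placeholder, not a symbol from this
series; the real conversions follow in the hunks below):

    /*
     * Sketch: an inline asm that contains a "call" lists the stack pointer
     * as an output via ASM_CALL_CONSTRAINT, so the compiler sets up the
     * frame pointer before placing the asm.
     */
    static inline void example_call(void)
    {
            asm volatile("call example_func"
                         : ASM_CALL_CONSTRAINT
                         : /* no inputs */
                         : "memory");
    }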
+28 -4
arch/x86/include/asm/mmu_context.h
···
 	return __pkru_allows_pkey(vma_pkey(vma), write);
 }

+/*
+ * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
+ * bits. This serves two purposes. It prevents a nasty situation in
+ * which PCID-unaware code saves CR3, loads some other value (with PCID
+ * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
+ * the saved ASID was nonzero. It also means that any bugs involving
+ * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
+ * deterministically.
+ */
+
+static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
+{
+	if (static_cpu_has(X86_FEATURE_PCID)) {
+		VM_WARN_ON_ONCE(asid > 4094);
+		return __sme_pa(mm->pgd) | (asid + 1);
+	} else {
+		VM_WARN_ON_ONCE(asid != 0);
+		return __sme_pa(mm->pgd);
+	}
+}
+
+static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
+{
+	VM_WARN_ON_ONCE(asid > 4094);
+	return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
+}

 /*
  * This can be used from process context to figure out what the value of
···
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
-
-	if (static_cpu_has(X86_FEATURE_PCID))
-		cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

 	/* For now, be very restrictive about when this can be called. */
 	VM_WARN_ON(in_nmi() || preemptible());
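
As a worked illustration of the encoding above (a sketch outside the kernel;
the EXAMPLE_* names are invented for this note): with PCID enabled, CR3 bits
11:0 carry the PCID and bit 63 is the "no flush" hint, so storing ASID + 1 in
the PCID field is why both helpers warn when asid > 4094.

    /* Sketch only: mirrors build_cr3()/build_cr3_noflush() outside the kernel. */
    #define EXAMPLE_CR3_NOFLUSH (1UL << 63)

    static unsigned long example_build_cr3(unsigned long pgd_pa,
                                           unsigned short asid, int noflush)
    {
            unsigned long cr3 = pgd_pa | (asid + 1UL);      /* ASID 0 -> PCID 1 */

            if (noflush)
                    cr3 |= EXAMPLE_CR3_NOFLUSH;
            return cr3;
    }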
+4 -6
arch/x86/include/asm/mshyperv.h
···
 	u64 input_address = input ? virt_to_phys(input) : 0;
 	u64 output_address = output ? virt_to_phys(output) : 0;
 	u64 hv_status;
-	register void *__sp asm(_ASM_SP);

 #ifdef CONFIG_X86_64
 	if (!hv_hypercall_pg)
···

 	__asm__ __volatile__("mov %4, %%r8\n"
 			     "call *%5"
-			     : "=a" (hv_status), "+r" (__sp),
+			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
 			       "+c" (control), "+d" (input_address)
 			     : "r" (output_address), "m" (hv_hypercall_pg)
 			     : "cc", "memory", "r8", "r9", "r10", "r11");
···

 	__asm__ __volatile__("call *%7"
 			     : "=A" (hv_status),
-			       "+c" (input_address_lo), "+r" (__sp)
+			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
 			     : "A" (control),
 			       "b" (input_address_hi),
 			       "D"(output_address_hi), "S"(output_address_lo),
···
 static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
 {
 	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
-	register void *__sp asm(_ASM_SP);

 #ifdef CONFIG_X86_64
 	{
 		__asm__ __volatile__("call *%4"
-				     : "=a" (hv_status), "+r" (__sp),
+				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
 				       "+c" (control), "+d" (input1)
 				     : "m" (hv_hypercall_pg)
 				     : "cc", "r8", "r9", "r10", "r11");
···
 		__asm__ __volatile__ ("call *%5"
 				      : "=A"(hv_status),
 					"+c"(input1_lo),
-					"+r"(__sp)
+					ASM_CALL_CONSTRAINT
 				      : "A" (control),
 					"b" (input1_hi),
 					"m" (hv_hypercall_pg)
+7 -7
arch/x86/include/asm/paravirt_types.h
···
  */
 #ifdef CONFIG_X86_32
 #define PVOP_VCALL_ARGS \
-	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \
-	register void *__sp asm("esp")
+	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
+
 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS

 #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
···
 /* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS \
 	unsigned long __edi = __edi, __esi = __esi, \
-	__edx = __edx, __ecx = __ecx, __eax = __eax; \
-	register void *__sp asm("rsp")
+	__edx = __edx, __ecx = __ecx, __eax = __eax;
+
 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS

 #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
···
 	asm volatile(pre \
 		     paravirt_alt(PARAVIRT_CALL) \
 		     post \
-		     : call_clbr, "+r" (__sp) \
+		     : call_clbr, ASM_CALL_CONSTRAINT \
 		     : paravirt_type(op), \
 		       paravirt_clobber(clbr), \
 		       ##__VA_ARGS__ \
···
 	asm volatile(pre \
 		     paravirt_alt(PARAVIRT_CALL) \
 		     post \
-		     : call_clbr, "+r" (__sp) \
+		     : call_clbr, ASM_CALL_CONSTRAINT \
 		     : paravirt_type(op), \
 		       paravirt_clobber(clbr), \
 		       ##__VA_ARGS__ \
···
 	asm volatile(pre \
 		     paravirt_alt(PARAVIRT_CALL) \
 		     post \
-		     : call_clbr, "+r" (__sp) \
+		     : call_clbr, ASM_CALL_CONSTRAINT \
 		     : paravirt_type(op), \
 		       paravirt_clobber(clbr), \
 		       ##__VA_ARGS__ \
+5 -10
arch/x86/include/asm/preempt.h
···

 #ifdef CONFIG_PREEMPT
   extern asmlinkage void ___preempt_schedule(void);
-# define __preempt_schedule() \
-({ \
-	register void *__sp asm(_ASM_SP); \
-	asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \
-})
+# define __preempt_schedule() \
+	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)

   extern asmlinkage void preempt_schedule(void);
   extern asmlinkage void ___preempt_schedule_notrace(void);
-# define __preempt_schedule_notrace() \
-({ \
-	register void *__sp asm(_ASM_SP); \
-	asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \
-})
+# define __preempt_schedule_notrace() \
+	asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
+
   extern asmlinkage void preempt_schedule_notrace(void);
 #endif

+2 -4
arch/x86/include/asm/processor.h
···
 	 * Like all of Linux's memory ordering operations, this is a
 	 * compiler barrier as well.
 	 */
-	register void *__sp asm(_ASM_SP);
-
 #ifdef CONFIG_X86_32
 	asm volatile (
 		"pushfl\n\t"
···
 		"pushl $1f\n\t"
 		"iret\n\t"
 		"1:"
-		: "+r" (__sp) : : "memory");
+		: ASM_CALL_CONSTRAINT : : "memory");
 #else
 	unsigned int tmp;

···
 		"iretq\n\t"
 		UNWIND_HINT_RESTORE
 		"1:"
-		: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
+		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
 #endif
 }
+2 -2
arch/x86/include/asm/rwsem.h
···
 ({ \
 	long tmp; \
 	struct rw_semaphore* ret; \
-	register void *__sp asm(_ASM_SP); \
 	\
 	asm volatile("# beginning down_write\n\t" \
 		     LOCK_PREFIX " xadd %1,(%4)\n\t" \
···
 		     " call " slow_path "\n" \
 		     "1:\n" \
 		     "# ending down_write" \
-		     : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
+		     : "+m" (sem->count), "=d" (tmp), \
+		       "=a" (ret), ASM_CALL_CONSTRAINT \
 		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
 		     : "memory", "cc"); \
 	ret; \
+2 -2
arch/x86/include/asm/uaccess.h
···
 ({ \
 	int __ret_gu; \
 	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
-	register void *__sp asm(_ASM_SP); \
 	__chk_user_ptr(ptr); \
 	might_fault(); \
 	asm volatile("call __get_user_%P4" \
-		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
+		     : "=a" (__ret_gu), "=r" (__val_gu), \
+		       ASM_CALL_CONSTRAINT \
 		     : "0" (ptr), "i" (sizeof(*(ptr)))); \
 	(x) = (__force __typeof__(*(ptr))) __val_gu; \
 	__builtin_expect(__ret_gu, 0); \
+2 -3
arch/x86/include/asm/xen/hypercall.h
···
 	register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
 	register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
 	register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
-	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \
-	register void *__sp asm(_ASM_SP);
+	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;

-#define __HYPERCALL_0PARAM	"=r" (__res), "+r" (__sp)
+#define __HYPERCALL_0PARAM	"=r" (__res), ASM_CALL_CONSTRAINT
 #define __HYPERCALL_1PARAM	__HYPERCALL_0PARAM, "+r" (__arg1)
 #define __HYPERCALL_2PARAM	__HYPERCALL_1PARAM, "+r" (__arg2)
 #define __HYPERCALL_3PARAM	__HYPERCALL_2PARAM, "+r" (__arg3)
-8
arch/x86/kernel/cpu/bugs.c
···

 void __init check_bugs(void)
 {
-#ifdef CONFIG_X86_32
-	/*
-	 * Regardless of whether PCID is enumerated, the SDM says
-	 * that it can't be enabled in 32-bit mode.
-	 */
-	setup_clear_cpu_cap(X86_FEATURE_PCID);
-#endif
-
 	identify_boot_cpu();

 	if (!IS_ENABLED(CONFIG_SMP)) {
+8
arch/x86/kernel/cpu/common.c
···

 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 	fpu__init_system(c);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Regardless of whether PCID is enumerated, the SDM says
+	 * that it can't be enabled in 32-bit mode.
+	 */
+	setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
 }

 void __init early_cpu_init(void)
+7 -6
arch/x86/kernel/smpboot.c
···
 	 */
 	if (boot_cpu_has(X86_FEATURE_PCID))
 		__write_cr4(__read_cr4() | X86_CR4_PCIDE);
-	cpu_init();
-	x86_cpuinit.early_percpu_clock_init();
-	preempt_disable();
-	smp_callin();
-
-	enable_start_cpu0 = 0;

 #ifdef CONFIG_X86_32
 	/* switch away from the initial page table */
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 #endif
+
+	cpu_init();
+	x86_cpuinit.early_percpu_clock_init();
+	preempt_disable();
+	smp_callin();
+
+	enable_start_cpu0 = 0;

 	/* otherwise gcc will move up smp_processor_id before the cpu_init */
 	barrier();
+1 -2
arch/x86/kvm/emulate.c
···

 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 {
-	register void *__sp asm(_ASM_SP);
 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

 	if (!(ctxt->d & ByteOp))
···

 	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
-	      [fastop]"+S"(fop), "+r"(__sp)
+	      [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
 	    : "c"(ctxt->src2.val));

 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
+1 -2
arch/x86/kvm/vmx.c
···
 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 {
 	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-	register void *__sp asm(_ASM_SP);

 	if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
 			== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
···
 #ifdef CONFIG_X86_64
 			[sp]"=&r"(tmp),
 #endif
-			"+r"(__sp)
+			ASM_CALL_CONSTRAINT
 			:
 			[entry]"r"(entry),
 			[ss]"i"(__KERNEL_DS),
+1 -2
arch/x86/mm/fault.c
···
 	if (is_vmalloc_addr((void *)address) &&
 	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
 	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
-		register void *__sp asm("rsp");
 		unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
 		/*
 		 * We're likely to be running with very little stack space
···
 		asm volatile ("movq %[stack], %%rsp\n\t"
 			      "call handle_stack_overflow\n\t"
 			      "1: jmp 1b"
-			      : "+r" (__sp)
+			      : ASM_CALL_CONSTRAINT
 			      : "D" ("kernel stack overflow (page fault)"),
 				"S" (regs), "d" (address),
 				[stack] "rm" (stack));
+5 -6
arch/x86/mm/tlb.c
···
 	 * isn't free.
 	 */
 #ifdef CONFIG_DEBUG_VM
-	if (WARN_ON_ONCE(__read_cr3() !=
-			 (__sme_pa(real_prev->pgd) | prev_asid))) {
+	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
 		/*
 		 * If we were to BUG here, we'd be very likely to kill
 		 * the system so hard that we don't see the call trace.
···
 		 */
 		this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
 			       next_tlb_gen);
-		write_cr3(__sme_pa(next->pgd) | prev_asid);
+		write_cr3(build_cr3(next, prev_asid));
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
 				TLB_FLUSH_ALL);
 	}
···
 		if (need_flush) {
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-			write_cr3(__sme_pa(next->pgd) | new_asid);
+			write_cr3(build_cr3(next, new_asid));
 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
 					TLB_FLUSH_ALL);
 		} else {
 			/* The new ASID is already up to date. */
-			write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH);
+			write_cr3(build_cr3_noflush(next, new_asid));
 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
 		}

···
 		!(cr4_read_shadow() & X86_CR4_PCIDE));

 	/* Force ASID 0 and force a TLB flush. */
-	write_cr3(cr3 & ~CR3_PCID_MASK);
+	write_cr3(build_cr3(mm, 0));

 	/* Reinitialize tlbstate. */
 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
+3 -3
tools/objtool/Documentation/stack-validation.txt
···
    If it's a GCC-compiled .c file, the error may be because the function
    uses an inline asm() statement which has a "call" instruction.  An
    asm() statement with a call instruction must declare the use of the
-   stack pointer in its output operand.  For example, on x86_64:
+   stack pointer in its output operand.  On x86_64, this means adding
+   the ASM_CALL_CONSTRAINT as an output constraint:

-     register void *__sp asm("rsp");
-     asm volatile("call func" : "+r" (__sp));
+     asm volatile("call func" : ASM_CALL_CONSTRAINT);

    Otherwise the stack frame may not get created before the call.

+3 -3
tools/objtool/arch/x86/decode.c
··· 208 208 break; 209 209 210 210 case 0x89: 211 - if (rex == 0x48 && modrm == 0xe5) { 211 + if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) { 212 212 213 - /* mov %rsp, %rbp */ 213 + /* mov %rsp, reg */ 214 214 *type = INSN_STACK; 215 215 op->src.type = OP_SRC_REG; 216 216 op->src.reg = CFI_SP; 217 217 op->dest.type = OP_DEST_REG; 218 - op->dest.reg = CFI_BP; 218 + op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b]; 219 219 break; 220 220 } 221 221
+29 -14
tools/objtool/check.c
···
 		switch (op->src.type) {

 		case OP_SRC_REG:
-			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP) {
+			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
+			    cfa->base == CFI_SP &&
+			    regs[CFI_BP].base == CFI_CFA &&
+			    regs[CFI_BP].offset == -cfa->offset) {

-				if (cfa->base == CFI_SP &&
-				    regs[CFI_BP].base == CFI_CFA &&
-				    regs[CFI_BP].offset == -cfa->offset) {
+				/* mov %rsp, %rbp */
+				cfa->base = op->dest.reg;
+				state->bp_scratch = false;
+			}

-					/* mov %rsp, %rbp */
-					cfa->base = op->dest.reg;
-					state->bp_scratch = false;
-				}
+			else if (op->src.reg == CFI_SP &&
+				 op->dest.reg == CFI_BP && state->drap) {

-				else if (state->drap) {
+				/* drap: mov %rsp, %rbp */
+				regs[CFI_BP].base = CFI_BP;
+				regs[CFI_BP].offset = -state->stack_size;
+				state->bp_scratch = false;
+			}

-					/* drap: mov %rsp, %rbp */
-					regs[CFI_BP].base = CFI_BP;
-					regs[CFI_BP].offset = -state->stack_size;
-					state->bp_scratch = false;
-				}
+			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+
+				/*
+				 * mov %rsp, %reg
+				 *
+				 * This is needed for the rare case where GCC
+				 * does:
+				 *
+				 *   mov    %rsp, %rax
+				 *   ...
+				 *   mov    %rax, %rsp
+				 */
+				state->vals[op->dest.reg].base = CFI_CFA;
+				state->vals[op->dest.reg].offset = -state->stack_size;
 			}

 			else if (op->dest.reg == cfa->base) {