Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
"Here's the weekly batch of fixes for arm64. Not an awful lot here, but
there are still a few unresolved issues relating to CPU hotplug, RCU
and IRQ tracing that I hope to queue fixes for next week.

Summary:

- Fix early use of kprobes

- Fix kernel placement in kexec_file_load()

- Bump maximum number of NUMA nodes"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: kexec_file: try more regions if loading segments fails
arm64: kprobes: Use BRK instead of single-step when executing instructions out-of-line
arm64: NUMA: Kconfig: Increase NODES_SHIFT to 4

+69 -61
+1 -1
arch/arm64/Kconfig
 config NODES_SHIFT
 	int "Maximum NUMA Nodes (as a power of 2)"
 	range 1 10
-	default "2"
+	default "4"
 	depends on NEED_MULTIPLE_NODES
 	help
 	  Specify the maximum number of NUMA Nodes available on the target
+2
arch/arm64/include/asm/brk-imm.h
  * #imm16 values used for BRK instruction generation
  * 0x004: for installing kprobes
  * 0x005: for installing uprobes
+ * 0x006: for kprobe software single-step
  * Allowed values for kgdb are 0x400 - 0x7ff
  * 0x100: for triggering a fault on purpose (reserved)
  * 0x400: for dynamic BRK instruction
···
  */
 #define KPROBES_BRK_IMM		0x004
 #define UPROBES_BRK_IMM		0x005
+#define KPROBES_BRK_SS_IMM	0x006
 #define FAULT_BRK_IMM		0x100
 #define KGDB_DYN_DBG_BRK_IMM	0x400
 #define KGDB_COMPILED_DBG_BRK_IMM	0x401
+1
arch/arm64/include/asm/debug-monitors.h
 /* kprobes BRK opcodes with ESR encoding */
 #define BRK64_OPCODE_KPROBES	(AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
+#define BRK64_OPCODE_KPROBES_SS	(AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5))
 /* uprobes BRK opcodes with ESR encoding */
 #define BRK64_OPCODE_UPROBES	(AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
+1 -1
arch/arm64/include/asm/kprobes.h
 #include <linux/percpu.h>

 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE			1
+#define MAX_INSN_SIZE			2

 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
+32 -11
arch/arm64/kernel/kexec_image.c
 	u64 flags, value;
 	bool be_image, be_kernel;
 	struct kexec_buf kbuf;
-	unsigned long text_offset;
+	unsigned long text_offset, kernel_segment_number;
 	struct kexec_segment *kernel_segment;
 	int ret;
···
 	/* Adjust kernel segment with TEXT_OFFSET */
 	kbuf.memsz += text_offset;

-	ret = kexec_add_buffer(&kbuf);
-	if (ret)
-		return ERR_PTR(ret);
+	kernel_segment_number = image->nr_segments;

-	kernel_segment = &image->segment[image->nr_segments - 1];
+	/*
+	 * The location of the kernel segment may make it impossible to satisfy
+	 * the other segment requirements, so we try repeatedly to find a
+	 * location that will work.
+	 */
+	while ((ret = kexec_add_buffer(&kbuf)) == 0) {
+		/* Try to load additional data */
+		kernel_segment = &image->segment[kernel_segment_number];
+		ret = load_other_segments(image, kernel_segment->mem,
+					  kernel_segment->memsz, initrd,
+					  initrd_len, cmdline);
+		if (!ret)
+			break;
+
+		/*
+		 * We couldn't find space for the other segments; erase the
+		 * kernel segment and try the next available hole.
+		 */
+		image->nr_segments -= 1;
+		kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
+		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+	}
+
+	if (ret) {
+		pr_err("Could not find any suitable kernel location!");
+		return ERR_PTR(ret);
+	}
+
+	kernel_segment = &image->segment[kernel_segment_number];
 	kernel_segment->mem += text_offset;
 	kernel_segment->memsz -= text_offset;
 	image->start = kernel_segment->mem;
···
 		      kernel_segment->mem, kbuf.bufsz,
 		      kernel_segment->memsz);

-	/* Load additional data */
-	ret = load_other_segments(image,
-				  kernel_segment->mem, kernel_segment->memsz,
-				  initrd, initrd_len, cmdline);
-
-	return ERR_PTR(ret);
+	return 0;
 }

 #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+8 -1
arch/arm64/kernel/machine_kexec_file.c
 	return ret;
 }

+/*
+ * Tries to add the initrd and DTB to the image. If it is not possible to find
+ * valid locations, this function will undo changes to the image and return non
+ * zero.
+ */
 int load_other_segments(struct kimage *image,
 			unsigned long kernel_load_addr,
 			unsigned long kernel_size,
···
 {
 	struct kexec_buf kbuf;
 	void *headers, *dtb = NULL;
-	unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
+	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
+		      orig_segments = image->nr_segments;
 	int ret = 0;

 	kbuf.image = image;
···
 	return 0;

 out_err:
+	image->nr_segments = orig_segments;
 	vfree(dtb);
 	return ret;
 }
+24 -47
arch/arm64/kernel/probes/kprobes.c
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
-{
-	void *addrs[1];
-	u32 insns[1];
-
-	addrs[0] = addr;
-	insns[0] = opcode;
-
-	return aarch64_insn_patch_text(addrs, insns, 1);
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
-	/* prepare insn slot */
-	patch_text(p->ainsn.api.insn, p->opcode);
+	kprobe_opcode_t *addr = p->ainsn.api.insn;
+	void *addrs[] = {addr, addr + 1};
+	u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};

-	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
-			   (uintptr_t) (p->ainsn.api.insn) +
-			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	/* prepare insn slot */
+	aarch64_insn_patch_text(addrs, insns, 2);
+
+	flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));

 	/*
 	 * Needs restoring of return address after stepping xol.
···
 /* arm kprobe: install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, BRK64_OPCODE_KPROBES);
+	void *addr = p->addr;
+	u32 insn = BRK64_OPCODE_KPROBES;
+
+	aarch64_insn_patch_text(&addr, &insn, 1);
 }

 /* disarm kprobe: remove breakpoint from text */
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, p->opcode);
+	void *addr = p->addr;
+
+	aarch64_insn_patch_text(&addr, &p->opcode, 1);
 }

 void __kprobes arch_remove_kprobe(struct kprobe *p)
···
 }

 /*
- * Interrupts need to be disabled before single-step mode is set, and not
- * reenabled until after single-step mode ends.
- * Without disabling interrupt on local CPU, there is a chance of
- * interrupt occurrence in the period of exception return and start of
- * out-of-line single-step, that result in wrongly single stepping
- * into the interrupt handler.
+ * Mask all of DAIF while executing the instruction out-of-line, to keep things
+ * simple and avoid nesting exceptions. Interrupts do have to be disabled since
+ * the kprobe state is per-CPU and doesn't get migrated.
  */
 static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
 						struct pt_regs *regs)
 {
 	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
-	regs->pstate |= PSR_I_BIT;
-	/* Unmask PSTATE.D for enabling software step exceptions. */
-	regs->pstate &= ~PSR_D_BIT;
+	regs->pstate |= DAIF_MASK;
 }

 static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
···
 		slot = (unsigned long)p->ainsn.api.insn;

 		set_ss_context(kcb, slot);	/* mark pending ss */
-
-		/* IRQs and single stepping do not mix well. */
 		kprobes_save_local_irqflag(kcb, regs);
-		kernel_enable_single_step(regs);
 		instruction_pointer_set(regs, slot);
 	} else {
 		/* insn simulation */
···
 	}
 	/* call post handler */
 	kcb->kprobe_status = KPROBE_HIT_SSDONE;
-	if (cur->post_handler) {
-		/* post_handler can hit breakpoint and single step
-		 * again, so we enable D-flag for recursive exception.
-		 */
+	if (cur->post_handler)
 		cur->post_handler(cur, regs, 0);
-	}

 	reset_current_kprobe();
 }
···
 		instruction_pointer_set(regs, (unsigned long) cur->addr);
 		if (!instruction_pointer(regs))
 			BUG();
-
-		kernel_disable_single_step();

 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
···
 			 * pre-handler and it returned non-zero, it will
 			 * modify the execution path and no need to single
 			 * stepping. Let's just reset current kprobe and exit.
-			 *
-			 * pre_handler can hit a breakpoint and can step thru
-			 * before return, keep PSTATE D-flag enabled until
-			 * pre_handler return back.
 			 */
 			if (!p->pre_handler || !p->pre_handler(p, regs)) {
 				setup_singlestep(p, regs, kcb, 0);
···
 }

 static int __kprobes
-kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
+kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	int retval;
···
 	if (retval == DBG_HOOK_HANDLED) {
 		kprobes_restore_local_irqflag(kcb, regs);
-		kernel_disable_single_step();
-
 		post_kprobe_handler(kcb, regs);
 	}

 	return retval;
 }

-static struct step_hook kprobes_step_hook = {
-	.fn = kprobe_single_step_handler,
+static struct break_hook kprobes_break_ss_hook = {
+	.imm = KPROBES_BRK_SS_IMM,
+	.fn = kprobe_breakpoint_ss_handler,
 };

 static int __kprobes
···
 int __init arch_init_kprobes(void)
 {
 	register_kernel_break_hook(&kprobes_break_hook);
-	register_kernel_step_hook(&kprobes_step_hook);
+	register_kernel_break_hook(&kprobes_break_ss_hook);

 	return 0;
 }