Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mpx.c at v4.5 (1040 lines, 29 kB)
/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BD_SIZE_BYTES_64;
	else
		return MPX_BD_SIZE_BYTES_32;
}

static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_SIZE_BYTES_64;
	else
		return MPX_BT_SIZE_BYTES_32;
}

/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 */
static unsigned long mpx_mmap(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, populate;

	/* Only bounds tables can be allocated here */
	if (len != mpx_bt_size_bytes(mm))
		return -EINVAL;

	down_write(&mm->mmap_sem);
	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

	return addr;
}
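/*
 * For scale, using the table/directory sizes quoted later in this
 * file: a 64-bit process only ever passes mpx_mmap() a 'len' of 4MB
 * (MPX_BT_SIZE_BYTES_64) and a 32-bit process 16KB
 * (MPX_BT_SIZE_BYTES_32); anything else is rejected with -EINVAL.
 * Tagging the mapping with VM_MPX is what later lets the unmap path
 * distinguish kernel-allocated bounds tables from ordinary anonymous
 * memory.
 */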
enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't possibly decode a 32-bit instruction as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	default:
		pr_err("invalid register type");
		BUG();
		break;
	}

	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}

/*
 * Return the address being referenced by the instruction.
 * For mod == 3, return the content of the rm register;
 * for mod != 3, calculate the address using SIB and displacement.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}
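/*
 * Example of the SIB arithmetic above: for an operand such as
 * 0x10(%rax,%rcx,4), base = %rax, indx = %rcx, the scale bits are 2
 * and the displacement is 0x10, so the referenced address works out
 * to %rax + %rcx * (1 << 2) + 0x10.
 */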
static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail.  If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction.  If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read.  This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction.  Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}

/*
 * If a bounds overflow occurs then a #BR is generated.  This
 * function decodes MPX instructions to get the violation address
 * and set this address into the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants in to the instructions.  We can not
 * trust anything about it.  They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
	const struct mpx_bndreg_state *bndregs;
	const struct mpx_bndreg *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get bndregs field from current task's xsave area */
	bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs->bndreg[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode.  Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	trace_mpx_bounds_register_exception(info->si_addr, bndreg);
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}
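/*
 * A concrete illustration of the 1's complement convention handled
 * above: if a bound register holds lower_bound = 0x1000 and
 * upper_bound = ~0x1fff, the siginfo reports si_lower = 0x1000 and
 * si_upper = 0x1fff, i.e. the range userspace actually asked to have
 * enforced.
 */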
static __user void *mpx_get_bounds_dir(void)
{
	const struct mpx_bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}

int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The runtime in userspace is responsible for allocating the
	 * bounds directory.  It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive.  Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}

int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
		unsigned long *curval,
		unsigned long __user *addr,
		unsigned long old_val, unsigned long new_val)
{
	int ret;
	/*
	 * user_atomic_cmpxchg_inatomic() actually uses sizeof()
	 * the pointer that we pass to it to figure out how much
	 * data to cmpxchg.  We have to be careful here not to
	 * pass a pointer to a 64-bit data type when we only want
	 * a 32-bit copy.
	 */
	if (is_64bit_mm(mm)) {
		ret = user_atomic_cmpxchg_inatomic(curval,
				addr, old_val, new_val);
	} else {
		u32 uninitialized_var(curval_32);
		u32 old_val_32 = old_val;
		u32 new_val_32 = new_val;
		u32 __user *addr_32 = (u32 __user *)addr;

		ret = user_atomic_cmpxchg_inatomic(&curval_32,
				addr_32, old_val_32, new_val_32);
		*curval = curval_32;
	}
	return ret;
}
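/*
 * Width example for the helper above: for a 32-bit mm, passing the
 * u32 pointer makes user_atomic_cmpxchg_inatomic() compare and swap
 * only 4 bytes of the directory entry; passing 'addr' directly would
 * touch 8 bytes on a 64-bit kernel and clobber the neighboring
 * 32-bit entry.
 */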
/*
 * With 32-bit mode, a bounds directory is 4MB, and the size of each
 * bounds table is 16KB.  With 64-bit mode, a bounds directory is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	unsigned long bd_new_entry;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory.  Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
				   expected_old_val, bd_new_entry);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails.  Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry.  Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set.  Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	trace_mpx_new_bounds_table(bt_addr);
	return 0;
out_unmap:
	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
	return ret;
}

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory.  If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of the BD is 4MB, and the size of each
 * bounds table is 16KB.  With 64-bit mode, the size of the BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct mpx_bndcsr *bndcsr;
	struct mm_struct *mm = current->mm;

	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
		return -EINVAL;

	return allocate_bt(mm, (long __user *)bd_entry);
}

int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	if (do_mpx_bt_fault()) {
		force_sig(SIGSEGV, current);
		/*
		 * The force_sig() is essentially "handling" this
		 * exception, so we do not pass up the error
		 * from do_mpx_bt_fault().
		 */
	}
	return 0;
}
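/*
 * Putting the range check in do_mpx_bt_fault() into numbers: with a
 * 64-bit mm the directory spans [bd_base, bd_base + 2GB), so a
 * BNDSTATUS-reported entry address of bd_base + 0x80000000 or beyond
 * is rejected before we ever try to allocate a table for it.
 */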
/*
 * A thin wrapper around get_user_pages().  Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;
	int force = 0;

	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
				 nr_pages, write, force, NULL, NULL);
	/*
	 * get_user_pages() returns number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret > 0, success */
	return 0;
}

static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
						unsigned long bd_entry)
{
	unsigned long bt_addr = bd_entry;
	int align_to_bytes;
	/*
	 * Bit 0 in a bt_entry is always the valid bit.
	 */
	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
	/*
	 * Tables are naturally aligned at 8-byte boundaries
	 * on 64-bit and 4-byte boundaries on 32-bit.  The
	 * documentation makes it appear that the low bits
	 * are ignored by the hardware, so we do the same.
	 */
	if (is_64bit_mm(mm))
		align_to_bytes = 8;
	else
		align_to_bytes = 4;
	bt_addr &= ~(align_to_bytes-1);
	return bt_addr;
}

/*
 * We only want to do a 4-byte get_user() on 32-bit.  Otherwise,
 * we might run off the end of the bounds table if we are on
 * a 64-bit kernel and try to get 8 bytes.
 */
int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
		      long __user *bd_entry_ptr)
{
	u32 bd_entry_32;
	int ret;

	if (is_64bit_mm(mm))
		return get_user(*bd_entry_ret, bd_entry_ptr);

	/*
	 * Note that get_user() uses the type of the *pointer* to
	 * establish the size of the get, not the destination.
	 */
	ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
	*bd_entry_ret = bd_entry_32;
	return ret;
}

/*
 * Get the base of the bounds table pointed to by a specific bounds
 * directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
		       long __user *bd_entry_ptr,
		       unsigned long *bt_addr_result)
{
	int ret;
	int valid_bit;
	unsigned long bd_entry;
	unsigned long bt_addr;

	if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry_ptr, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty.  If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong.  This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry?  That is OK.  It
	 * just means there was no bounds table for this memory.  Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	*bt_addr_result = bt_addr;
	return 0;
}

static inline int bt_entry_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_ENTRY_BYTES_64;
	else
		return MPX_BT_ENTRY_BYTES_32;
}
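/*
 * Decoding example for get_bt_addr(): a 64-bit directory entry of
 * 0x7f0000400001 has the valid bit (bit 0) set and yields a table
 * base of 0x7f0000400000; the same address bits without the valid
 * bit would be rejected with -EINVAL, while an all-zero entry simply
 * reports -ENOENT ("no table here yet").
 */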
/*
 * Take a virtual address and turn it into the offset in bytes
 * inside of the bounds table where the bounds table entry
 * controlling 'addr' can be found.
 */
static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
		unsigned long addr)
{
	unsigned long bt_table_nr_entries;
	unsigned long offset = addr;

	if (is_64bit_mm(mm)) {
		/* Bottom 3 bits are ignored on 64-bit */
		offset >>= 3;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
	} else {
		/* Bottom 2 bits are ignored on 32-bit */
		offset >>= 2;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
	}
	/*
	 * We know the size of the table in to which we are
	 * indexing, and we have eliminated all the low bits
	 * which are ignored for indexing.
	 *
	 * Mask out all the high bits which we do not need
	 * to index in to the table.  Note that the tables
	 * are always powers of two so this gives us a proper
	 * mask.
	 */
	offset &= (bt_table_nr_entries-1);
	/*
	 * We now have an entry offset in terms of *entries* in
	 * the table.  We need to scale it back up to bytes.
	 */
	offset *= bt_entry_size_bytes(mm);
	return offset;
}

/*
 * How much virtual address space does a single bounds
 * directory entry cover?
 *
 * Note, we need a long long because 4GB doesn't fit in
 * to a long on 32-bit.
 */
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
{
	unsigned long long virt_space;
	unsigned long long GB = (1ULL << 30);

	/*
	 * This covers 32-bit emulation as well as 32-bit kernels
	 * running on 64-bit hardware.
	 */
	if (!is_64bit_mm(mm))
		return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;

	/*
	 * 'x86_virt_bits' returns what the hardware is capable
	 * of, and returns the full >32-bit address space when
	 * running 32-bit kernels on 64-bit hardware.
	 */
	virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
	return virt_space / MPX_BD_NR_ENTRIES_64;
}
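/*
 * Worked example (assuming the usual 48 implemented virtual bits and
 * the 2GB directory / 4MB table sizes quoted above, which give 2^28
 * 8-byte directory entries): bd_entry_virt_space() comes out to
 * 2^48 / 2^28 = 1MB of virtual address space per directory entry on
 * 64-bit, matching one bounds table of 2^17 entries, each covering
 * one 8-byte-aligned pointer slot.
 */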
/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start_mapping, unsigned long end_mapping)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;
	unsigned long start;
	unsigned long end;

	/*
	 * If we 'end' on a boundary, the offset will be 0 which
	 * is not what we want.  Back it up a byte to get the
	 * last bt entry.  Then once we have the entry itself,
	 * move 'end' back up by the table entry size.
	 */
	start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
	end = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
	/*
	 * Move end back up by one entry.  Among other things
	 * this ensures that it remains page-aligned and does
	 * not screw up zap_page_range()
	 */
	end += bt_entry_size_bytes(mm);

	/*
	 * Find the first overlapping vma.  If vma->vm_start > start, there
	 * will be a hole in the bounds table.  This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split.  So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here.  If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error.  This
		 * probably results in a SIGSEGV.
		 */
		if (!(vma->vm_flags & VM_MPX))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);
		trace_mpx_unmap_zap(addr, addr+len);

		vma = vma->vm_next;
		addr = vma->vm_start;
	}
	return 0;
}

static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
		unsigned long addr)
{
	/*
	 * There are several ways to derive the bd offsets.  We
	 * use the following approach here:
	 * 1. We know the size of the virtual address space
	 * 2. We know the number of entries in a bounds table
	 * 3. We know that each entry covers a fixed amount of
	 *    virtual address space.
	 * So, we can just divide the virtual address by the
	 * virtual space used by one entry to determine which
	 * entry "controls" the given virtual address.
	 */
	if (is_64bit_mm(mm)) {
		int bd_entry_size = 8; /* 64-bit pointer */
		/*
		 * Take the 64-bit addressing hole in to account.
		 */
		addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	} else {
		int bd_entry_size = 4; /* 32-bit pointer */
		/*
		 * 32-bit has no hole so this case needs no mask
		 */
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	}
	/*
	 * The two return calls above are exact copies.  If we
	 * pull out a single copy and put it in here, gcc won't
	 * realize that we're doing a power-of-2 divide and use
	 * shifts.  It uses a real divide.  If we put them up
	 * there, it manages to figure it out (gcc 4.8.3).
	 */
}
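/*
 * Continuing the 1MB-per-entry example: on a 64-bit mm, an address of
 * 0x7f0000500000 (after masking to the implemented virtual bits)
 * selects directory slot 0x7f0000500000 >> 20, and the byte offset
 * into the directory is that slot number times the 8-byte entry size.
 */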
static int unmap_entire_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long uninitialized_var(actual_old_val);
	int ret;

	while (1) {
		int need_write = 1;
		unsigned long cleared_bd_entry = 0;

		pagefault_disable();
		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
				bd_entry, expected_old_val, cleared_bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * That is OK, since we were both trying to do
		 * the same thing.  Declare success.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry.  We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated.  Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}
	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() will check whether it comes
	 * from one bounds table through the VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm));
}

static int try_unmap_single_bt(struct mm_struct *mm,
	       unsigned long start, unsigned long end)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	/*
	 * "bta" == Bounds Table Area: the area controlled by the
	 * bounds table that we are unmapping.
	 */
	unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
	unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
	unsigned long uninitialized_var(bt_addr);
	void __user *bde_vaddr;
	int ret;
	/*
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	/*
	 * Do not count other MPX bounds table VMAs as neighbors.
	 * Although theoretically possible, we do not allow bounds
	 * tables for bounds tables so our heads do not explode.
	 * If we count them as neighbors here, we may end up with
	 * lots of tables even though we have no actual table
	 * entries in use.
	 */
	while (next && (next->vm_flags & VM_MPX))
		next = next->vm_next;
	while (prev && (prev->vm_flags & VM_MPX))
		prev = prev->vm_prev;
	/*
	 * We know 'start' and 'end' lie within an area controlled
	 * by a single bounds table.  See if there are any other
	 * VMAs controlled by that bounds table.  If there are not
	 * then we can "expand" the area we are unmapping to possibly
	 * cover the entire table.
	 */
	next = find_vma_prev(mm, start, &prev);
	if ((!prev || prev->vm_end <= bta_start_vaddr) &&
	    (!next || next->vm_start >= bta_end_vaddr)) {
		/*
		 * No neighbor VMAs controlled by same bounds
		 * table.  Try to unmap the whole thing
		 */
		start = bta_start_vaddr;
		end = bta_end_vaddr;
	}

	bde_vaddr = mm->bd_addr + mpx_get_bd_entry_offset(mm, start);
	ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
	/*
	 * No bounds table there, so nothing to unmap.
	 */
	if (ret == -ENOENT) {
		ret = 0;
		return 0;
	}
	if (ret)
		return ret;
	/*
	 * We are unmapping an entire table.  Either because the
	 * unmap that started this whole process was large enough
	 * to cover an entire table, or because the unmap was small
	 * but covered the entire area controlled by a bounds table.
	 */
	if ((start == bta_start_vaddr) &&
	    (end == bta_end_vaddr))
		return unmap_entire_bt(mm, bde_vaddr, bt_addr);
	return zap_bt_entries_mapping(mm, bt_addr, start, end);
}

static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned long one_unmap_start;
	trace_mpx_unmap_search(start, end);

	one_unmap_start = start;
	while (one_unmap_start < end) {
		int ret;
		unsigned long next_unmap_start = ALIGN(one_unmap_start+1,
						       bd_entry_virt_space(mm));
		unsigned long one_unmap_end = end;
		/*
		 * If the end is beyond the current bounds table,
		 * move it back so we only deal with a single one
		 * at a time.
		 */
		if (one_unmap_end > next_unmap_start)
			one_unmap_end = next_unmap_start;
		ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
		if (ret)
			return ret;

		one_unmap_start = next_unmap_start;
	}
	return 0;
}
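/*
 * Chunking example for mpx_unmap_tables(), again assuming 1MB of
 * virtual space per directory entry: an unmap of [0x0ff000, 0x201000)
 * is handled as three try_unmap_single_bt() calls, for
 * [0x0ff000, 0x100000), [0x100000, 0x200000) and
 * [0x200000, 0x201000), so each call stays within one bounds table.
 */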
/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed.  Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue the follow-up work.  This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally.  Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}
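/*
 * How control reaches mpx_notify_unmap(), sketched from the call
 * chain described in the comment above (the precise hook lives
 * outside this file, so treat this as illustrative rather than
 * verbatim): do_munmap() invokes the x86 arch_unmap() hook, which
 * hands the unmapped range to mpx_notify_unmap(); the
 * kernel_managing_mpx_tables() test above then makes the whole thing
 * a no-op for processes that never enabled kernel MPX management.
 */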